| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
nthiep/global-ssh-server | lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py | 103 | 9642 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import logging
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
from django.db.backends.postgresql_psycopg2.client import DatabaseClient
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.db.backends.postgresql_psycopg2.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.encoding import force_str
from django.utils.safestring import SafeText, SafeBytes
from django.utils import six
from django.utils.timezone import utc
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
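# Note: registering the UNICODE typecaster makes psycopg2 return text
# columns as unicode objects, and the QuotedString adapters let Django's
# Safe* string types be passed to the driver as ordinary quoted literals.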
logger = logging.getLogger('django.db.backends')
def utc_tzinfo_factory(offset):
if offset != 0:
raise AssertionError("database connection isn't set to UTC")
return utc
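# Illustrative sketch (not part of the backend): psycopg2 calls a cursor's
# tzinfo_factory with the connection's UTC offset in minutes, so wiring in
# utc_tzinfo_factory yields timezone-aware datetimes in UTC:
#
#   cursor = connection.cursor()
#   cursor.tzinfo_factory = utc_tzinfo_factory
#   cursor.execute("SELECT NOW()")  # returned rows carry tzinfo=utc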
class CursorWrapper(object):
"""
A thin wrapper around psycopg2's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
"""
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
except Database.DatabaseError as e:
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
except Database.DatabaseError as e:
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
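# Minimal usage sketch (assumes an open psycopg2 connection named `conn`):
# the wrapper is transparent for ordinary cursor attributes, but
# driver-level errors re-surface as Django's exception types with the
# original traceback intact.
#
#   cursor = CursorWrapper(conn.cursor())
#   try:
#       cursor.execute("INSERT INTO t (id) VALUES (%s)", [1])
#   except utils.IntegrityError:
#       pass  # e.g. a duplicate key, now a django.db.utils error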
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = True
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
has_select_for_update = True
has_select_for_update_nowait = True
has_bulk_insert = True
supports_tablespaces = True
supports_transactions = True
can_distinct_on_fields = True
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
self.features.uses_autocommit = autocommit
if autocommit:
level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
else:
level = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
self._set_isolation_level(level)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
self._pg_version = None
def check_constraints(self, table_names=None):
"""
To check constraints, we set them to IMMEDIATE. Then, when we're done,
we must ensure they are returned to DEFERRED.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
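# Note: SET CONSTRAINTS ALL IMMEDIATE forces any deferred constraints to
# be checked at that point, raising immediately on a violation; switching
# back to DEFERRED restores checking at transaction commit.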
def close(self):
self.validate_thread_sharing()
if self.connection is None:
return
try:
self.connection.close()
self.connection = None
except Database.Error:
# In some cases (database restart, network connection lost, etc.)
# the connection to the database is lost without giving Django a
# notification. If we don't set self.connection to None, the error
# will occur at every request.
self.connection = None
logger.warning('psycopg2 error while closing the connection.',
exc_info=sys.exc_info()
)
raise
def _get_pg_version(self):
if self._pg_version is None:
self._pg_version = get_version(self.connection)
return self._pg_version
pg_version = property(_get_pg_version)
def _cursor(self):
settings_dict = self.settings_dict
if self.connection is None:
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
self.connection = Database.connect(**conn_params)
self.connection.set_client_encoding('UTF8')
tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
if tz:
try:
get_parameter_status = self.connection.get_parameter_status
except AttributeError:
# psycopg2 < 2.0.12 doesn't have get_parameter_status
conn_tz = None
else:
conn_tz = get_parameter_status('TimeZone')
if conn_tz != tz:
# Set the time zone in autocommit mode (see #17062)
self.connection.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.connection.cursor().execute(
self.ops.set_time_zone_sql(), [tz])
self.connection.set_isolation_level(self.isolation_level)
self._get_pg_version()
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return CursorWrapper(cursor)
def _enter_transaction_management(self, managed):
"""
Switch the isolation level when needing transaction support, so that
the same transaction is visible across all the queries.
"""
if self.features.uses_autocommit and managed and not self.isolation_level:
self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
def _leave_transaction_management(self, managed):
"""
If the normal operating mode is "autocommit", switch back to that when
leaving transaction management.
"""
if self.features.uses_autocommit and not managed and self.isolation_level:
self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
def _set_isolation_level(self, level):
"""
Do all the related feature configurations for changing isolation
levels. This doesn't touch the uses_autocommit feature, since that
controls the movement *between* isolation levels.
"""
assert level in range(5)
try:
if self.connection is not None:
self.connection.set_isolation_level(level)
finally:
self.isolation_level = level
self.features.uses_savepoints = bool(level)
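# For reference, the psycopg2.extensions constants covered by the
# `assert level in range(5)` guard above are, in the psycopg2 2.x series
# this backend targets: 0 = AUTOCOMMIT, 1 = READ COMMITTED,
# 2 = REPEATABLE READ, 3 = SERIALIZABLE, 4 = READ UNCOMMITTED.
# bool(level) is False only for autocommit, which is why savepoints are
# disabled at level 0.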
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
| agpl-3.0 |
163gal/Time-Line | libs_arm/wx/py/shell.py | 4 | 57340 | """Shell is an interactive text control in which a user types in
commands to be sent to the interpreter. This particular shell is
based on wxPython's wxStyledTextCtrl.
Sponsored by Orbtech - Your source for Python programming expertise."""
__author__ = "Patrick K. O'Brien <pobrien@orbtech.com>"
__cvsid__ = "$Id$"
__revision__ = "$Revision$"[11:-2]
import wx
from wx import stc
import keyword
import os
import sys
import time
from buffer import Buffer
import dispatcher
import editwindow
import frame
from pseudo import PseudoFileIn
from pseudo import PseudoFileOut
from pseudo import PseudoFileErr
from version import VERSION
from magic import magic
from path import ls,cd,pwd,sx
sys.ps3 = '<-- ' # Input prompt.
USE_MAGIC=True
# Force updates from long-running commands after this many seconds
PRINT_UPDATE_MAX_TIME=2
NAVKEYS = (wx.WXK_END, wx.WXK_LEFT, wx.WXK_RIGHT,
wx.WXK_UP, wx.WXK_DOWN, wx.WXK_PRIOR, wx.WXK_NEXT)
class ShellFrame(frame.Frame, frame.ShellFrameMixin):
"""Frame containing the shell component."""
name = 'Shell Frame'
revision = __revision__
def __init__(self, parent=None, id=-1, title='PyShell',
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE, locals=None,
InterpClass=None,
config=None, dataDir=None,
*args, **kwds):
"""Create ShellFrame instance."""
frame.Frame.__init__(self, parent, id, title, pos, size, style)
frame.ShellFrameMixin.__init__(self, config, dataDir)
if size == wx.DefaultSize:
self.SetSize((750, 525))
intro = 'PyShell %s - The Flakiest Python Shell' % VERSION
self.SetStatusText(intro.replace('\n', ', '))
self.shell = Shell(parent=self, id=-1, introText=intro,
locals=locals, InterpClass=InterpClass,
startupScript=self.startupScript,
execStartupScript=self.execStartupScript,
*args, **kwds)
# Override the shell so that status messages go to the status bar.
self.shell.setStatusText = self.SetStatusText
self.shell.SetFocus()
self.LoadSettings()
def OnClose(self, event):
"""Event handler for closing."""
# This isn't working the way I want, but I'll leave it for now.
if self.shell.waiting:
if event.CanVeto():
event.Veto(True)
else:
self.SaveSettings()
self.shell.destroy()
self.Destroy()
def OnAbout(self, event):
"""Display an About window."""
title = 'About PyShell'
text = 'PyShell %s\n\n' % VERSION + \
'Yet another Python shell, only flakier.\n\n' + \
'Half-baked by Patrick K. O\'Brien,\n' + \
'the other half is still in the oven.\n\n' + \
'Shell Revision: %s\n' % self.shell.revision + \
'Interpreter Revision: %s\n\n' % self.shell.interp.revision + \
'Platform: %s\n' % sys.platform + \
'Python Version: %s\n' % sys.version.split()[0] + \
'wxPython Version: %s\n' % wx.VERSION_STRING + \
('\t(%s)\n' % ", ".join(wx.PlatformInfo[1:]))
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def OnHelp(self, event):
"""Show a help dialog."""
frame.ShellFrameMixin.OnHelp(self, event)
def LoadSettings(self):
if self.config is not None:
frame.ShellFrameMixin.LoadSettings(self)
frame.Frame.LoadSettings(self, self.config)
self.shell.LoadSettings(self.config)
def SaveSettings(self, force=False):
if self.config is not None:
frame.ShellFrameMixin.SaveSettings(self)
if self.autoSaveSettings or force:
frame.Frame.SaveSettings(self, self.config)
self.shell.SaveSettings(self.config)
def DoSaveSettings(self):
if self.config is not None:
self.SaveSettings(force=True)
self.config.Flush()
HELP_TEXT = """\
* Key bindings:
Home Go to the beginning of the command or line.
Shift+Home Select to the beginning of the command or line.
Shift+End Select to the end of the line.
End Go to the end of the line.
Ctrl+C Copy selected text, removing prompts.
Ctrl+Shift+C Copy selected text, retaining prompts.
Alt+C Copy to the clipboard, including prefixed prompts.
Ctrl+X Cut selected text.
Ctrl+V Paste from clipboard.
Ctrl+Shift+V Paste and run multiple commands from clipboard.
Ctrl+Up Arrow Retrieve Previous History item.
Alt+P Retrieve Previous History item.
Ctrl+Down Arrow Retrieve Next History item.
Alt+N Retrieve Next History item.
Shift+Up Arrow Insert Previous History item.
Shift+Down Arrow Insert Next History item.
F8 Command-completion of History item.
(Type a few characters of a previous command and press F8.)
Ctrl+Enter Insert new line into multiline command.
Ctrl+] Increase font size.
Ctrl+[ Decrease font size.
Ctrl+= Default font size.
Ctrl-Space Show Auto Completion.
Ctrl-Alt-Space Show Call Tip.
Shift+Enter Complete Text from History.
Ctrl+F Search
F3 Search next
Ctrl+H "hide" lines containing selection / "unhide"
F12 on/off "free-edit" mode
"""
class ShellFacade:
"""Simplified interface to all shell-related functionality.
This is a semi-transparent facade, in that all attributes of other
are accessible, even though only some are visible to the user."""
name = 'Shell Interface'
revision = __revision__
def __init__(self, other):
"""Create a ShellFacade instance."""
d = self.__dict__
d['other'] = other
d['helpText'] = HELP_TEXT
d['this'] = other.this
def help(self):
"""Display some useful information about how to use the shell."""
self.write(self.helpText)
def __getattr__(self, name):
if hasattr(self.other, name):
return getattr(self.other, name)
else:
raise AttributeError, name
def __setattr__(self, name, value):
if self.__dict__.has_key(name):
self.__dict__[name] = value
elif hasattr(self.other, name):
setattr(self.other, name, value)
else:
raise AttributeError, name
def _getAttributeNames(self):
"""Return list of magic attributes to extend introspection."""
list = [
'about',
'ask',
'autoCallTip',
'autoComplete',
'autoCompleteAutoHide',
'autoCompleteCaseInsensitive',
'autoCompleteIncludeDouble',
'autoCompleteIncludeMagic',
'autoCompleteIncludeSingle',
'callTipInsert',
'clear',
'pause',
'prompt',
'quit',
'redirectStderr',
'redirectStdin',
'redirectStdout',
'run',
'runfile',
'wrap',
'zoom',
]
list.sort()
return list
#DNM
DISPLAY_TEXT="""
Author: %r
Py Version: %s
Py Shell Revision: %s
Py Interpreter Revision: %s
Python Version: %s
wxPython Version: %s
wxPython PlatformInfo: %s
Platform: %s"""
class Shell(editwindow.EditWindow):
"""Shell based on StyledTextCtrl."""
name = 'Shell'
revision = __revision__
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.CLIP_CHILDREN,
introText='', locals=None, InterpClass=None,
startupScript=None, execStartupScript=True,
*args, **kwds):
"""Create Shell instance."""
editwindow.EditWindow.__init__(self, parent, id, pos, size, style)
self.wrap()
if locals is None:
import __main__
locals = __main__.__dict__
# Grab these so they can be restored by self.redirect* methods.
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
# Import a default interpreter class if one isn't provided.
if InterpClass == None:
from interpreter import Interpreter
else:
Interpreter = InterpClass
# Create a replacement for stdin.
self.reader = PseudoFileIn(self.readline, self.readlines)
self.reader.input = ''
self.reader.isreading = False
# Set up the interpreter.
self.interp = Interpreter(locals=locals,
rawin=self.raw_input,
stdin=self.reader,
stdout=PseudoFileOut(self.writeOut),
stderr=PseudoFileErr(self.writeErr),
*args, **kwds)
# Set up the buffer.
self.buffer = Buffer()
# Find out for which keycodes the interpreter will autocomplete.
self.autoCompleteKeys = self.interp.getAutoCompleteKeys()
# Keep track of the last non-continuation prompt positions.
self.promptPosStart = 0
self.promptPosEnd = 0
# Keep track of multi-line commands.
self.more = False
# For use with forced updates during long-running scripts
self.lastUpdate=None
# Create the command history. Commands are added into the
# front of the list (ie. at index 0) as they are entered.
# self.historyIndex is the current position in the history; it
# gets incremented as you retrieve the previous command,
# decremented as you retrieve the next, and reset when you hit
# Enter. self.historyIndex == -1 means you're on the current
# command, not in the history.
self.history = []
self.historyIndex = -1
#seb add mode for "free edit"
self.noteMode = 0
self.MarkerDefine(0,stc.STC_MARK_ROUNDRECT) # marker for hidden
self.searchTxt = ""
# Assign handlers for keyboard events.
self.Bind(wx.EVT_CHAR, self.OnChar)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
# Assign handler for the context menu
self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUI)
# Assign handlers for edit events
self.Bind(wx.EVT_MENU, lambda evt: self.Cut(), id=wx.ID_CUT)
self.Bind(wx.EVT_MENU, lambda evt: self.Copy(), id=wx.ID_COPY)
self.Bind(wx.EVT_MENU, lambda evt: self.CopyWithPrompts(), id=frame.ID_COPY_PLUS)
self.Bind(wx.EVT_MENU, lambda evt: self.Paste(), id=wx.ID_PASTE)
self.Bind(wx.EVT_MENU, lambda evt: self.PasteAndRun(), id=frame.ID_PASTE_PLUS)
self.Bind(wx.EVT_MENU, lambda evt: self.SelectAll(), id=wx.ID_SELECTALL)
self.Bind(wx.EVT_MENU, lambda evt: self.Clear(), id=wx.ID_CLEAR)
self.Bind(wx.EVT_MENU, lambda evt: self.Undo(), id=wx.ID_UNDO)
self.Bind(wx.EVT_MENU, lambda evt: self.Redo(), id=wx.ID_REDO)
# Assign handler for idle time.
self.waiting = False
self.Bind(wx.EVT_IDLE, self.OnIdle)
# Display the introductory banner information.
self.showIntro(introText)
# Assign some pseudo keywords to the interpreter's namespace.
self.setBuiltinKeywords()
# Add 'shell' to the interpreter's local namespace.
self.setLocalShell()
## NOTE: See note at bottom of this file...
## #seb: File drag and drop
## self.SetDropTarget( FileDropTarget(self) )
# Do this last so the user has complete control over their
# environment. They can override anything they want.
if execStartupScript:
if startupScript is None:
startupScript = os.environ.get('PYTHONSTARTUP')
self.execStartupScript(startupScript)
else:
self.prompt()
wx.CallAfter(self.ScrollToLine, 0)
def clearHistory(self):
self.history = []
self.historyIndex = -1
dispatcher.send(signal="Shell.clearHistory")
def destroy(self):
del self.interp
def setFocus(self):
"""Set focus to the shell."""
self.SetFocus()
def OnIdle(self, event):
"""Free the CPU to do other things."""
if self.waiting:
time.sleep(0.05)
event.Skip()
def showIntro(self, text=''):
"""Display introductory text in the shell."""
if text:
self.write(text)
try:
if self.interp.introText:
if text and not text.endswith(os.linesep):
self.write(os.linesep)
self.write(self.interp.introText)
except AttributeError:
pass
def setBuiltinKeywords(self):
"""Create pseudo keywords as part of builtins.
This sets "close", "exit" and "quit" to a helpful string.
"""
import __builtin__
__builtin__.close = __builtin__.exit = __builtin__.quit = \
'Click on the close button to leave the application.'
__builtin__.cd = cd
__builtin__.ls = ls
__builtin__.pwd = pwd
__builtin__.sx = sx
def quit(self):
"""Quit the application."""
# XXX Good enough for now but later we want to send a close event.
# In the close event handler we can make sure they want to
# quit. Other applications, like PythonCard, may choose to
# hide rather than quit so we should just post the event and
# let the surrounding app decide what it wants to do.
self.write('Click on the close button to leave the application.')
def setLocalShell(self):
"""Add 'shell' to locals as reference to ShellFacade instance."""
self.interp.locals['shell'] = ShellFacade(other=self)
def execStartupScript(self, startupScript):
"""Execute the user's PYTHONSTARTUP script if they have one."""
if startupScript and os.path.isfile(startupScript):
text = 'Startup script executed: ' + startupScript
self.push('print %r; execfile(%r)' % (text, startupScript))
self.interp.startupScript = startupScript
else:
self.push('')
def about(self):
"""Display information about Py."""
#DNM
text = DISPLAY_TEXT % \
(__author__, VERSION, self.revision, self.interp.revision,
sys.version.split()[0], wx.VERSION_STRING, str(wx.PlatformInfo),
sys.platform)
self.write(text.strip())
def OnChar(self, event):
"""Keypress event handler.
Only receives an event if OnKeyDown calls event.Skip() for the
corresponding event."""
if self.noteMode:
event.Skip()
return
# Prevent modification of previously submitted
# commands/responses.
if not self.CanEdit():
return
key = event.GetKeyCode()
currpos = self.GetCurrentPos()
stoppos = self.promptPosEnd
# Return (Enter) needs to be ignored in this handler.
if key in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]:
pass
elif key in self.autoCompleteKeys:
# Usually the dot (period) key activates auto completion.
# Get the command between the prompt and the cursor. Add
# the autocomplete character to the end of the command.
if self.AutoCompActive():
self.AutoCompCancel()
command = self.GetTextRange(stoppos, currpos) + chr(key)
self.write(chr(key))
if self.autoComplete:
self.autoCompleteShow(command)
elif key == ord('('):
# The left paren activates a call tip and cancels an
# active auto completion.
if self.AutoCompActive():
self.AutoCompCancel()
# Get the command between the prompt and the cursor. Add
# the '(' to the end of the command.
self.ReplaceSelection('')
command = self.GetTextRange(stoppos, currpos) + '('
self.write('(')
self.autoCallTipShow(command, self.GetCurrentPos() == self.GetTextLength())
else:
# Allow the normal event handling to take place.
event.Skip()
def OnKeyDown(self, event):
"""Key down event handler."""
key = event.GetKeyCode()
# If the auto-complete window is up let it do its thing.
if self.AutoCompActive():
event.Skip()
return
# Prevent modification of previously submitted
# commands/responses.
controlDown = event.ControlDown()
rawControlDown = event.RawControlDown()
altDown = event.AltDown()
shiftDown = event.ShiftDown()
currpos = self.GetCurrentPos()
endpos = self.GetTextLength()
selecting = self.GetSelectionStart() != self.GetSelectionEnd()
if (rawControlDown or controlDown) and shiftDown and key in (ord('F'), ord('f')):
li = self.GetCurrentLine()
m = self.MarkerGet(li)
if m & 1<<0:
startP = self.PositionFromLine(li)
self.MarkerDelete(li, 0)
maxli = self.GetLineCount()
li += 1 # li stayed visible as header-line
li0 = li
while li<maxli and self.GetLineVisible(li) == 0:
li += 1
endP = self.GetLineEndPosition(li-1)
self.ShowLines(li0, li-1)
# select reappearing text to allow "hide again"
self.SetSelection( startP, endP )
return
startP,endP = self.GetSelection()
endP-=1
startL = self.LineFromPosition(startP)
endL = self.LineFromPosition(endP)
# never hide last prompt
if endL == self.LineFromPosition(self.promptPosEnd):
endL -= 1
m = self.MarkerGet(startL)
self.MarkerAdd(startL, 0)
self.HideLines(startL+1,endL)
self.SetCurrentPos( startP ) # to ensure the caret stays visible!
if key == wx.WXK_F12: #seb
if self.noteMode:
# self.promptPosStart not used anyway - or ?
self.promptPosEnd = \
self.PositionFromLine( self.GetLineCount()-1 ) + \
len(str(sys.ps1))
self.GotoLine(self.GetLineCount())
self.GotoPos(self.promptPosEnd)
self.prompt() #make sure we have a prompt
self.SetCaretForeground("black")
self.SetCaretWidth(1) #default
self.SetCaretPeriod(500) #default
else:
self.SetCaretForeground("red")
self.SetCaretWidth(4)
self.SetCaretPeriod(0) #steady
self.noteMode = not self.noteMode
return
if self.noteMode:
event.Skip()
return
# Return (Enter) is used to submit a command to the
# interpreter.
if (not (rawControlDown or controlDown) and not shiftDown and not altDown) and \
key in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]:
if self.CallTipActive():
self.CallTipCancel()
self.processLine()
# Complete Text (from already typed words)
elif shiftDown and key in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]:
self.OnShowCompHistory()
# Ctrl+Return (Ctrl+Enter) is used to insert a line break.
elif (rawControlDown or controlDown) and key in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]:
if self.CallTipActive():
self.CallTipCancel()
if currpos == endpos:
self.processLine()
else:
self.insertLineBreak()
# Let Ctrl-Alt-* get handled normally.
elif (rawControlDown or controlDown) and altDown:
event.Skip()
# Clear the current, unexecuted command.
elif key == wx.WXK_ESCAPE:
if self.CallTipActive():
event.Skip()
else:
self.clearCommand()
# Clear the current command
elif key == wx.WXK_BACK and (rawControlDown or controlDown) and shiftDown:
self.clearCommand()
# Increase font size.
elif (rawControlDown or controlDown) and key in (ord(']'), wx.WXK_NUMPAD_ADD):
dispatcher.send(signal='FontIncrease')
# Decrease font size.
elif (rawControlDown or controlDown) and key in (ord('['), wx.WXK_NUMPAD_SUBTRACT):
dispatcher.send(signal='FontDecrease')
# Default font size.
elif (rawControlDown or controlDown) and key in (ord('='), wx.WXK_NUMPAD_DIVIDE):
dispatcher.send(signal='FontDefault')
# Cut to the clipboard.
elif ((rawControlDown or controlDown) and key in (ord('X'), ord('x'))) \
or (shiftDown and key == wx.WXK_DELETE):
self.Cut()
# Copy to the clipboard.
elif (rawControlDown or controlDown) and not shiftDown \
and key in (ord('C'), ord('c'), wx.WXK_INSERT):
self.Copy()
# Copy to the clipboard, including prompts.
elif (rawControlDown or controlDown) and shiftDown \
and key in (ord('C'), ord('c'), wx.WXK_INSERT):
self.CopyWithPrompts()
# Copy to the clipboard, including prefixed prompts.
elif altDown and not controlDown \
and key in (ord('C'), ord('c'), wx.WXK_INSERT):
self.CopyWithPromptsPrefixed()
# Home needs to be aware of the prompt.
elif (rawControlDown or controlDown) and key == wx.WXK_HOME:
home = self.promptPosEnd
if currpos > home:
self.SetCurrentPos(home)
if not selecting and not shiftDown:
self.SetAnchor(home)
self.EnsureCaretVisible()
else:
event.Skip()
# Home needs to be aware of the prompt.
elif key == wx.WXK_HOME:
home = self.promptPosEnd
if currpos > home:
[line_str,line_len] = self.GetCurLine()
pos=self.GetCurrentPos()
if line_str[:4] in [sys.ps1,sys.ps2,sys.ps3]:
self.SetCurrentPos(pos+4-line_len)
#self.SetCurrentPos(home)
if not selecting and not shiftDown:
self.SetAnchor(pos+4-line_len)
self.EnsureCaretVisible()
else:
event.Skip()
else:
event.Skip()
#
# The following handlers modify text, so we need to see if
# there is a selection that includes text prior to the prompt.
#
# Don't modify a selection with text prior to the prompt.
elif selecting and key not in NAVKEYS and not self.CanEdit():
pass
# Paste from the clipboard.
elif ((rawControlDown or controlDown) and not shiftDown and key in (ord('V'), ord('v'))) \
or (shiftDown and not controlDown and key == wx.WXK_INSERT):
self.Paste()
# manually invoke AutoComplete and Calltips
elif (rawControlDown or controlDown) and key == wx.WXK_SPACE:
self.OnCallTipAutoCompleteManually(shiftDown)
# Paste from the clipboard, run commands.
elif (rawControlDown or controlDown) and shiftDown and key in (ord('V'), ord('v')):
self.PasteAndRun()
# Replace with the previous command from the history buffer.
elif ((rawControlDown or controlDown) and not shiftDown and key == wx.WXK_UP) \
or (altDown and key in (ord('P'), ord('p'))):
self.OnHistoryReplace(step=+1)
# Replace with the next command from the history buffer.
elif ((rawControlDown or controlDown) and not shiftDown and key == wx.WXK_DOWN) \
or (altDown and key in (ord('N'), ord('n'))):
self.OnHistoryReplace(step=-1)
# Insert the previous command from the history buffer.
elif ((rawControlDown or controlDown) and shiftDown and key == wx.WXK_UP) and self.CanEdit():
self.OnHistoryInsert(step=+1)
# Insert the next command from the history buffer.
elif ((rawControlDown or controlDown) and shiftDown and key == wx.WXK_DOWN) and self.CanEdit():
self.OnHistoryInsert(step=-1)
# Search up the history for the text in front of the cursor.
elif key == wx.WXK_F8:
self.OnHistorySearch()
# Don't backspace over the latest non-continuation prompt.
elif key == wx.WXK_BACK:
if selecting and self.CanEdit():
event.Skip()
elif currpos > self.promptPosEnd:
event.Skip()
# Only allow these keys after the latest prompt.
elif key in (wx.WXK_TAB, wx.WXK_DELETE):
if self.CanEdit():
event.Skip()
# Don't toggle between insert mode and overwrite mode.
elif key == wx.WXK_INSERT:
pass
# Don't allow line deletion.
elif controlDown and key in (ord('L'), ord('l')):
# TODO : Allow line deletion eventually...
#event.Skip()
pass
# Don't allow line transposition.
elif controlDown and key in (ord('T'), ord('t')):
# TODO : Allow line transposition eventually...
# TODO : Will have to adjust markers accordingly and test if allowed...
#event.Skip()
pass
# Basic navigation keys should work anywhere.
elif key in NAVKEYS:
event.Skip()
# Protect the readonly portion of the shell.
elif not self.CanEdit():
pass
else:
event.Skip()
def OnShowCompHistory(self):
"""Show possible autocompletion Words from already typed words."""
#copy from history
his = self.history[:]
#put together in one string
joined = " ".join (his)
import re
#keep only the "good" words
newlist = re.split("[ \.\[\]=}(\)\,0-9\"]", joined)
#length > 1 (filter out "trash")
thlist = []
for i in newlist:
if len (i) > 1:
thlist.append (i)
#unique (no duplicate words)
#one-liner from a German Python forum => unique list
unlist = [thlist[i] for i in xrange(len(thlist)) if thlist[i] not in thlist[:i]]
#sort lowercase
unlist.sort(lambda a, b: cmp(a.lower(), b.lower()))
#this is more convenient, isn't it?
self.AutoCompSetIgnoreCase(True)
#join again together in a string
stringlist = " ".join(unlist)
#still need to compute the position from 0
#how big is the offset?
cpos = self.GetCurrentPos() - 1
while chr (self.GetCharAt (cpos)).isalnum():
cpos -= 1
#the most important part
self.AutoCompShow(self.GetCurrentPos() - cpos -1, stringlist)
def clearCommand(self):
"""Delete the current, unexecuted command."""
startpos = self.promptPosEnd
endpos = self.GetTextLength()
self.SetSelection(startpos, endpos)
self.ReplaceSelection('')
self.more = False
def OnHistoryReplace(self, step):
"""Replace with the previous/next command from the history buffer."""
self.clearCommand()
self.replaceFromHistory(step)
def replaceFromHistory(self, step):
"""Replace selection with command from the history buffer."""
ps2 = str(sys.ps2)
self.ReplaceSelection('')
newindex = self.historyIndex + step
if -1 <= newindex <= len(self.history):
self.historyIndex = newindex
if 0 <= newindex <= len(self.history)-1:
command = self.history[self.historyIndex]
command = command.replace('\n', os.linesep + ps2)
self.ReplaceSelection(command)
def OnHistoryInsert(self, step):
"""Insert the previous/next command from the history buffer."""
if not self.CanEdit():
return
startpos = self.GetCurrentPos()
self.replaceFromHistory(step)
endpos = self.GetCurrentPos()
self.SetSelection(endpos, startpos)
def OnHistorySearch(self):
"""Search up the history buffer for the text in front of the cursor."""
if not self.CanEdit():
return
startpos = self.GetCurrentPos()
# The text up to the cursor is what we search for.
numCharsAfterCursor = self.GetTextLength() - startpos
searchText = self.getCommand(rstrip=False)
if numCharsAfterCursor > 0:
searchText = searchText[:-numCharsAfterCursor]
if not searchText:
return
# Search upwards from the current history position and loop
# back to the beginning if we don't find anything.
if (self.historyIndex <= -1) \
or (self.historyIndex >= len(self.history)-2):
searchOrder = range(len(self.history))
else:
searchOrder = range(self.historyIndex+1, len(self.history)) + \
range(self.historyIndex)
for i in searchOrder:
command = self.history[i]
if command[:len(searchText)] == searchText:
# Replace the current selection with the one we found.
self.ReplaceSelection(command[len(searchText):])
endpos = self.GetCurrentPos()
self.SetSelection(endpos, startpos)
# We've now warped into the middle of the history.
self.historyIndex = i
break
def setStatusText(self, text):
"""Display status information."""
# This method will likely be replaced by the enclosing app to
# do something more interesting, like write to a status bar.
print text
def insertLineBreak(self):
"""Insert a new line break."""
if self.CanEdit():
self.write(os.linesep)
self.more = True
self.prompt()
def processLine(self):
"""Process the line of text at which the user hit Enter."""
# The user hit ENTER and we need to decide what to do. They
# could be sitting on any line in the shell.
thepos = self.GetCurrentPos()
startpos = self.promptPosEnd
endpos = self.GetTextLength()
ps2 = str(sys.ps2)
# If they hit RETURN inside the current command, execute the
# command.
if self.CanEdit():
self.SetCurrentPos(endpos)
self.interp.more = False
command = self.GetTextRange(startpos, endpos)
lines = command.split(os.linesep + ps2)
lines = [line.rstrip() for line in lines]
command = '\n'.join(lines)
if self.reader.isreading:
if not command:
# Match the behavior of the standard Python shell
# when the user hits return without entering a
# value.
command = '\n'
self.reader.input = command
self.write(os.linesep)
else:
self.push(command)
wx.FutureCall(1, self.EnsureCaretVisible)
# Or replace the current command with the other command.
else:
# If the line contains a command (even an invalid one).
if self.getCommand(rstrip=False):
command = self.getMultilineCommand()
self.clearCommand()
self.write(command)
# Otherwise, put the cursor back where we started.
else:
self.SetCurrentPos(thepos)
self.SetAnchor(thepos)
def getMultilineCommand(self, rstrip=True):
"""Extract a multi-line command from the editor.
The command may not necessarily be valid Python syntax."""
# XXX Need to extract real prompts here. Need to keep track of
# the prompt every time a command is issued.
ps1 = str(sys.ps1)
ps1size = len(ps1)
ps2 = str(sys.ps2)
ps2size = len(ps2)
# This is a total hack job, but it works.
text = self.GetCurLine()[0]
line = self.GetCurrentLine()
while text[:ps2size] == ps2 and line > 0:
line -= 1
self.GotoLine(line)
text = self.GetCurLine()[0]
if text[:ps1size] == ps1:
line = self.GetCurrentLine()
self.GotoLine(line)
startpos = self.GetCurrentPos() + ps1size
line += 1
self.GotoLine(line)
while self.GetCurLine()[0][:ps2size] == ps2:
line += 1
self.GotoLine(line)
stoppos = self.GetCurrentPos()
command = self.GetTextRange(startpos, stoppos)
command = command.replace(os.linesep + ps2, '\n')
command = command.rstrip()
command = command.replace('\n', os.linesep + ps2)
else:
command = ''
if rstrip:
command = command.rstrip()
return command
def getCommand(self, text=None, rstrip=True):
"""Extract a command from text which may include a shell prompt.
The command may not necessarily be valid Python syntax."""
if not text:
text = self.GetCurLine()[0]
# Strip the prompt off the front leaving just the command.
command = self.lstripPrompt(text)
if command == text:
command = '' # Real commands have prompts.
if rstrip:
command = command.rstrip()
return command
def lstripPrompt(self, text):
"""Return text without a leading prompt."""
ps1 = str(sys.ps1)
ps1size = len(ps1)
ps2 = str(sys.ps2)
ps2size = len(ps2)
# Strip the prompt off the front of text.
if text[:ps1size] == ps1:
text = text[ps1size:]
elif text[:ps2size] == ps2:
text = text[ps2size:]
return text
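# Illustrative behavior (assuming the default '>>> ' and '... ' prompts):
#
#   >>> shell.lstripPrompt('>>> print 1')
#   'print 1'
#   >>> shell.lstripPrompt('... pass')
#   'pass'
#   >>> shell.lstripPrompt('no prompt here')
#   'no prompt here'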
def push(self, command, silent = False):
"""Send command to the interpreter for execution."""
if not silent:
self.write(os.linesep)
#DNM
if USE_MAGIC:
command=magic(command)
busy = wx.BusyCursor()
self.waiting = True
self.lastUpdate=None
self.more = self.interp.push(command)
self.lastUpdate=None
self.waiting = False
del busy
if not self.more:
self.addHistory(command.rstrip())
if not silent:
self.prompt()
def addHistory(self, command):
"""Add command to the command history."""
# Reset the history position.
self.historyIndex = -1
# Insert this command into the history, unless it's a blank
# line or the same as the last command.
if command != '' \
and (len(self.history) == 0 or command != self.history[0]):
self.history.insert(0, command)
dispatcher.send(signal="Shell.addHistory", command=command)
def write(self, text):
"""Display text in the shell.
Replace line endings with OS-specific endings."""
text = self.fixLineEndings(text)
self.AddText(text)
self.EnsureCaretVisible()
if self.waiting:
if self.lastUpdate==None:
self.lastUpdate=time.time()
if time.time()-self.lastUpdate > PRINT_UPDATE_MAX_TIME:
self.Update()
self.lastUpdate=time.time()
def fixLineEndings(self, text):
"""Return text with line endings replaced by OS-specific endings."""
lines = text.split('\r\n')
for l in range(len(lines)):
chunks = lines[l].split('\r')
for c in range(len(chunks)):
chunks[c] = os.linesep.join(chunks[c].split('\n'))
lines[l] = os.linesep.join(chunks)
text = os.linesep.join(lines)
return text
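# Illustrative behavior: mixed '\r\n', '\r' and '\n' endings all collapse
# to os.linesep, so on Windows (os.linesep == '\r\n'):
#
#   >>> shell.fixLineEndings('a\r\nb\rc\nd')
#   'a\r\nb\r\nc\r\nd'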
def prompt(self):
"""Display proper prompt for the context: ps1, ps2 or ps3.
If this is a continuation line, autoindent as necessary."""
isreading = self.reader.isreading
skip = False
if isreading:
prompt = str(sys.ps3)
elif self.more:
prompt = str(sys.ps2)
else:
prompt = str(sys.ps1)
pos = self.GetCurLine()[1]
if pos > 0:
if isreading:
skip = True
else:
self.write(os.linesep)
if not self.more:
self.promptPosStart = self.GetCurrentPos()
if not skip:
self.write(prompt)
if not self.more:
self.promptPosEnd = self.GetCurrentPos()
# Keep the undo feature from undoing previous responses.
self.EmptyUndoBuffer()
if self.more:
line_num=self.GetCurrentLine()
currentLine=self.GetLine(line_num)
previousLine=self.GetLine(line_num-1)[len(prompt):]
pstrip=previousLine.strip()
lstrip=previousLine.lstrip()
# Get the first alnum word:
first_word=[]
for i in pstrip:
if i.isalnum():
first_word.append(i)
else:
break
first_word = ''.join(first_word)
if pstrip == '':
# because it is all whitespace!
indent=previousLine.strip('\n').strip('\r')
else:
indent=previousLine[:(len(previousLine)-len(lstrip))]
if pstrip[-1]==':' and \
first_word in ['if','else','elif','for','while',
'def','class','try','except','finally']:
indent+=' '*4
self.write(indent)
self.EnsureCaretVisible()
self.ScrollToColumn(0)
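# Example of the autoindent rule above (hypothetical session): when the
# previous line ends in ':' and its first word is a block keyword, the new
# continuation line gains one extra level of indentation:
#
#   >>> for i in range(3):
#   ...     |            <- caret lands here, indented four spaces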
def readline(self):
"""Replacement for stdin.readline()."""
input = ''
reader = self.reader
reader.isreading = True
self.prompt()
try:
while not reader.input:
wx.YieldIfNeeded()
input = reader.input
finally:
reader.input = ''
reader.isreading = False
input = str(input) # In case of Unicode.
return input
def readlines(self):
"""Replacement for stdin.readlines()."""
lines = []
while lines[-1:] != ['\n']:
lines.append(self.readline())
return lines
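# Contract sketch: readline() returns '\n' when the user submits an empty
# line (see processLine above), so this loop keeps prompting until that
# blank-line sentinel arrives.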
def raw_input(self, prompt=''):
"""Return string based on user input."""
if prompt:
self.write(prompt)
return self.readline()
def ask(self, prompt='Please enter your response:'):
"""Get response from the user using a dialog box."""
dialog = wx.TextEntryDialog(None, prompt,
'Input Dialog (Raw)', '')
try:
if dialog.ShowModal() == wx.ID_OK:
text = dialog.GetValue()
return text
finally:
dialog.Destroy()
return ''
def pause(self):
"""Halt execution pending a response from the user."""
self.ask('Press enter to continue:')
def clear(self):
"""Delete all text from the shell."""
self.ClearAll()
def run(self, command, prompt=True, verbose=True):
"""Execute command as if it was typed in directly.
>>> shell.run('print "this"')
>>> print "this"
this
>>>
"""
# Go to the very bottom of the text.
endpos = self.GetTextLength()
self.SetCurrentPos(endpos)
command = command.rstrip()
if prompt: self.prompt()
if verbose: self.write(command)
self.push(command)
def runfile(self, filename):
"""Execute all commands in file as if they were typed into the
shell."""
file = open(filename)
try:
self.prompt()
for command in file.readlines():
if command[:6] == 'shell.':
# Run shell methods silently.
self.run(command, prompt=False, verbose=False)
else:
self.run(command, prompt=False, verbose=True)
finally:
file.close()
def autoCompleteShow(self, command, offset = 0):
"""Display auto-completion popup list."""
self.AutoCompSetAutoHide(self.autoCompleteAutoHide)
self.AutoCompSetIgnoreCase(self.autoCompleteCaseInsensitive)
list = self.interp.getAutoCompleteList(command,
includeMagic=self.autoCompleteIncludeMagic,
includeSingle=self.autoCompleteIncludeSingle,
includeDouble=self.autoCompleteIncludeDouble)
if list:
options = ' '.join(list)
#offset = 0
self.AutoCompShow(offset, options)
def autoCallTipShow(self, command, insertcalltip = True, forceCallTip = False):
"""Display argument spec and docstring in a popup window."""
if self.CallTipActive():
self.CallTipCancel()
(name, argspec, tip) = self.interp.getCallTip(command)
if tip:
dispatcher.send(signal='Shell.calltip', sender=self, calltip=tip)
if not self.autoCallTip and not forceCallTip:
return
startpos = self.GetCurrentPos()
if argspec and insertcalltip and self.callTipInsert:
self.write(argspec + ')')
endpos = self.GetCurrentPos()
self.SetSelection(startpos, endpos)
if tip:
tippos = startpos - (len(name) + 1)
fallback = startpos - self.GetColumn(startpos)
# In case there isn't enough room, only go back to the
# fallback.
tippos = max(tippos, fallback)
self.CallTipShow(tippos, tip)
def OnCallTipAutoCompleteManually (self, shiftDown):
"""AutoComplete and Calltips manually."""
if self.AutoCompActive():
self.AutoCompCancel()
currpos = self.GetCurrentPos()
stoppos = self.promptPosEnd
cpos = currpos
#go back until '.' is found
pointavailpos = -1
while cpos >= stoppos:
if self.GetCharAt(cpos) == ord ('.'):
pointavailpos = cpos
break
cpos -= 1
#word from non whitespace until '.'
if pointavailpos != -1:
#look backward for first whitespace char
textbehind = self.GetTextRange (pointavailpos + 1, currpos)
pointavailpos += 1
if not shiftDown:
#call AutoComplete
stoppos = self.promptPosEnd
textbefore = self.GetTextRange(stoppos, pointavailpos)
self.autoCompleteShow(textbefore, len (textbehind))
else:
#call CallTips
cpos = pointavailpos
begpos = -1
while cpos > stoppos:
if chr(self.GetCharAt(cpos)).isspace():
begpos = cpos
break
cpos -= 1
if begpos == -1:
begpos = cpos
ctips = self.GetTextRange (begpos, currpos)
ctindex = ctips.find ('(')
if ctindex != -1 and not self.CallTipActive():
#insert the calltip if the current pos is '('; otherwise only show it
self.autoCallTipShow(ctips[:ctindex + 1],
self.GetCharAt(currpos - 1) == ord('(') and \
self.GetCurrentPos() == self.GetTextLength(),
True)
def writeOut(self, text):
"""Replacement for stdout."""
self.write(text)
def writeErr(self, text):
"""Replacement for stderr."""
self.write(text)
def redirectStdin(self, redirect=True):
"""If redirect is true then sys.stdin will come from the shell."""
if redirect:
sys.stdin = self.reader
else:
sys.stdin = self.stdin
def redirectStdout(self, redirect=True):
"""If redirect is true then sys.stdout will go to the shell."""
if redirect:
sys.stdout = PseudoFileOut(self.writeOut)
else:
sys.stdout = self.stdout
def redirectStderr(self, redirect=True):
"""If redirect is true then sys.stderr will go to the shell."""
if redirect:
sys.stderr = PseudoFileErr(self.writeErr)
else:
sys.stderr = self.stderr
def CanCut(self):
"""Return true if text is selected and can be cut."""
if self.GetSelectionStart() != self.GetSelectionEnd() \
and self.GetSelectionStart() >= self.promptPosEnd \
and self.GetSelectionEnd() >= self.promptPosEnd:
return True
else:
return False
def CanPaste(self):
"""Return true if a paste should succeed."""
if self.CanEdit() and editwindow.EditWindow.CanPaste(self):
return True
else:
return False
def CanEdit(self):
"""Return true if editing should succeed."""
if self.GetSelectionStart() != self.GetSelectionEnd():
if self.GetSelectionStart() >= self.promptPosEnd \
and self.GetSelectionEnd() >= self.promptPosEnd:
return True
else:
return False
else:
return self.GetCurrentPos() >= self.promptPosEnd
def Cut(self):
"""Remove selection and place it on the clipboard."""
if self.CanCut() and self.CanCopy():
if self.AutoCompActive():
self.AutoCompCancel()
if self.CallTipActive():
self.CallTipCancel()
self.Copy()
self.ReplaceSelection('')
def Copy(self):
"""Copy selection and place it on the clipboard."""
if self.CanCopy():
ps1 = str(sys.ps1)
ps2 = str(sys.ps2)
command = self.GetSelectedText()
command = command.replace(os.linesep + ps2, os.linesep)
command = command.replace(os.linesep + ps1, os.linesep)
command = self.lstripPrompt(text=command)
data = wx.TextDataObject(command)
self._clip(data)
def CopyWithPrompts(self):
"""Copy selection, including prompts, and place it on the clipboard."""
if self.CanCopy():
command = self.GetSelectedText()
data = wx.TextDataObject(command)
self._clip(data)
def CopyWithPromptsPrefixed(self):
"""Copy selection, including prompts prefixed with four
spaces, and place it on the clipboard."""
if self.CanCopy():
command = self.GetSelectedText()
spaces = ' ' * 4
command = spaces + command.replace(os.linesep,
os.linesep + spaces)
data = wx.TextDataObject(command)
self._clip(data)
def _clip(self, data):
if wx.TheClipboard.Open():
wx.TheClipboard.UsePrimarySelection(False)
wx.TheClipboard.SetData(data)
wx.TheClipboard.Flush()
wx.TheClipboard.Close()
def Paste(self):
"""Replace selection with clipboard contents."""
if self.CanPaste() and wx.TheClipboard.Open():
ps2 = str(sys.ps2)
if wx.TheClipboard.IsSupported(wx.DataFormat(wx.DF_TEXT)):
data = wx.TextDataObject()
if wx.TheClipboard.GetData(data):
self.ReplaceSelection('')
command = data.GetText()
command = command.rstrip()
command = self.fixLineEndings(command)
command = self.lstripPrompt(text=command)
command = command.replace(os.linesep + ps2, '\n')
command = command.replace(os.linesep, '\n')
command = command.replace('\n', os.linesep + ps2)
self.write(command)
wx.TheClipboard.Close()
def PasteAndRun(self):
"""Replace selection with clipboard contents, run commands."""
text = ''
if wx.TheClipboard.Open():
if wx.TheClipboard.IsSupported(wx.DataFormat(wx.DF_TEXT)):
data = wx.TextDataObject()
if wx.TheClipboard.GetData(data):
text = data.GetText()
wx.TheClipboard.Close()
if text:
self.Execute(text)
def Execute(self, text):
"""Replace selection with text and run commands."""
ps1 = str(sys.ps1)
ps2 = str(sys.ps2)
endpos = self.GetTextLength()
self.SetCurrentPos(endpos)
startpos = self.promptPosEnd
self.SetSelection(startpos, endpos)
self.ReplaceSelection('')
text = text.lstrip()
text = self.fixLineEndings(text)
text = self.lstripPrompt(text)
text = text.replace(os.linesep + ps1, '\n')
text = text.replace(os.linesep + ps2, '\n')
text = text.replace(os.linesep, '\n')
lines = text.split('\n')
commands = []
command = ''
for line in lines:
if line.strip() == ps2.strip():
# If we are pasting from something like a
# web page that drops the trailing space
# from the ps2 prompt of a blank line.
line = ''
lstrip = line.lstrip()
if line.strip() != '' and lstrip == line and \
lstrip[:4] not in ['else','elif'] and \
lstrip[:6] != 'except':
# New command.
if command:
# Add the previous command to the list.
commands.append(command)
# Start a new command, which may be multiline.
command = line
else:
# Multiline command. Add to the command.
command += '\n'
command += line
commands.append(command)
for command in commands:
command = command.replace('\n', os.linesep + ps2)
self.write(command)
self.processLine()
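# Usage sketch (the transcript below is illustrative): pasting a
# doctest-style session executes each logical command in turn, with
# indented continuation lines folded into their parent command:
#
#   shell.Execute('>>> x = 1\n>>> if x:\n...     print x\n')
#   # behaves as if both commands were typed at the prompt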
def wrap(self, wrap=True):
"""Sets whether text is word wrapped."""
try:
self.SetWrapMode(wrap)
except AttributeError:
return 'Wrapping is not available in this version.'
def zoom(self, points=0):
"""Set the zoom level.
This number of points is added to the size of all fonts. It
may be positive to magnify or negative to reduce."""
self.SetZoom(points)
def LoadSettings(self, config):
self.autoComplete = \
config.ReadBool('Options/AutoComplete', True)
self.autoCompleteIncludeMagic = \
config.ReadBool('Options/AutoCompleteIncludeMagic', True)
self.autoCompleteIncludeSingle = \
config.ReadBool('Options/AutoCompleteIncludeSingle', True)
self.autoCompleteIncludeDouble = \
config.ReadBool('Options/AutoCompleteIncludeDouble', True)
self.autoCallTip = config.ReadBool('Options/AutoCallTip', True)
self.callTipInsert = config.ReadBool('Options/CallTipInsert', True)
self.SetWrapMode(config.ReadBool('View/WrapMode', True))
self.lineNumbers = config.ReadBool('View/ShowLineNumbers', True)
self.setDisplayLineNumbers (self.lineNumbers)
zoom = config.ReadInt('View/Zoom/Shell', -99)
if zoom != -99:
self.SetZoom(zoom)
def SaveSettings(self, config):
config.WriteBool('Options/AutoComplete', self.autoComplete)
config.WriteBool('Options/AutoCompleteIncludeMagic',
self.autoCompleteIncludeMagic)
config.WriteBool('Options/AutoCompleteIncludeSingle',
self.autoCompleteIncludeSingle)
config.WriteBool('Options/AutoCompleteIncludeDouble',
self.autoCompleteIncludeDouble)
config.WriteBool('Options/AutoCallTip', self.autoCallTip)
config.WriteBool('Options/CallTipInsert', self.callTipInsert)
config.WriteBool('View/WrapMode', self.GetWrapMode())
config.WriteBool('View/ShowLineNumbers', self.lineNumbers)
config.WriteInt('View/Zoom/Shell', self.GetZoom())
def GetContextMenu(self):
"""
Create and return a context menu for the shell.
This is used instead of the scintilla default menu
in order to correctly respect our immutable buffer.
"""
menu = wx.Menu()
menu.Append(wx.ID_UNDO, "Undo")
menu.Append(wx.ID_REDO, "Redo")
menu.AppendSeparator()
menu.Append(wx.ID_CUT, "Cut")
menu.Append(wx.ID_COPY, "Copy")
menu.Append(frame.ID_COPY_PLUS, "Copy Plus")
menu.Append(wx.ID_PASTE, "Paste")
menu.Append(frame.ID_PASTE_PLUS, "Paste Plus")
menu.Append(wx.ID_CLEAR, "Clear")
menu.AppendSeparator()
menu.Append(wx.ID_SELECTALL, "Select All")
return menu
def OnContextMenu(self, evt):
menu = self.GetContextMenu()
self.PopupMenu(menu)
def OnUpdateUI(self, evt):
id = evt.Id
if id in (wx.ID_CUT, wx.ID_CLEAR):
evt.Enable(self.CanCut())
elif id in (wx.ID_COPY, frame.ID_COPY_PLUS):
evt.Enable(self.CanCopy())
elif id in (wx.ID_PASTE, frame.ID_PASTE_PLUS):
evt.Enable(self.CanPaste())
elif id == wx.ID_UNDO:
evt.Enable(self.CanUndo())
elif id == wx.ID_REDO:
evt.Enable(self.CanRedo())
## NOTE: The DnD of file names is disabled until we can figure out how
## best to still allow DnD of text.
## #seb : File drag and drop
## class FileDropTarget(wx.FileDropTarget):
## def __init__(self, obj):
## wx.FileDropTarget.__init__(self)
## self.obj = obj
## def OnDropFiles(self, x, y, filenames):
## if len(filenames) == 1:
## txt = 'r\"%s\"' % filenames[0]
## else:
## txt = '( '
## for f in filenames:
## txt += 'r\"%s\" , ' % f
## txt += ')'
## self.obj.AppendText(txt)
## pos = self.obj.GetCurrentPos()
## self.obj.SetCurrentPos( pos )
## self.obj.SetSelection( pos, pos )
## class TextAndFileDropTarget(wx.DropTarget):
## def __init__(self, shell):
## wx.DropTarget.__init__(self)
## self.shell = shell
## self.compdo = wx.DataObjectComposite()
## self.textdo = wx.TextDataObject()
## self.filedo = wx.FileDataObject()
## self.compdo.Add(self.textdo)
## self.compdo.Add(self.filedo, True)
## self.SetDataObject(self.compdo)
## def OnDrop(self, x, y):
## return True
## def OnData(self, x, y, result):
## self.GetData()
## if self.textdo.GetTextLength() > 1:
## text = self.textdo.GetText()
## # *** Do something with the dragged text here...
## self.textdo.SetText('')
## else:
## filenames = self.filedo.GetFilenames()
## if len(filenames) == 1:
## txt = 'r\"%s\"' % filenames[0]
## else:
## txt = '( '
## for f in filenames:
## txt += 'r\"%s\" , ' % f
## txt += ')'
## self.shell.AppendText(txt)
## pos = self.shell.GetCurrentPos()
## self.shell.SetCurrentPos( pos )
## self.shell.SetSelection( pos, pos )
## return result
| gpl-3.0 |
waytai/networkx | doc/gh-pages.py | 34 | 4346 | #!/usr/bin/env python
"""Script to commit the doc build outputs into the github-pages repo.
Use:
gh-pages.py [tag]
If no tag is given, the current output of 'git describe' is used. If given,
that is how the resulting directory will be named.
In practice, you should use either actual clean tags from a current build or
something like 'current' as a stable URL for the most current version"""
# Borrowed from IPython.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os
import re
import shutil
import sys
from os import chdir as cd
from os.path import join as pjoin
from subprocess import Popen, PIPE, CalledProcessError, check_call
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
pages_dir = 'gh-pages'
html_dir = 'build/dist'
pdf_dir = 'build/latex'
pages_repo = 'git@github.com:networkx/documentation.git'
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def sh(cmd):
"""Execute command in a subshell, return status code."""
return check_call(cmd, shell=True)
def sh2(cmd):
"""Execute command in a subshell, return stdout.
Stderr is unbuffered from the subshell."""
p = Popen(cmd, stdout=PIPE, shell=True)
out = p.communicate()[0]
retcode = p.returncode
if retcode:
raise CalledProcessError(retcode, cmd)
else:
return out.rstrip()
def sh3(cmd):
"""Execute command in a subshell, return stdout, stderr
If anything appears in stderr, print it out to sys.stderr"""
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
out, err = p.communicate()
retcode = p.returncode
if retcode:
raise CalledProcessError(retcode, cmd)
else:
return out.rstrip(), err.rstrip()
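# Usage sketch for the three helpers (the git commands are illustrative):
#
#   sh('git status')             # stream output; returns 0 on success
#   tag = sh2('git describe')    # capture stdout, stripped
#   out, err = sh3('git pull')   # capture stdout and stderr, stripped
#
# All three raise CalledProcessError on a non-zero exit status.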
def init_repo(path):
"""clone the gh-pages repo if we haven't already."""
sh("git clone %s %s"%(pages_repo, path))
here = os.getcwdu()
cd(path)
sh('git checkout gh-pages')
cd(here)
#-----------------------------------------------------------------------------
# Script starts
#-----------------------------------------------------------------------------
if __name__ == '__main__':
# The tag can be given as a positional argument
try:
tag = sys.argv[1]
except IndexError:
try:
tag = sh2('git describe --exact-match')
except CalledProcessError:
print("using development as label")
tag = "development" # Fallback
startdir = os.getcwdu()
if not os.path.exists(pages_dir):
# init the repo
init_repo(pages_dir)
else:
# ensure up-to-date before operating
cd(pages_dir)
sh('git checkout gh-pages')
sh('git pull')
cd(startdir)
dest = pjoin(pages_dir, tag)
# don't `make html` here, because gh-pages already depends on html in Makefile
# sh('make html')
if tag != 'dev':
# only build pdf for non-dev targets
#sh2('make pdf')
pass
# This is pretty unforgiving: we unconditionally nuke the destination
# directory, and then copy the html tree in there
shutil.rmtree(dest, ignore_errors=True)
shutil.copytree(html_dir, dest)
if tag != 'dev':
#shutil.copy(pjoin(pdf_dir, 'ipython.pdf'), pjoin(dest, 'ipython.pdf'))
pass
try:
cd(pages_dir)
status = sh2('git status | head -1')
branch = re.match('\# On branch (.*)$', status).group(1)
if branch != 'gh-pages':
e = 'On %r, git branch is %r, MUST be "gh-pages"' % (pages_dir,
branch)
raise RuntimeError(e)
sh('git add -A %s' % tag)
sh('git commit -m"Updated doc release: %s"' % tag)
print('\nMost recent 3 commits:')
sys.stdout.flush()
sh('git --no-pager log --oneline HEAD~3..')
finally:
cd(startdir)
print('\nNow verify the build in: %r' % dest)
print("If everything looks good, 'git push'")
| bsd-3-clause |
moomou/heron | heron/common/src/python/utils/metrics/py_metrics.py | 1 | 5091 | # copyright 2016 twitter. all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Python program related metrics."""
import gc
import psutil
import traceback
from .metrics import AssignableMetrics
from .metrics_helper import BaseMetricsHelper
import heron.common.src.python.constants as constants
from heron.common.src.python.config import system_config
from heron.common.src.python.utils.log import Log
# pylint: disable=too-many-instance-attributes
class PyMetrics(BaseMetricsHelper):
"""Helper class to collect PyHeron program metrics"""
def __init__(self, metrics_collector):
self.process = psutil.Process()
# total sys cpu time
self.sys_cpu_time = AssignableMetrics(0)
# total user cpu time
self.user_cpu_time = AssignableMetrics(0)
# threads cpu usage
self.threads = AssignableMetrics(0)
# number of open file descriptors
self.fd_nums = AssignableMetrics(0)
# number of threads
self.num_threads = AssignableMetrics([])
# rss: aka "Resident Set Size"
# this is the non-swapped physical memory a process has used.
self.physical_memory = AssignableMetrics(0)
# vms: "Virtual Memory Size", this is the total
# amount of virtual memory used by the process.
self.virtual_memory = AssignableMetrics(0)
# stats about three generations of GC
# count is the number of objects in one generation
# threshold is the collect frequency of one generation
self.g1_count, self.g1_threshold = AssignableMetrics(0), AssignableMetrics(0)
self.g2_count, self.g2_threshold = AssignableMetrics(0), AssignableMetrics(0)
self.g3_count, self.g3_threshold = AssignableMetrics(0), AssignableMetrics(0)
PY_SYS_CPU_TIME = '__py-sys-cpu-time-secs'
PY_USER_CPU_TIME = '__py-user-cpu-time-secs'
PY_FD_NUMS = '__py-file-descriptors-number'
PY_PHYSICAL_MEMORY = '__py-physical-memory-byte'
PY_VIRTUAL_MEMORY = '__py-virtual-memory-byte'
PY_GC_GENERATION_1_COUNT = '__py-generation-1-count'
PY_GC_GENERATION_2_COUNT = '__py-generation-2-count'
PY_GC_GENERATION_3_COUNT = '__py-generation-3-count'
PY_GC_GENERATION_1_THRESHOLD = '__py-generation-1-collection-threshold'
PY_GC_GENERATION_2_THRESHOLD = '__py-generation-2-collection-threshold'
PY_GC_GENERATION_3_THRESHOLD = '__py-generation-3-collection-threshold'
self.metrics = {PY_SYS_CPU_TIME: self.sys_cpu_time,
PY_USER_CPU_TIME: self.user_cpu_time,
PY_FD_NUMS: self.fd_nums,
PY_PHYSICAL_MEMORY: self.physical_memory,
PY_VIRTUAL_MEMORY: self.virtual_memory,
PY_GC_GENERATION_1_COUNT: self.g1_count,
PY_GC_GENERATION_2_COUNT: self.g2_count,
PY_GC_GENERATION_3_COUNT: self.g3_count,
PY_GC_GENERATION_1_THRESHOLD: self.g1_threshold,
PY_GC_GENERATION_2_THRESHOLD: self.g2_threshold,
PY_GC_GENERATION_3_THRESHOLD: self.g3_threshold}
super(PyMetrics, self).__init__(self.metrics)
sys_config = system_config.get_sys_config()
interval = float(sys_config[constants.HERON_METRICS_EXPORT_INTERVAL_SEC])
self.register_metrics(metrics_collector, interval)
def update_cpu_time(self):
try:
t = self.process.cpu_times()
self.sys_cpu_time.update(t.system)
self.user_cpu_time.update(t.user)
except Exception as e:
Log.error(traceback.format_exc(e))
def update_threads_time(self):
try:
tt = self.process.threads()
self.threads.update([(t.id, t.user_time, t.system_time) for t in tt])
except Exception as e:
Log.error(traceback.format_exc(e))
def update_fds(self):
try:
self.fd_nums.update(self.process.num_fds())
except Exception as e:
Log.error(traceback.format_exc(e))
def update_memory_usage(self):
try:
m = self.process.memory_info()
self.physical_memory.update(m.rss)
self.virtual_memory.update(m.vms)
except Exception as e:
Log.error(traceback.format_exc(e))
def update_gc_stat(self):
try:
c1, c2, c3 = gc.get_count()
t1, t2, t3 = gc.get_threshold()
self.g1_count.update(c1)
self.g2_count.update(c2)
self.g3_count.update(c3)
      self.g1_threshold.update(t1)
      self.g2_threshold.update(t2)
      self.g3_threshold.update(t3)
except Exception as e:
Log.error(traceback.format_exc(e))
def update_all(self):
self.update_cpu_time()
self.update_threads_time()
self.update_fds()
self.update_memory_usage()
self.update_gc_stat()
| apache-2.0 |
peterlauri/django | tests/sites_framework/tests.py | 117 | 2693 | from django.conf import settings
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.core import checks
from django.db import models
from django.test import SimpleTestCase, TestCase
from django.test.utils import isolate_apps
from .models import CustomArticle, ExclusiveArticle, SyndicatedArticle
class SitesFrameworkTestCase(TestCase):
def setUp(self):
Site.objects.get_or_create(id=settings.SITE_ID, domain="example.com", name="example.com")
Site.objects.create(id=settings.SITE_ID + 1, domain="example2.com", name="example2.com")
def test_site_fk(self):
article = ExclusiveArticle.objects.create(title="Breaking News!", site_id=settings.SITE_ID)
self.assertEqual(ExclusiveArticle.on_site.all().get(), article)
def test_sites_m2m(self):
article = SyndicatedArticle.objects.create(title="Fresh News!")
article.sites.add(Site.objects.get(id=settings.SITE_ID))
article.sites.add(Site.objects.get(id=settings.SITE_ID + 1))
article2 = SyndicatedArticle.objects.create(title="More News!")
article2.sites.add(Site.objects.get(id=settings.SITE_ID + 1))
self.assertEqual(SyndicatedArticle.on_site.all().get(), article)
def test_custom_named_field(self):
article = CustomArticle.objects.create(
title="Tantalizing News!",
places_this_article_should_appear_id=settings.SITE_ID,
)
self.assertEqual(CustomArticle.on_site.all().get(), article)
@isolate_apps('sites_framework')
class CurrentSiteManagerChecksTests(SimpleTestCase):
def test_invalid_name(self):
class InvalidArticle(models.Model):
on_site = CurrentSiteManager("places_this_article_should_appear")
errors = InvalidArticle.check()
expected = [
checks.Error(
"CurrentSiteManager could not find a field named "
"'places_this_article_should_appear'.",
obj=InvalidArticle.on_site,
id='sites.E001',
)
]
self.assertEqual(errors, expected)
def test_invalid_field_type(self):
class ConfusedArticle(models.Model):
site = models.IntegerField()
on_site = CurrentSiteManager()
errors = ConfusedArticle.check()
expected = [
checks.Error(
"CurrentSiteManager cannot use 'ConfusedArticle.site' as it is "
"not a foreign key or a many-to-many field.",
obj=ConfusedArticle.on_site,
id='sites.E002',
)
]
self.assertEqual(errors, expected)
| bsd-3-clause |
CityPulse/CP_Resourcemanagement | wrapper_dev/deploy_mgt.py | 1 | 4726 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os, curses, curses.ascii, subprocess
import glob
from time import sleep
class Wrapper(object):
deploy_dir = os.path.join("..", "virtualisation", "autodeploy")
def __init__(self, deploy_json):
self.dir = os.path.dirname(deploy_json)
self.name = os.path.basename(self.dir)
self.zipname = self.name + ".zip"
self.compress = False
self.deployed = os.path.exists(os.path.join(Wrapper.deploy_dir, self.zipname))
def __repr__(self):
return "%s in %s will %s compress" % (self.name, self.dir, "" if self.compress else "not")
def start_compress(self):
log = open("log.txt", "a")
filelist = os.listdir(self.dir)
cmd = ["zip", "-r", os.path.join(os.path.dirname(self.dir), self.zipname)] + filelist
log.write("executing " + " ".join(cmd) + " in " + self.dir)
log.flush()
proc = subprocess.Popen(cmd, cwd=self.dir, shell=False, stdout=log, stderr=log)
proc.wait()
cmd = ["mv", self.zipname, os.path.join(Wrapper.deploy_dir, self.zipname)]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.communicate()
self.deployed = True
log.close()
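    # Illustrative summary (assumes `zip` and `mv` on PATH, as the code above
    # does): the two subprocess calls amount to
    #   zip -r <parent-dir>/<name>.zip .        # run inside the wrapper dir
    #   mv <name>.zip ../virtualisation/autodeploy/<name>.zip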
def remove(self):
if self.deployed:
os.remove(os.path.join(Wrapper.deploy_dir, self.zipname))
self.deployed = False
def get_root_dir():
return os.path.dirname(os.path.abspath(__file__))
def find_wrapper():
wrapper = []
folders = glob.glob(os.path.join(get_root_dir(), "*/deploy.json"))
for f in folders:
wrapper.append(Wrapper(f))
return wrapper
def draw_info(scr, wrapper):
count_marked = len(filter(lambda x: x.compress, wrapper))
scr.clear()
scr.box()
scr.addstr(1, 2, "Wrappers selected:")
scr.addstr(1, 21, "%d/%d" % (count_marked, len(wrapper)))
scr.addstr(2, 2, "Commands:")
scr.addstr(2, 12, "Tab/Space/Enter = select; UP/DOWN = next/previous; r = undeploy;")
scr.addstr(3, 12, "d = start deploying; n/p = scroll; ESC = exit")
scr.refresh()
def draw_wrapper(scr, wrapper, start=0, current=0):
height, width = scr.getmaxyx()
end = min(len(wrapper), height - 2) # minus box frame
if end + start > len(wrapper):
start = len(wrapper) - end
scr.clear()
scr.box()
for i in range(0, end):
w = wrapper[i + start]
scr.addstr(i + 1, 1, "%s [%s] %s %s" % (
">" if i + start == current else " ", "X" if w.compress else " ", "D" if w.deployed else " ", w.name))
scr.refresh()
def init_scr():
scr = curses.initscr()
height, width = scr.getmaxyx()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.start_color()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
control_height = 5
chapter_list = curses.newwin(height - control_height, width, 0, 0)
chapter_list.bkgd(curses.color_pair(1))
control = curses.newwin(control_height, width, height - control_height, 0)
control.keypad(1)
control.bkgd(curses.color_pair(1))
return control, chapter_list
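# Illustrative layout produced by init_scr(): the wrapper list fills the top
# of the terminal, while a 5-line keypad-enabled control window sits at the
# bottom; both are drawn white-on-blue via color pair 1.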
def main():
wrapper = find_wrapper()
selected = 0
scr, wrapper_list_win = init_scr()
wrapper_list_start = 0
while True:
draw_info(scr, wrapper)
draw_wrapper(wrapper_list_win, wrapper, wrapper_list_start, selected)
curses.flushinp()
c = scr.getch()
if c == curses.ascii.ESC:
break
elif c == curses.ascii.SP or c == curses.ascii.NL or c == curses.ascii.TAB or c == curses.ascii.BS:
wrapper[selected].compress = not wrapper[selected].compress
elif c == curses.KEY_DOWN:
if selected < len(wrapper) - 1:
selected += 1
elif c == curses.KEY_UP:
if selected > 0:
selected -= 1
elif c == ord('n'):
if wrapper_list_start <= len(wrapper):
wrapper_list_start += 1
elif c == ord('p'):
if wrapper_list_start > 1:
wrapper_list_start -= 1
elif c == ord('d'):
to_compress = filter(lambda x: x.compress, wrapper)
to_compress_count = len(to_compress)
counter = 0
scr.addstr(1, 25, "deploying...")
scr.refresh()
for w in to_compress:
w.start_compress()
counter += 1
scr.addstr(1, 25, "deployed: %d/%d" % (counter, to_compress_count))
scr.refresh()
elif c == ord('r'):
wrapper[selected].remove()
curses.endwin()
return 0
if __name__ == '__main__':
main()
| mit |
sinperwolf/shadowsocks | shadowsocks/crypto/sodium.py | 1032 | 3778 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
__all__ = ['ciphers']
libsodium = None
loaded = False
buf_size = 2048
# for salsa20 and chacha20
BLOCK_SIZE = 64
def load_libsodium():
global loaded, libsodium, buf
libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic',
'libsodium')
if libsodium is None:
raise Exception('libsodium not found')
libsodium.crypto_stream_salsa20_xor_ic.restype = c_int
libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p,
c_ulonglong,
c_char_p, c_ulonglong,
c_char_p)
libsodium.crypto_stream_chacha20_xor_ic.restype = c_int
libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p,
c_ulonglong,
c_char_p, c_ulonglong,
c_char_p)
buf = create_string_buffer(buf_size)
loaded = True
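# For reference (an assumption based on libsodium's documented C API), the
# argtypes above mirror a signature of the form:
#   int crypto_stream_salsa20_xor_ic(unsigned char *c, const unsigned char *m,
#                                    unsigned long long mlen,
#                                    const unsigned char *n, uint64_t ic,
#                                    const unsigned char *k);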
class SodiumCrypto(object):
def __init__(self, cipher_name, key, iv, op):
if not loaded:
load_libsodium()
self.key = key
self.iv = iv
self.key_ptr = c_char_p(key)
self.iv_ptr = c_char_p(iv)
if cipher_name == 'salsa20':
self.cipher = libsodium.crypto_stream_salsa20_xor_ic
elif cipher_name == 'chacha20':
self.cipher = libsodium.crypto_stream_chacha20_xor_ic
else:
raise Exception('Unknown cipher')
# byte counter, not block counter
self.counter = 0
def update(self, data):
global buf_size, buf
l = len(data)
# we can only prepend some padding to make the encryption align to
# blocks
padding = self.counter % BLOCK_SIZE
if buf_size < padding + l:
buf_size = (padding + l) * 2
buf = create_string_buffer(buf_size)
if padding:
data = (b'\0' * padding) + data
self.cipher(byref(buf), c_char_p(data), padding + l,
self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
self.counter += l
# buf is copied to a str object when we access buf.raw
# strip off the padding
return buf.raw[padding:padding + l]
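# Worked example of the block alignment in update() above (illustrative):
# after encrypting 100 bytes, counter == 100, so the next call prepends
# 100 % 64 == 36 zero bytes, passes block counter 100 // 64 == 1 to
# libsodium, and strips the 36 pad bytes before returning the ciphertext.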
ciphers = {
'salsa20': (32, 8, SodiumCrypto),
'chacha20': (32, 8, SodiumCrypto),
}
def test_salsa20():
cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1)
decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_chacha20():
cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1)
decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test_chacha20()
test_salsa20()
| apache-2.0 |
mitodl/open-discussions | course_catalog/etl/mitpe.py | 1 | 6740 | """MIT Professional course catalog ETL"""
import logging
import re
from datetime import datetime
from decimal import Decimal
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup as bs
from django.conf import settings
from course_catalog.constants import OfferedBy, PlatformType, mitpe_edx_mapping
from course_catalog.etl.utils import (
generate_unique_id,
strip_extra_whitespace,
parse_dates,
map_topics,
)
from course_catalog.models import Course
log = logging.getLogger()
OFFERED_BY = [{"name": OfferedBy.mitpe.value}]
PLATFORM = PlatformType.mitpe.value
def _parse_topics(course):
"""
Extract topics for a course
Args:
course(Tag): The BeautifulSoup tag containing course data
Returns:
list of str: List of topic names
"""
try:
mitpe_topics = (
course.find("div", {"class": "views-field-field-course-topics"})
.find("div", {"class": "field-content"})
.get_text()
.split(",")
)
except AttributeError:
return []
return map_topics(mitpe_topics, mitpe_edx_mapping)
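# Illustrative input/output (hypothetical topic text): a topics div containing
# "Data Modeling & Analysis, Leadership" is split on "," and each entry is
# translated through mitpe_edx_mapping by map_topics().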
def _parse_price(course):
"""
Extract the course price as a list
Args:
course(Tag): The BeautifulSoup tag containing course data
Returns:
Decimal: Price of course or None
"""
try:
price_text = course.find("div", {"class": "views-field-field-course-fee"}).text
except AttributeError:
return None
price_match = re.search(r"([\d,]+)", price_text)
if price_match:
return Decimal(price_match.group(0).replace(",", ""))
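# Illustrative input/output (hypothetical fee text): "Course fee: $4,500"
# matches "4,500", whose comma is stripped to yield Decimal("4500"); None is
# returned when the fee div or a numeric match is absent.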
def _parse_run_dates(course):
"""
Extracts the start and end dates for each course run
Args:
course(Tag): The BeautifulSoup tag containing course data
Returns:
list of tuple: List of start and end date tuples for each course run
"""
date_div = course.find("span", {"class": "date-display-range"})
run_dates = []
if date_div:
dates = parse_dates(date_div.get_text())
if dates:
run_dates.append(dates)
return run_dates
def _parse_instructors(course):
"""
Extract instructor names from the course detail page
Args:
        course(Tag): The BeautifulSoup tag containing course data
Returns:
list of lists: List of first & last names of each instructor
"""
return [
instructor.get_text().split(" ", 1)
for instructor in course.find(
"div", {"class": "views-field-field-lead-instructors"}
).findAll("div", {"class": "field-content"})
]
def _parse_description(details):
"""
Extract short description from the course detail page
Args:
details(Tag): BeautifulSoup Tag for course details
Returns:
list of str: List of course paragraph strings
"""
div = details.find("div", {"class": "course-right"}).find(
"div", {"class": "field--type-text-with-summary"}
)
# pylint: disable=expression-not-assigned
[p.extract() for p in div.findAll("p", {"class": "special"})]
paragraphs = []
for child in div.findAll():
if child.name == "p":
paragraphs.append(child.text)
elif child.name == "h3":
break
return paragraphs
def _has_existing_published_run(course_id):
"""
Returns true if there's an existing published run for the course
Args:
course_id (str): the course id to check
Returns:
bool: True if such a course and run exists, False otherwise
"""
course = Course.objects.filter(platform=PLATFORM, course_id=course_id).first()
return bool(course) and course.runs.filter(published=True).exists()
def extract():
"""Loads the MIT Professional Education catalog data via BeautifulSoup"""
if not settings.MITPE_BASE_URL:
log.error("MIT Professional base URL not set, skipping ETL")
return []
courses = []
soup = bs(
requests.get(urljoin(settings.MITPE_BASE_URL, "/course-catalog")).content,
"html.parser",
)
listings = soup.find("div", {"class": "course-listing"}).findAll(
"div", {"class": "views-row"}
)
for listing in listings:
link = listing.find("a")
url = urljoin(settings.MITPE_BASE_URL, link.get("href"))
details = bs(requests.get(url).content, "html.parser")
description = _parse_description(details)
short_description = strip_extra_whitespace(description[0])
        full_description = strip_extra_whitespace(" ".join(description))
courses.append(
{
"url": url,
"title": strip_extra_whitespace(link.get_text()),
"dates": _parse_run_dates(listing),
"price": _parse_price(listing),
"topics": _parse_topics(listing),
"instructors": _parse_instructors(listing),
"short_description": short_description,
"full_description": full_description,
}
)
return courses
def transform_course(course):
"""Transform a single course"""
course_id = generate_unique_id(course["url"])
runs = [
{
"url": course["url"],
"prices": ([{"price": course["price"]}] if course["price"] else []),
"run_id": generate_unique_id(
f"{course['url']}{datetime.strftime(date_range[0], '%Y%m%d')}"
),
"platform": PLATFORM,
"start_date": date_range[0],
"end_date": date_range[1],
"best_start_date": date_range[0],
"best_end_date": date_range[1],
"offered_by": OFFERED_BY,
"title": course["title"],
"short_description": course["short_description"],
"full_description": course["full_description"],
"instructors": [
{"first_name": first_name, "last_name": last_name}
for (first_name, last_name) in course["instructors"]
],
}
for date_range in course.get("dates", [])
]
return {
"url": course["url"],
"title": course["title"],
"topics": [{"name": topic} for topic in course["topics"]],
"short_description": course["short_description"],
"full_description": course["full_description"],
"course_id": course_id,
"platform": PLATFORM,
"offered_by": OFFERED_BY,
"published": bool(runs) or _has_existing_published_run(course_id),
"runs": runs,
}
def transform(courses):
"""Transform the MIT Professional Education course data"""
return [transform_course(course) for course in courses]
| bsd-3-clause |
nojhan/weboob-devel | modules/boursorama/pages/__init__.py | 3 | 1450 | # -*- coding: utf-8 -*-
# Copyright(C) 2011 Gabriel Kerneis
# Copyright(C) 2010-2011 Jocelyn Jaubert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .account_history import AccountHistory
from .card_history import CardHistory
from .accounts_list import AccountsList
from .login import LoginPage, ProfilIncomplete, UpdateInfoPage
from .two_authentication import AuthenticationPage
from .investment import AccountMarket, AccountLifeInsurance, InvestmentDetail
class AccountPrelevement(AccountsList):
pass
__all__ = ['LoginPage',
'ProfilIncomplete',
'AccountsList',
'AccountHistory',
'CardHistory',
'UpdateInfoPage',
'AuthenticationPage',
'AccountMarket',
'AccountLifeInsurance',
'InvestmentDetail',
]
| agpl-3.0 |
lamblin/pylearn2 | pylearn2/datasets/vector_spaces_dataset.py | 41 | 6009 | """TODO: module-level docstring."""
__authors__ = "Pascal Lamblin and Razvan Pascanu"
__copyright__ = "Copyright 2010-2013, Universite de Montreal"
__credits__ = ["Pascal Lamblin", "Razvan Pascanu", "Ian Goodfellow",
"Mehdi Mirza"]
__license__ = "3-clause BSD"
__maintainer__ = "Pascal Lamblin"
__email__ = "lamblinp@iro"
import functools
import numpy as np
from pylearn2.datasets.dataset import Dataset
from pylearn2.utils import wraps
from pylearn2.utils.iteration import (
FiniteDatasetIterator,
resolve_iterator_class
)
from pylearn2.utils.data_specs import is_flat_specs
from pylearn2.utils.rng import make_np_rng
from pylearn2.utils import contains_nan
class VectorSpacesDataset(Dataset):
"""
A class representing datasets being stored as a number of VectorSpaces.
This can be seen as a generalization of DenseDesignMatrix where
there can be any number of sources, not just X and possibly y.
Parameters
----------
data : ndarray, or tuple of ndarrays, containing the data.
It is formatted as specified in `data_specs`. For instance, if
`data_specs` is (VectorSpace(nfeat), 'features'), then `data` has to be
a 2-d ndarray, of shape (nb examples, nfeat), that defines an unlabeled
dataset. If `data_specs` is (CompositeSpace(Conv2DSpace(...),
VectorSpace(1)), ('features', 'target')), then `data` has to be an
(X, y) pair, with X being an ndarray containing images stored in the
topological view specified by the `Conv2DSpace`, and y being a 2-D
ndarray of width 1, containing the labels or targets for each example.
data_specs : (space, source) pair
space is an instance of `Space` (possibly a `CompositeSpace`),
and `source` is a string (or tuple of strings, if `space` is a
`CompositeSpace`), defining the format and labels associated
to `data`.
rng : object, optional
A random number generator used for picking random indices into the
design matrix when choosing minibatches.
preprocessor: WRITEME
fit_preprocessor: WRITEME
"""
_default_seed = (17, 2, 946)
def __init__(self, data=None, data_specs=None, rng=_default_seed,
preprocessor=None, fit_preprocessor=False):
# data_specs should be flat, and there should be no
# duplicates in source, as we keep only one version
assert is_flat_specs(data_specs)
if isinstance(data_specs[1], tuple):
assert sorted(set(data_specs[1])) == sorted(data_specs[1])
space, source = data_specs
if isinstance(data, list):
data = tuple(data)
space.np_validate(data)
assert len(set(elem.shape[0] for elem in list(data))) <= 1
self.data = data
self.data_specs = data_specs
self.num_examples = list(data)[0].shape[0]
self.compress = False
self.design_loc = None
self.rng = make_np_rng(rng, which_method='random_integers')
# Defaults for iterators
self._iter_mode = resolve_iterator_class('sequential')
if preprocessor:
preprocessor.apply(self, can_fit=fit_preprocessor)
self.preprocessor = preprocessor
@functools.wraps(Dataset.iterator)
def iterator(self, mode=None, batch_size=None, num_batches=None,
rng=None, data_specs=None,
return_tuple=False):
if mode is None:
if hasattr(self, '_iter_subset_class'):
mode = self._iter_subset_class
else:
raise ValueError('iteration mode not provided and no default '
'mode set for %s' % str(self))
else:
mode = resolve_iterator_class(mode)
if batch_size is None:
batch_size = getattr(self, '_iter_batch_size', None)
if num_batches is None:
num_batches = getattr(self, '_iter_num_batches', None)
if rng is None and mode.stochastic:
rng = self.rng
if data_specs is None:
data_specs = self.data_specs
return FiniteDatasetIterator(
self,
mode(self.get_num_examples(),
batch_size, num_batches, rng),
data_specs=data_specs, return_tuple=return_tuple
)
def get_data_specs(self):
"""
Returns the data_specs specifying how the data is internally stored.
This is the format the data returned by `self.get_data()` will be.
"""
return self.data_specs
def get_data(self):
"""
.. todo::
WRITEME
"""
return self.data
def set_data(self, data, data_specs):
"""
.. todo::
WRITEME
"""
# data is organized as data_specs
# keep self.data_specs, and convert data
data_specs[0].np_validate(data)
        assert not any(contains_nan(X) for X in data)
raise NotImplementedError()
def get_source(self, name):
"""
.. todo::
WRITEME
"""
raise NotImplementedError()
@wraps(Dataset.get_num_examples)
def get_num_examples(self):
return self.num_examples
def get_batch(self, batch_size, data_specs=None):
"""
.. todo::
WRITEME
"""
raise NotImplementedError()
"""
try:
idx = self.rng.randint(self.X.shape[0] - batch_size + 1)
except ValueError:
if batch_size > self.X.shape[0]:
raise ValueError("Requested "+str(batch_size)+" examples"
"from a dataset containing only "+str(self.X.shape[0]))
raise
rx = self.X[idx:idx + batch_size, :]
if include_labels:
if self.y is None:
return rx, None
ry = self.y[idx:idx + batch_size]
return rx, ry
rx = np.cast[config.floatX](rx)
return rx
"""
| bsd-3-clause |
lento/cortex | python/IECoreMaya/GenericParameterUI.py | 12 | 7845 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import maya.mel
import IECore
import IECoreMaya.ParameterUI
import IECoreMaya.StringUtil
class GenericParameterUI( IECoreMaya.ParameterUI ) :
def __init__( self, node, parameter, **kw ) :
IECoreMaya.ParameterUI.__init__(
self,
node,
parameter,
maya.cmds.rowLayout(
numberOfColumns = 2,
columnWidth2 = [ self.textColumnWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex * 3 + 25 + 25 ]
),
**kw
)
maya.cmds.text( label = self.label(), font="smallPlainLabelFont", align="right", annotation=self.description() )
self.__connectionsLayout = maya.cmds.columnLayout()
maya.cmds.setParent( ".." )
maya.cmds.setParent( ".." )
self.replace( self.node(), self.parameter )
def replace( self, node, parameter ) :
IECoreMaya.ParameterUI.replace( self, node, parameter )
self.__updateDisplay()
self.__attributeChangedCallbackId = IECoreMaya.CallbackId(
maya.OpenMaya.MNodeMessage.addAttributeChangedCallback( self.node(), self.__attributeChanged )
)
def _topLevelUIDeleted( self ) :
self.__attributeChangedCallbackId = None
def _popupMenuDefinition( self, **kw ) :
definition = IECoreMaya.ParameterUI._popupMenuDefinition( self, **kw )
acceptedConnectionTypes = None
with IECore.IgnoredExceptions( KeyError ) :
acceptedConnectionTypes = list( self.parameter.userData()["UI"]["acceptedNodeTypes"] )
if acceptedConnectionTypes :
nodeNames = maya.cmds.ls( long=True, type=acceptedConnectionTypes )
if nodeNames :
definition.append( "/ConnectionsDivider", { "divider" : True } )
for nodeName in nodeNames :
definition.append( "/Connect To/%s" % nodeName, { "command" : IECore.curry( self.__connectToNode, nodeName ) } )
return definition
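	# Illustrative opt-in (a sketch; assumes IECore's CompoundObject API): a
	# parameter enables the "Connect To" menu above with userData such as
	#   parameter.userData()["UI"] = IECore.CompoundObject()
	#   parameter.userData()["UI"]["acceptedNodeTypes"] = IECore.StringVectorData( [ "transform" ] )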
def __attributeChanged( self, changeType, plug, otherPlug, userData ) :
if not (
( changeType & maya.OpenMaya.MNodeMessage.kConnectionMade )
or ( changeType & maya.OpenMaya.MNodeMessage.kConnectionBroken )
) :
return
try :
myPlug = self.plug()
except :
# this situation can occur when our parameter has been removed but the
# ui we represent is not quite yet dead
return
if plug == myPlug :
maya.cmds.evalDeferred( self.__updateDisplay )
def __updateDisplay( self ) :
if not maya.cmds.layout( self.__connectionsLayout, query=True, exists=True ) :
return
currentParent = maya.cmds.setParent( query=True )
fnPH = IECoreMaya.FnParameterisedHolder( self.node() )
plugPath = fnPH.parameterPlugPath( self.parameter )
connections = maya.cmds.listConnections( plugPath, source=True, plugs=True, skipConversionNodes=True )
numConnections = 0
if connections :
numConnections = len( connections )
old = maya.cmds.columnLayout( self.__connectionsLayout, query=True, childArray=True )
if old :
for child in old :
maya.cmds.deleteUI( child )
maya.cmds.setParent( self.__connectionsLayout )
if numConnections == 0 :
maya.cmds.rowLayout(
numberOfColumns = 2,
columnWidth2 = [ IECoreMaya.ParameterUI.singleWidgetWidthIndex * 3 - 40, 20 ],
)
text = maya.cmds.text( align="left", label="Not connected", font="tinyBoldLabelFont" )
self._addPopupMenu( parentUI=text, attributeName = self.plugName() )
self._addPopupMenu( parentUI=text, attributeName = self.plugName(), button1=True )
maya.cmds.iconTextButton(
annotation = "Clicking this takes you the connection editor for this connection.",
style = "iconOnly",
image = "viewList.xpm",
font = "boldLabelFont",
command = IECore.curry( self.__connectionEditor, leftHandNode = None ),
height = 20,
width = 20
)
maya.cmds.setParent( ".." )
else :
for i in range( numConnections ) :
self.__drawConnection( connections[i] )
maya.cmds.setParent( currentParent )
def __drawConnection( self, plugName ) :
fieldWidth = IECoreMaya.ParameterUI.singleWidgetWidthIndex * 3 - 40
maya.cmds.rowLayout(
numberOfColumns = 3,
columnWidth3 = [ fieldWidth , 20, 20 ]
)
name = maya.cmds.text( l=plugName, font="tinyBoldLabelFont", align="left",
width=fieldWidth, height = 20, ann=plugName )
self._addPopupMenu( parentUI=name, attributeName = self.plugName() )
self._addPopupMenu( parentUI=name, attributeName = self.plugName(), button1=True )
maya.cmds.iconTextButton(
annotation = "Clicking this takes you the connection editor for this connection.",
style = "iconOnly",
image = "viewList.xpm",
font = "boldLabelFont",
command = IECore.curry( self.__connectionEditor, leftHandNode = plugName ),
height = 20,
width = 20
)
maya.cmds.iconTextButton(
annotation = "Clicking this will take you to the node sourcing this connection.",
style = "iconOnly",
image = "navButtonConnected.xpm",
command = IECore.curry( self.__showEditor, plugName ),
height = 20,
)
maya.cmds.setParent( ".." )
def __connectionEditor( self, leftHandNode ) :
maya.mel.eval(
str( "ConnectionEditor;" +
"nodeOutliner -e -replace %(right)s connectWindow|tl|cwForm|connectWindowPane|rightSideCW;"+
"connectWindowSetRightLabel %(right)s;") % { 'right' : self.nodeName() }
)
if leftHandNode :
maya.mel.eval(
str("nodeOutliner -e -replace %(left)s connectWindow|tl|cwForm|connectWindowPane|leftSideCW;"+
"connectWindowSetLeftLabel %(left)s;" ) % { 'left' : leftHandNode.split(".")[0] }
)
def __showEditor( self, attributeName ) :
maya.mel.eval( 'showEditor "' + IECoreMaya.StringUtil.nodeFromAttributePath( attributeName ) + '"' )
def __connectToNode( self, nodeName ) :
maya.cmds.connectAttr( nodeName + ".message", self.plugName(), force=True )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.Parameter, GenericParameterUI )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.Parameter, GenericParameterUI, 'generic' )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.StringParameter, GenericParameterUI, 'generic' )
| bsd-3-clause |
anfieldinc/ntp | docs/node_modules/browser-sync/node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
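# Worked example of the surrogate formulae above (illustrative):
#   U+1F4A9 -> high = (0x1F4A9 - 0x10000) / 0x400 + 0xD800 = 0xD83D
#              low  = (0x1F4A9 - 0x10000) % 0x400 + 0xDC00 = 0xDCA9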
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
| mit |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_2_0/wireless_subordinate_broker.py | 17 | 83365 | from ..broker import Broker
class WirelessSubordinateBroker(Broker):
controller = "wireless_subordinates"
def index(self, **kwargs):
"""Lists the available wireless subordinates. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which wireless subordinates was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which wireless subordinates was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param WirelessSubordinantID: The internal NetMRI identifier of the WirelessSubordinantID
:type WirelessSubordinantID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param WirelessSubordinantID: The internal NetMRI identifier of the WirelessSubordinantID
:type WirelessSubordinantID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the wireless subordinates as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of wireless subordinate methods. The listed methods will be called on each wireless subordinate returned and included in the output. Available methods are: data_source, device, vlan, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, vlan.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` WirelessSubordinantID
:param sort: The data field(s) to use for sorting the output. Default is WirelessSubordinantID. Valid values are WirelessSubordinantID, DataSourceID, DeviceID, SubStartTime, SubEndTime, SubChangedCols, SubTimestamp, SubMac, SubDeviceID, SubNumOfSlots, SubName, SubLocation, SubMonitorOnlyMode, SubOperationStatus, SubSoftwareVersion, SubBootVersion, SubModel, SubSerialNumber, SubIPNumeric, SubIPDotted, SubType, SubGroupVlanName, VlanID, SubAdminStatus, SubOSVersion.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each WirelessSubordinate. Valid values are WirelessSubordinantID, DataSourceID, DeviceID, SubStartTime, SubEndTime, SubChangedCols, SubTimestamp, SubMac, SubDeviceID, SubNumOfSlots, SubName, SubLocation, SubMonitorOnlyMode, SubOperationStatus, SubSoftwareVersion, SubBootVersion, SubModel, SubSerialNumber, SubIPNumeric, SubIPDotted, SubType, SubGroupVlanName, VlanID, SubAdminStatus, SubOSVersion. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return wireless_subordinates: An array of the WirelessSubordinate objects that match the specified input criteria.
:rtype wireless_subordinates: Array of WirelessSubordinate
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
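    # Illustrative usage (a sketch; assumes an authenticated InfobloxNetMRI
    # client named `client` from this package, and a hypothetical device ID):
    #   broker = client.get_broker('WirelessSubordinate')
    #   subs = broker.index(DeviceID=[1234], limit=100)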
def show(self, **kwargs):
"""Shows the details for the specified wireless subordinate.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param WirelessSubordinantID: The internal NetMRI identifier of the WirelessSubordinantID
:type WirelessSubordinantID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of wireless subordinate methods. The listed methods will be called on each wireless subordinate returned and included in the output. Available methods are: data_source, device, vlan, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, vlan.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return wireless_subordinate: The wireless subordinate identified by the specified WirelessSubordinantID.
:rtype wireless_subordinate: WirelessSubordinate
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
"""Lists the available wireless subordinates matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which wireless subordinates was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which wireless subordinates was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubAdminStatus: The configured status(up/down) of the interface.
:type SubAdminStatus: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubAdminStatus: The configured status(up/down) of the interface.
:type SubAdminStatus: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubBootVersion: The boot version of the wireless subordinate is running on this device.
:type SubBootVersion: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubBootVersion: The boot version of the wireless subordinate is running on this device.
:type SubBootVersion: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubChangedCols: The fields that changed between this revision of the record and the previous revision.
:type SubChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubChangedCols: The fields that changed between this revision of the record and the previous revision.
:type SubChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubDeviceID: The internal NetMRI identifier of each wireless subordinate device.
:type SubDeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubDeviceID: The internal NetMRI identifier of each wireless subordinate device.
:type SubDeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubEndTime: The ending effective date and time of this record was collected or calculated, or empty if still in effect.
:type SubEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubEndTime: The ending effective date and time of this record was collected or calculated, or empty if still in effect.
:type SubEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubGroupVlanName: The Vlan name of the wireless subordinate group.
:type SubGroupVlanName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubGroupVlanName: The Vlan name of the wireless subordinate group.
:type SubGroupVlanName: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param SubIPDotted: The management IP address of the wireless subordinate in dotted (or colon-delimited for IPv6) format.
:type SubIPDotted: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param SubIPDotted: The management IP address of the wireless subordinate in dotted (or colon-delimited for IPv6) format.
:type SubIPDotted: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubIPNumeric: The numerical value of the wireless IP Address.
:type SubIPNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubIPNumeric: The numerical value of the wireless IP Address.
:type SubIPNumeric: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubLocation: The location as reported by the wireless.
:type SubLocation: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubLocation: The location as reported by the wireless.
:type SubLocation: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubMac: The Media Access Controller(MAC) address of the end host.
:type SubMac: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubMac: The Media Access Controller(MAC) address of the end host.
:type SubMac: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubModel: The wireless subordinate model name.
:type SubModel: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubModel: The wireless subordinate model name.
:type SubModel: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
        :param SubMonitorOnlyMode: Indicates whether the wireless device operates in monitor-only mode within NetMRI.
:type SubMonitorOnlyMode: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param SubMonitorOnlyMode: Indicates whether the wireless device operates in monitor-only mode within NetMRI.
:type SubMonitorOnlyMode: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubName: The name of the wireless subordinates defined within NetMRI.
:type SubName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubName: The name of the wireless subordinates defined within NetMRI.
:type SubName: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubNumOfSlots: The required number of slots available in the wireless subordinates.
:type SubNumOfSlots: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubNumOfSlots: The required number of slots available in the wireless subordinates.
:type SubNumOfSlots: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubOSVersion: The operating system version of the wireless subordinate is running on this device.
:type SubOSVersion: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubOSVersion: The operating system version of the wireless subordinate is running on this device.
:type SubOSVersion: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubOperationStatus: The operational status(up/down) of the wireless.
:type SubOperationStatus: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubOperationStatus: The operational status(up/down) of the wireless.
:type SubOperationStatus: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubSerialNumber: The vendor-specific serial number string for the wireless subordinates.The preferred value is the serial number string actually printed on the component itself(if present).
:type SubSerialNumber: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubSerialNumber: The vendor-specific serial number string for the wireless subordinates.The preferred value is the serial number string actually printed on the component itself(if present).
:type SubSerialNumber: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubSoftwareVersion: The software version is running on this device.
:type SubSoftwareVersion: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubSoftwareVersion: The software version is running on this device.
:type SubSoftwareVersion: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubStartTime: The starting effective date and time this record was collected or calculated.
:type SubStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubStartTime: The starting effective date and time this record was collected or calculated.
:type SubStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubTimestamp: The date and time this record was collected or calculated.
:type SubTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubTimestamp: The date and time this record was collected or calculated.
:type SubTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SubType: The NetMRI-determined subordinate type of the wireless.
:type SubType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SubType: The NetMRI-determined subordinate type of the wireless.
:type SubType: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the Vlan.
:type VlanID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the Vlan.
:type VlanID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param WirelessSubordinantID: The internal NetMRI identifier of the WirelessSubordinantID
:type WirelessSubordinantID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param WirelessSubordinantID: The internal NetMRI identifier of the WirelessSubordinantID
:type WirelessSubordinantID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the wireless subordinates as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of wireless subordinate methods. The listed methods will be called on each wireless subordinate returned and included in the output. Available methods are: data_source, device, vlan, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, vlan.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` WirelessSubordinantID
:param sort: The data field(s) to use for sorting the output. Default is WirelessSubordinantID. Valid values are WirelessSubordinantID, DataSourceID, DeviceID, SubStartTime, SubEndTime, SubChangedCols, SubTimestamp, SubMac, SubDeviceID, SubNumOfSlots, SubName, SubLocation, SubMonitorOnlyMode, SubOperationStatus, SubSoftwareVersion, SubBootVersion, SubModel, SubSerialNumber, SubIPNumeric, SubIPDotted, SubType, SubGroupVlanName, VlanID, SubAdminStatus, SubOSVersion.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each WirelessSubordinate. Valid values are WirelessSubordinantID, DataSourceID, DeviceID, SubStartTime, SubEndTime, SubChangedCols, SubTimestamp, SubMac, SubDeviceID, SubNumOfSlots, SubName, SubLocation, SubMonitorOnlyMode, SubOperationStatus, SubSoftwareVersion, SubBootVersion, SubModel, SubSerialNumber, SubIPNumeric, SubIPDotted, SubType, SubGroupVlanName, VlanID, SubAdminStatus, SubOSVersion. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against wireless subordinates, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, SubAdminStatus, SubBootVersion, SubChangedCols, SubDeviceID, SubEndTime, SubGroupVlanName, SubIPDotted, SubIPNumeric, SubLocation, SubMac, SubModel, SubMonitorOnlyMode, SubName, SubNumOfSlots, SubOSVersion, SubOperationStatus, SubSerialNumber, SubSoftwareVersion, SubStartTime, SubTimestamp, SubType, VlanID, WirelessSubordinantID.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return wireless_subordinates: An array of the WirelessSubordinate objects that match the specified input criteria.
:rtype wireless_subordinates: Array of WirelessSubordinate
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
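    # Illustrative usage (a sketch; the model string is hypothetical):
    #   matches = broker.search(SubModel=['AIR-LAP1142N-A-K9'], limit=50)
    # or, free-form across the searchable attributes:
    #   matches = broker.search(query='AIR-LAP')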
def find(self, **kwargs):
"""Lists the available wireless subordinates matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, SubAdminStatus, SubBootVersion, SubChangedCols, SubDeviceID, SubEndTime, SubGroupVlanName, SubIPDotted, SubIPNumeric, SubLocation, SubMac, SubModel, SubMonitorOnlyMode, SubName, SubNumOfSlots, SubOSVersion, SubOperationStatus, SubSerialNumber, SubSoftwareVersion, SubStartTime, SubTimestamp, SubType, VlanID, WirelessSubordinantID.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which wireless subordinate data was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubAdminStatus: The operator to apply to the field SubAdminStatus. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubAdminStatus: The configured status (up/down) of the interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubAdminStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubAdminStatus: If op_SubAdminStatus is specified, the field named in this input will be compared to the value in SubAdminStatus using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubAdminStatus must be specified if op_SubAdminStatus is specified.
:type val_f_SubAdminStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubAdminStatus: If op_SubAdminStatus is specified, this value will be compared to the value in SubAdminStatus using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubAdminStatus must be specified if op_SubAdminStatus is specified.
:type val_c_SubAdminStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubBootVersion: The operator to apply to the field SubBootVersion. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubBootVersion: The boot version the wireless subordinate is running on this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubBootVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubBootVersion: If op_SubBootVersion is specified, the field named in this input will be compared to the value in SubBootVersion using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubBootVersion must be specified if op_SubBootVersion is specified.
:type val_f_SubBootVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubBootVersion: If op_SubBootVersion is specified, this value will be compared to the value in SubBootVersion using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubBootVersion must be specified if op_SubBootVersion is specified.
:type val_c_SubBootVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubChangedCols: The operator to apply to the field SubChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubChangedCols: If op_SubChangedCols is specified, the field named in this input will be compared to the value in SubChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubChangedCols must be specified if op_SubChangedCols is specified.
:type val_f_SubChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubChangedCols: If op_SubChangedCols is specified, this value will be compared to the value in SubChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubChangedCols must be specified if op_SubChangedCols is specified.
:type val_c_SubChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubDeviceID: The operator to apply to the field SubDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubDeviceID: The internal NetMRI identifier of each wireless subordinate device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubDeviceID: If op_SubDeviceID is specified, the field named in this input will be compared to the value in SubDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubDeviceID must be specified if op_SubDeviceID is specified.
:type val_f_SubDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubDeviceID: If op_SubDeviceID is specified, this value will be compared to the value in SubDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubDeviceID must be specified if op_SubDeviceID is specified.
:type val_c_SubDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubEndTime: The operator to apply to the field SubEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubEndTime: The ending effective date and time this record was collected or calculated, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubEndTime: If op_SubEndTime is specified, the field named in this input will be compared to the value in SubEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubEndTime must be specified if op_SubEndTime is specified.
:type val_f_SubEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubEndTime: If op_SubEndTime is specified, this value will be compared to the value in SubEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubEndTime must be specified if op_SubEndTime is specified.
:type val_c_SubEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubGroupVlanName: The operator to apply to the field SubGroupVlanName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubGroupVlanName: The Vlan name of the wireless subordinate group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubGroupVlanName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubGroupVlanName: If op_SubGroupVlanName is specified, the field named in this input will be compared to the value in SubGroupVlanName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubGroupVlanName must be specified if op_SubGroupVlanName is specified.
:type val_f_SubGroupVlanName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubGroupVlanName: If op_SubGroupVlanName is specified, this value will be compared to the value in SubGroupVlanName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubGroupVlanName must be specified if op_SubGroupVlanName is specified.
:type val_c_SubGroupVlanName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubIPDotted: The operator to apply to the field SubIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubIPDotted: The management IP address of the wireless subordinate in dotted (or colon-delimited for IPv6) format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubIPDotted: If op_SubIPDotted is specified, the field named in this input will be compared to the value in SubIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubIPDotted must be specified if op_SubIPDotted is specified.
:type val_f_SubIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubIPDotted: If op_SubIPDotted is specified, this value will be compared to the value in SubIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubIPDotted must be specified if op_SubIPDotted is specified.
:type val_c_SubIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubIPNumeric: The operator to apply to the field SubIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubIPNumeric: The numerical value of the wireless subordinate's IP address. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubIPNumeric: If op_SubIPNumeric is specified, the field named in this input will be compared to the value in SubIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubIPNumeric must be specified if op_SubIPNumeric is specified.
:type val_f_SubIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubIPNumeric: If op_SubIPNumeric is specified, this value will be compared to the value in SubIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubIPNumeric must be specified if op_SubIPNumeric is specified.
:type val_c_SubIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubLocation: The operator to apply to the field SubLocation. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubLocation: The location as reported by the wireless subordinate. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubLocation: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubLocation: If op_SubLocation is specified, the field named in this input will be compared to the value in SubLocation using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubLocation must be specified if op_SubLocation is specified.
:type val_f_SubLocation: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubLocation: If op_SubLocation is specified, this value will be compared to the value in SubLocation using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubLocation must be specified if op_SubLocation is specified.
:type val_c_SubLocation: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubMac: The operator to apply to the field SubMac. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubMac: The Media Access Controller (MAC) address of the end host. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubMac: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubMac: If op_SubMac is specified, the field named in this input will be compared to the value in SubMac using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubMac must be specified if op_SubMac is specified.
:type val_f_SubMac: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubMac: If op_SubMac is specified, this value will be compared to the value in SubMac using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubMac must be specified if op_SubMac is specified.
:type val_c_SubMac: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubModel: The operator to apply to the field SubModel. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubModel: The wireless subordinate model name. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubModel: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubModel: If op_SubModel is specified, the field named in this input will be compared to the value in SubModel using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubModel must be specified if op_SubModel is specified.
:type val_f_SubModel: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubModel: If op_SubModel is specified, this value will be compared to the value in SubModel using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubModel must be specified if op_SubModel is specified.
:type val_c_SubModel: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubMonitorOnlyMode: The operator to apply to the field SubMonitorOnlyMode. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubMonitorOnlyMode: Indicates whether the wireless subordinate operates in monitor-only mode within NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubMonitorOnlyMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubMonitorOnlyMode: If op_SubMonitorOnlyMode is specified, the field named in this input will be compared to the value in SubMonitorOnlyMode using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubMonitorOnlyMode must be specified if op_SubMonitorOnlyMode is specified.
:type val_f_SubMonitorOnlyMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubMonitorOnlyMode: If op_SubMonitorOnlyMode is specified, this value will be compared to the value in SubMonitorOnlyMode using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubMonitorOnlyMode must be specified if op_SubMonitorOnlyMode is specified.
:type val_c_SubMonitorOnlyMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubName: The operator to apply to the field SubName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubName: The name of the wireless subordinate as defined within NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubName: If op_SubName is specified, the field named in this input will be compared to the value in SubName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubName must be specified if op_SubName is specified.
:type val_f_SubName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubName: If op_SubName is specified, this value will be compared to the value in SubName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubName must be specified if op_SubName is specified.
:type val_c_SubName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubNumOfSlots: The operator to apply to the field SubNumOfSlots. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubNumOfSlots: The number of slots available on the wireless subordinate. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubNumOfSlots: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubNumOfSlots: If op_SubNumOfSlots is specified, the field named in this input will be compared to the value in SubNumOfSlots using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubNumOfSlots must be specified if op_SubNumOfSlots is specified.
:type val_f_SubNumOfSlots: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubNumOfSlots: If op_SubNumOfSlots is specified, this value will be compared to the value in SubNumOfSlots using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubNumOfSlots must be specified if op_SubNumOfSlots is specified.
:type val_c_SubNumOfSlots: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubOSVersion: The operator to apply to the field SubOSVersion. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubOSVersion: The operating system version the wireless subordinate is running on this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubOSVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubOSVersion: If op_SubOSVersion is specified, the field named in this input will be compared to the value in SubOSVersion using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubOSVersion must be specified if op_SubOSVersion is specified.
:type val_f_SubOSVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubOSVersion: If op_SubOSVersion is specified, this value will be compared to the value in SubOSVersion using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubOSVersion must be specified if op_SubOSVersion is specified.
:type val_c_SubOSVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubOperationStatus: The operator to apply to the field SubOperationStatus. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubOperationStatus: The operational status (up/down) of the wireless subordinate. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubOperationStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubOperationStatus: If op_SubOperationStatus is specified, the field named in this input will be compared to the value in SubOperationStatus using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubOperationStatus must be specified if op_SubOperationStatus is specified.
:type val_f_SubOperationStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubOperationStatus: If op_SubOperationStatus is specified, this value will be compared to the value in SubOperationStatus using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubOperationStatus must be specified if op_SubOperationStatus is specified.
:type val_c_SubOperationStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubSerialNumber: The operator to apply to the field SubSerialNumber. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubSerialNumber: The vendor-specific serial number string for the wireless subordinate. The preferred value is the serial number string actually printed on the component itself (if present). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubSerialNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubSerialNumber: If op_SubSerialNumber is specified, the field named in this input will be compared to the value in SubSerialNumber using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubSerialNumber must be specified if op_SubSerialNumber is specified.
:type val_f_SubSerialNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubSerialNumber: If op_SubSerialNumber is specified, this value will be compared to the value in SubSerialNumber using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubSerialNumber must be specified if op_SubSerialNumber is specified.
:type val_c_SubSerialNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubSoftwareVersion: The operator to apply to the field SubSoftwareVersion. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubSoftwareVersion: The software version running on this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubSoftwareVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubSoftwareVersion: If op_SubSoftwareVersion is specified, the field named in this input will be compared to the value in SubSoftwareVersion using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubSoftwareVersion must be specified if op_SubSoftwareVersion is specified.
:type val_f_SubSoftwareVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubSoftwareVersion: If op_SubSoftwareVersion is specified, this value will be compared to the value in SubSoftwareVersion using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubSoftwareVersion must be specified if op_SubSoftwareVersion is specified.
:type val_c_SubSoftwareVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubStartTime: The operator to apply to the field SubStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubStartTime: The starting effective date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubStartTime: If op_SubStartTime is specified, the field named in this input will be compared to the value in SubStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubStartTime must be specified if op_SubStartTime is specified.
:type val_f_SubStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubStartTime: If op_SubStartTime is specified, this value will be compared to the value in SubStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubStartTime must be specified if op_SubStartTime is specified.
:type val_c_SubStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubTimestamp: The operator to apply to the field SubTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubTimestamp: If op_SubTimestamp is specified, the field named in this input will be compared to the value in SubTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubTimestamp must be specified if op_SubTimestamp is specified.
:type val_f_SubTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubTimestamp: If op_SubTimestamp is specified, this value will be compared to the value in SubTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubTimestamp must be specified if op_SubTimestamp is specified.
:type val_c_SubTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SubType: The operator to apply to the field SubType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SubType: The NetMRI-determined type of the wireless subordinate. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SubType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SubType: If op_SubType is specified, the field named in this input will be compared to the value in SubType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SubType must be specified if op_SubType is specified.
:type val_f_SubType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SubType: If op_SubType is specified, this value will be compared to the value in SubType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SubType must be specified if op_SubType is specified.
:type val_c_SubType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanID: The operator to apply to the field VlanID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanID: The internal NetMRI identifier of the Vlan. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanID: If op_VlanID is specified, the field named in this input will be compared to the value in VlanID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanID must be specified if op_VlanID is specified.
:type val_f_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanID: If op_VlanID is specified, this value will be compared to the value in VlanID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanID must be specified if op_VlanID is specified.
:type val_c_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_WirelessSubordinantID: The operator to apply to the field WirelessSubordinantID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. WirelessSubordinantID: The internal NetMRI identifier of the wireless subordinate. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_WirelessSubordinantID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_WirelessSubordinantID: If op_WirelessSubordinantID is specified, the field named in this input will be compared to the value in WirelessSubordinantID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_WirelessSubordinantID must be specified if op_WirelessSubordinantID is specified.
:type val_f_WirelessSubordinantID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_WirelessSubordinantID: If op_WirelessSubordinantID is specified, this value will be compared to the value in WirelessSubordinantID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_WirelessSubordinantID must be specified if op_WirelessSubordinantID is specified.
:type val_c_WirelessSubordinantID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the wireless subordinates as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of wireless subordinate methods. The listed methods will be called on each wireless subordinate returned and included in the output. Available methods are: data_source, device, vlan, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, vlan.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` WirelessSubordinantID
:param sort: The data field(s) to use for sorting the output. Default is WirelessSubordinantID. Valid values are WirelessSubordinantID, DataSourceID, DeviceID, SubStartTime, SubEndTime, SubChangedCols, SubTimestamp, SubMac, SubDeviceID, SubNumOfSlots, SubName, SubLocation, SubMonitorOnlyMode, SubOperationStatus, SubSoftwareVersion, SubBootVersion, SubModel, SubSerialNumber, SubIPNumeric, SubIPDotted, SubType, SubGroupVlanName, VlanID, SubAdminStatus, SubOSVersion.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each WirelessSubordinate. Valid values are WirelessSubordinantID, DataSourceID, DeviceID, SubStartTime, SubEndTime, SubChangedCols, SubTimestamp, SubMac, SubDeviceID, SubNumOfSlots, SubName, SubLocation, SubMonitorOnlyMode, SubOperationStatus, SubSoftwareVersion, SubBootVersion, SubModel, SubSerialNumber, SubIPNumeric, SubIPDotted, SubType, SubGroupVlanName, VlanID, SubAdminStatus, SubOSVersion. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return wireless_subordinates: An array of the WirelessSubordinate objects that match the specified input criteria.
:rtype wireless_subordinates: Array of WirelessSubordinate
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def data_source(self, **kwargs):
"""The collector NetMRI that collected this data record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param WirelessSubordinantID: The internal NetMRI identifier of the wireless subordinate.
:type WirelessSubordinantID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The collector NetMRI that collected this data record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def device(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param WirelessSubordinantID: The internal NetMRI identifier of the wireless subordinate.
:type WirelessSubordinantID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
def vlan(self, **kwargs):
"""vlan
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param WirelessSubordinantID: The internal NetMRI identifier of the wireless subordinate.
:type WirelessSubordinantID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : vlan
:rtype : Vlan
"""
return self.api_request(self._get_method_fullname("vlan"), kwargs)
def infradevice(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param WirelessSubordinantID: The internal NetMRI identifier of the wireless subordinate.
:type WirelessSubordinantID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("infradevice"), kwargs)
| apache-2.0 |
potash/scikit-learn | sklearn/feature_selection/from_model.py | 7 | 9546 | # Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
# License: BSD 3 clause
import numpy as np
from .base import SelectorMixin
from ..base import TransformerMixin, BaseEstimator, clone
from ..externals import six
from ..utils import safe_mask, check_array, deprecated
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
def _get_feature_importances(estimator):
"""Retrieve or aggregate feature importances from estimator"""
importances = getattr(estimator, "feature_importances_", None)
if importances is None and hasattr(estimator, "coef_"):
if estimator.coef_.ndim == 1:
importances = np.abs(estimator.coef_)
else:
importances = np.sum(np.abs(estimator.coef_), axis=0)
elif importances is None:
raise ValueError(
"The underlying estimator %s has no `coef_` or "
"`feature_importances_` attribute. Either pass a fitted estimator"
" to SelectFromModel or call fit before calling transform."
% estimator.__class__.__name__)
return importances
def _calculate_threshold(estimator, importances, threshold):
"""Interpret the threshold value"""
if threshold is None:
# determine default from estimator
est_name = estimator.__class__.__name__
if ((hasattr(estimator, "penalty") and estimator.penalty == "l1") or
"Lasso" in est_name):
# the natural default threshold is 0 when l1 penalty was used
threshold = 1e-5
else:
threshold = "mean"
if isinstance(threshold, six.string_types):
if "*" in threshold:
scale, reference = threshold.split("*")
scale = float(scale.strip())
reference = reference.strip()
if reference == "median":
reference = np.median(importances)
elif reference == "mean":
reference = np.mean(importances)
else:
raise ValueError("Unknown reference: " + reference)
threshold = scale * reference
elif threshold == "median":
threshold = np.median(importances)
elif threshold == "mean":
threshold = np.mean(importances)
else:
raise ValueError("Expected threshold='mean' or threshold='median' "
"got %s" % threshold)
else:
threshold = float(threshold)
return threshold
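def _example_threshold_sketch():
    """Illustrative sketch only, not part of the public API: shows how
    ``_calculate_threshold`` interprets a scaled string threshold such as
    "1.25*mean". The estimator argument is only consulted when the
    threshold is None, so None can be passed here.
    """
    importances = np.array([0.1, 0.4, 0.5])
    # 1.25 * mean([0.1, 0.4, 0.5]) == 1.25 * 0.3333... ~= 0.4167
    return _calculate_threshold(None, importances, "1.25*mean")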
class _LearntSelectorMixin(TransformerMixin):
# Note because of the extra threshold parameter in transform, this does
# not naturally extend from SelectorMixin
"""Transformer mixin selecting features based on importance weights.
This implementation can be mixin on any estimator that exposes a
``feature_importances_`` or ``coef_`` attribute to evaluate the relative
importance of individual features for feature selection.
"""
@deprecated('Support to use estimators as feature selectors will be '
'removed in version 0.19. Use SelectFromModel instead.')
def transform(self, X, threshold=None):
"""Reduce X to its most important features.
Uses ``coef_`` or ``feature_importances_`` to determine the most
important features. For models with a ``coef_`` for each class, the
absolute sum over the classes is used.
Parameters
----------
X : array or scipy sparse matrix of shape [n_samples, n_features]
The input samples.
threshold : string, float or None, optional (default=None)
The threshold value to use for feature selection. Features whose
importance is greater or equal are kept while the others are
discarded. If "median" (resp. "mean"), then the threshold value is
the median (resp. the mean) of the feature importances. A scaling
factor (e.g., "1.25*mean") may also be used. If None and if
available, the object attribute ``threshold`` is used. Otherwise,
"mean" is used by default.
Returns
-------
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the selected features.
"""
check_is_fitted(self, ('coef_', 'feature_importances_'),
all_or_any=any)
X = check_array(X, 'csc')
importances = _get_feature_importances(self)
if len(importances) != X.shape[1]:
raise ValueError("X has different number of features than"
" during model fitting.")
if threshold is None:
threshold = getattr(self, 'threshold', None)
threshold = _calculate_threshold(self, importances, threshold)
# Selection
try:
mask = importances >= threshold
except TypeError:
# Fails in Python 3.x when threshold is str;
# result is array of True
raise ValueError("Invalid threshold: all features are discarded.")
if np.any(mask):
mask = safe_mask(X, mask)
return X[:, mask]
else:
raise ValueError("Invalid threshold: all features are discarded.")
class SelectFromModel(BaseEstimator, SelectorMixin):
"""Meta-transformer for selecting features based on importance weights.
.. versionadded:: 0.17
Parameters
----------
estimator : object
The base estimator from which the transformer is built.
This can be both a fitted (if ``prefit`` is set to True)
or a non-fitted estimator.
threshold : string, float, optional default None
The threshold value to use for feature selection. Features whose
importance is greater or equal are kept while the others are
discarded. If "median" (resp. "mean"), then the ``threshold`` value is
the median (resp. the mean) of the feature importances. A scaling
factor (e.g., "1.25*mean") may also be used. If None and if the
estimator has a parameter penalty set to l1, either explicitly
or implicitly (e.g., Lasso), the threshold used is 1e-5.
Otherwise, "mean" is used by default.
prefit : bool, default False
Whether a prefit model is expected to be passed into the constructor
directly or not. If True, ``transform`` must be called directly
and SelectFromModel cannot be used with ``cross_val_score``,
``GridSearchCV`` and similar utilities that clone the estimator.
Otherwise train the model using ``fit`` and then ``transform`` to do
feature selection.
Attributes
----------
`estimator_`: an estimator
The base estimator from which the transformer is built.
This is stored only when a non-fitted estimator is passed to the
``SelectFromModel``, i.e when prefit is False.
`threshold_`: float
The threshold value used for feature selection.
"""
def __init__(self, estimator, threshold=None, prefit=False):
self.estimator = estimator
self.threshold = threshold
self.prefit = prefit
def _get_support_mask(self):
# SelectFromModel can directly call on transform.
if self.prefit:
estimator = self.estimator
elif hasattr(self, 'estimator_'):
estimator = self.estimator_
else:
raise ValueError(
'Either fit the model before transform or set "prefit=True"'
' while passing the fitted estimator to the constructor.')
scores = _get_feature_importances(estimator)
self.threshold_ = _calculate_threshold(estimator, scores,
self.threshold)
return scores >= self.threshold_
def fit(self, X, y=None, **fit_params):
"""Fit the SelectFromModel meta-transformer.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : Other estimator specific parameters
Returns
-------
self : object
Returns self.
"""
if self.prefit:
raise NotFittedError(
"Since 'prefit=True', call transform directly")
if not hasattr(self, "estimator_"):
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X, y, **fit_params)
return self
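    # A minimal end-to-end sketch, illustrative only (assumption: an l1
    # LogisticRegression and training data ``X``, ``y`` exist; with the
    # default prefit=False the meta-transformer clones and fits the
    # estimator itself, and fit_transform is inherited via the mixins):
    #
    #     sfm = SelectFromModel(LogisticRegression(penalty='l1'),
    #                           threshold='median')
    #     X_reduced = sfm.fit_transform(X, y)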
def partial_fit(self, X, y=None, **fit_params):
"""Fit the SelectFromModel meta-transformer only once.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : Other estimator specific parameters
Returns
-------
self : object
Returns self.
"""
if self.prefit:
raise NotFittedError(
"Since 'prefit=True', call transform directly")
if not hasattr(self, "estimator_"):
self.estimator_ = clone(self.estimator)
self.estimator_.partial_fit(X, y, **fit_params)
return self
| bsd-3-clause |
IV-GII/SocialCookies | ENV1/lib/python2.7/site-packages/django/contrib/comments/views/utils.py | 176 | 1980 | """
A few bits of helper functions for comment views.
"""
import textwrap
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, resolve_url
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import comments
from django.utils.http import is_safe_url
from django.utils.six.moves.urllib.parse import urlencode
def next_redirect(request, fallback, **get_kwargs):
"""
Handle the "where should I go next?" part of comment views.
The next value could be a
``?next=...`` GET arg or the URL of a given view (``fallback``). See
the view modules for examples.
Returns an ``HttpResponseRedirect``.
"""
next = request.POST.get('next')
if not is_safe_url(url=next, host=request.get_host()):
next = resolve_url(fallback)
if get_kwargs:
if '#' in next:
tmp = next.rsplit('#', 1)
next = tmp[0]
anchor = '#' + tmp[1]
else:
anchor = ''
joiner = '&' if '?' in next else '?'
next += joiner + urlencode(get_kwargs) + anchor
return HttpResponseRedirect(next)
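def _example_flag_view(request, comment_id):
    """Illustrative sketch only (a hypothetical view, not part of this
    module): redirects to a safe ?next=... target if one was posted,
    otherwise to the named fallback URL, appending the comment id as a
    ``c`` GET parameter so a confirmation view can display the comment.
    """
    return next_redirect(request, 'comments-flag-done', c=comment_id)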
def confirmation_view(template, doc="Display a confirmation view."):
"""
Confirmation view generator for the "comment was
posted/flagged/deleted/approved" views.
"""
def confirmed(request):
comment = None
if 'c' in request.GET:
try:
comment = comments.get_model().objects.get(pk=request.GET['c'])
except (ObjectDoesNotExist, ValueError):
pass
return render_to_response(template,
{'comment': comment},
context_instance=RequestContext(request)
)
confirmed.__doc__ = textwrap.dedent("""\
%s
Templates: :template:`%s`
Context:
comment
The posted comment
""" % (doc, template)
)
return confirmed
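# Illustrative wiring sketch: the comment views build their confirmation
# pages with this generator roughly as follows (the real definitions live
# in the posting/moderation view modules, not here):
#
#     flag_done = confirmation_view(
#         template='comments/flagged.html',
#         doc='Displays a "comment was flagged" success page.')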
| gpl-2.0 |
zikifer/node-gyp | legacy/tools/gyp/pylib/gyp/MSVSSettings_test.py | 42 | 65827 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
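  # Direct-use sketch outside the test harness, illustrative only
  # (assumption: the same (settings, stream) signature exercised above):
  #
  #     import sys
  #     MSVSSettings.ValidateMSVSSettings({'VCCLCompilerTool': {}},
  #                                       sys.stderr)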
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 3)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'Pure',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Mt': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Mt/notgood3',
"Warning: for Mt/GenerateCatalogFiles, expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Mt/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Mt': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Mt:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Mt': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
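# Illustrative usage of the module under test (a sketch, not part of the
# suite): ConvertToMSBuildSettings maps MSVS tool settings to their MSBuild
# equivalents, writing any warnings to the given stream.
#
#   import sys
#   msbuild = MSVSSettings.ConvertToMSBuildSettings(
#       {'VCCLCompilerTool': {'WarningLevel': '3'}}, sys.stderr)
#   # ==> {'ClCompile': {'WarningLevel': 'Level3'}}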
| mit |
tensorflow/probability | tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_util.py | 1 | 23768 | # Copyright 2020 The TensorFlow Probability Authors. All Rights Reserved.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# THIS FILE IS AUTO-GENERATED BY `gen_linear_operators.py`.
# DO NOT MODIFY DIRECTLY.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-bad-import-order
# pylint: disable=unused-import
# pylint: disable=line-too-long
# pylint: disable=reimported
# pylint: disable=g-bool-id-comparison
# pylint: disable=g-statement-before-imports
# pylint: disable=bad-continuation
# pylint: disable=useless-import-alias
# pylint: disable=property-with-parameters
# pylint: disable=trailing-whitespace
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Internal utilities for `LinearOperator` classes."""
from __future__ import absolute_import
from __future__ import division
# [internal] enable type annotations
from __future__ import print_function
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import dtype as dtypes
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy import ops as module
from tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops
from tensorflow_probability.python.internal.backend.numpy import debugging as check_ops
from tensorflow_probability.python.internal.backend.numpy import control_flow as control_flow_ops
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg_ops
from tensorflow_probability.python.internal.backend.numpy import numpy_math as math_ops
from tensorflow_probability.python.internal.backend.numpy import ops as variables_module
from tensorflow_probability.python.internal.backend.numpy import nest
# `_linalg`, `_ops`, and `tensor_shape` are referenced later in this module;
# the aliases below reuse modules imported above (an assumption made to keep
# the file importable on its own).
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg
from tensorflow_probability.python.internal.backend.numpy import ops as _ops
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
################################################################################
# To make more friendly for TF2.
################################################################################
def convert_nonref_to_tensor(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor` if input is nonreference type.
This function converts Python objects of various types to `Tensor` objects
except if the input has nonreference semantics. Reference semantics are
characterized by `is_ref` and is any object which is a
`tf.Variable` or instance of `tf.Module`. This function accepts any input
which `tf.convert_to_tensor` would also.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so dtype_hint
can be used as a soft preference. If the conversion to
`dtype_hint` is not possible, this argument has no effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
tensor: A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
#### Examples:
```python
x = tf.Variable(0.)
y = convert_nonref_to_tensor(x)
x is y
# ==> True
x = tf.constant(0.)
y = convert_nonref_to_tensor(x)
x is y
# ==> True
x = np.array(0.)
y = convert_nonref_to_tensor(x)
x is y
# ==> False
tf.is_tensor(y)
# ==> True
x = tfp.util.DeferredTensor(13.37, lambda x: x)
y = convert_nonref_to_tensor(x)
x is y
# ==> True
tf.is_tensor(y)
# ==> False
tf.equal(y, 13.37)
# ==> True
```
"""
# We explicitly do not use a tf.name_scope to avoid graph clutter.
if value is None:
return None
if is_ref(value):
if dtype is None:
return value
dtype_base = base_dtype(dtype)
value_dtype_base = base_dtype(value.dtype)
if dtype_base != value_dtype_base:
raise TypeError('Mutable type must be of dtype "{}" but is "{}".'.format(
dtype_name(dtype_base), dtype_name(value_dtype_base)))
return value
return ops.convert_to_tensor(
value, dtype=dtype, dtype_hint=dtype_hint, name=name)
def base_dtype(dtype):
"""Returns a non-reference `dtype` based on this `dtype`."""
dtype = dtypes.as_dtype(dtype)
if hasattr(dtype, "base_dtype"):
return dtype
return dtype
def dtype_name(dtype):
"""Returns the string name for this `dtype`."""
dtype = dtypes.as_dtype(dtype)
if hasattr(dtype, "name"):
return dtype.name
if hasattr(dtype, "__name__"):
return dtype.__name__
return str(dtype)
def check_dtype(arg, dtype):
"""Check that arg.dtype == self.dtype."""
if arg.dtype != dtype:
raise TypeError(
"Expected argument to have dtype %s. Found: %s in tensor %s" %
(dtype, arg.dtype, arg))
def is_ref(x):
"""Evaluates if the object has reference semantics.
An object is deemed "reference" if it is a `tf.Variable` instance or is
derived from a `tf.Module` with `dtype` and `shape` properties.
Args:
x: Any object.
Returns:
    is_ref: Python `bool` indicating the input has reference semantics, i.e.,
      is a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties.
"""
return (
# Note: we check that tf.Variable is a class because we might be using a
# different backend other than TF.
isinstance(x, variables_module.Variable) or
(isinstance(x, module.Module) and hasattr(x, "dtype") and
hasattr(x, "shape")))
def assert_not_ref_type(x, arg_name):
if is_ref(x):
raise TypeError(
"Argument %s cannot be reference type. Found: %s" % (arg_name, type(x)))
################################################################################
# Asserts.
################################################################################
def assert_no_entries_with_modulus_zero(
x, message=None, name="assert_no_entries_with_modulus_zero"):
"""Returns `Op` that asserts Tensor `x` has no entries with modulus zero.
Args:
x: Numeric `Tensor`, real, integer, or complex.
message: A string message to prepend to failure message.
name: A name to give this `Op`.
Returns:
An `Op` that asserts `x` has no entries with modulus zero.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
should_be_nonzero = math_ops.abs(x)
zero = ops.convert_to_tensor(0, dtype=dtypes.real_dtype(dtype))
return check_ops.assert_less(zero, should_be_nonzero, message=message)
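# Example (illustrative): the returned assert op passes for [1., -2.] but
# fires for [1., 0.], since the second entry has modulus zero.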
def assert_zero_imag_part(x, message=None, name="assert_zero_imag_part"):
"""Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts.
Args:
x: Numeric `Tensor`, real, integer, or complex.
message: A string message to prepend to failure message.
name: A name to give this `Op`.
Returns:
    An `Op` that asserts `x` has no entries with a non-zero imaginary part.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
if np.issubdtype(dtype, np.floating):
return control_flow_ops.no_op()
zero = ops.convert_to_tensor(0, dtype=dtypes.real_dtype(dtype))
return check_ops.assert_equal(zero, math_ops.imag(x), message=message)
def assert_compatible_matrix_dimensions(operator, x):
"""Assert that an argument to solve/matmul has proper domain dimension.
If `tensor_shape.TensorShape(operator.shape)[-2:] = [M, N]`, and `tensor_shape.TensorShape(x.shape)[-2:] = [Q, R]`, then
`operator.matmul(x)` is defined only if `N = Q`. This `Op` returns an
`Assert` that "fires" if this is not the case. Static checks are already
done by the base class `LinearOperator`.
Args:
operator: `LinearOperator`.
x: `Tensor`.
Returns:
`Assert` `Op`.
"""
# Static checks are done in the base class. Only tensor asserts here.
assert_same_dd = check_ops.assert_equal(
array_ops.shape(x)[-2],
operator.domain_dimension_tensor(),
      # This error message is made to look similar to the error raised by the
      # static check in the base class.
message=("Dimensions are not compatible. "
"shape[-2] of argument to be the same as this operator"))
return assert_same_dd
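# Example (illustrative): an operator with shape [..., 2, 3] may only matmul
# an `x` with shape [..., 3, R]; the assert above fires at run time when
# `array_ops.shape(x)[-2]` is not 3 and static checks could not catch it.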
def assert_is_batch_matrix(tensor):
"""Static assert that `tensor` has rank `2` or higher."""
sh = tensor_shape.TensorShape(tensor.shape)
if sh.ndims is not None and sh.ndims < 2:
raise ValueError(
"Expected [batch] matrix to have at least two dimensions. Found: "
"%s" % tensor)
def shape_tensor(shape, name=None):
"""Convert Tensor using default type, unless empty list or tuple."""
# Works just like random_ops._ShapeTensor.
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int32
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name=name)
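# Example (illustrative): an empty Python shape is pinned to int32 so that it
# behaves like a valid (empty) shape vector, while non-empty shapes use the
# default conversion dtype.
#   shape_tensor([])      # ==> empty int32 shape vector
#   shape_tensor([2, 3])  # ==> tensor holding [2, 3]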
################################################################################
# Broadcasting versions of common linear algebra functions.
# TODO(b/77519145) Do this more efficiently in some special cases.
################################################################################
def broadcast_matrix_batch_dims(batch_matrices, name=None):
"""Broadcast leading dimensions of zero or more [batch] matrices.
Example broadcasting one batch dim of two simple matrices.
```python
x = [[1, 2],
[3, 4]] # Shape [2, 2], no batch dims
y = [[[1]]] # Shape [1, 1, 1], 1 batch dim of shape [1]
x_bc, y_bc = broadcast_matrix_batch_dims([x, y])
x_bc
==> [[[1, 2],
[3, 4]]] # Shape [1, 2, 2], 1 batch dim of shape [1].
y_bc
==> same as y
```
Example broadcasting many batch dims
```python
x = tf.random.normal(shape=(2, 3, 1, 4, 4))
y = tf.random.normal(shape=(1, 3, 2, 5, 5))
x_bc, y_bc = broadcast_matrix_batch_dims([x, y])
tensor_shape.TensorShape(x_bc.shape)
==> (2, 3, 2, 4, 4)
tensor_shape.TensorShape(y_bc.shape)
==> (2, 3, 2, 5, 5)
```
Args:
batch_matrices: Iterable of `Tensor`s, each having two or more dimensions.
name: A string name to prepend to created ops.
Returns:
bcast_matrices: List of `Tensor`s, with `bcast_matrices[i]` containing
the values from `batch_matrices[i]`, with possibly broadcast batch dims.
Raises:
ValueError: If any input `Tensor` is statically determined to have less
than two dimensions.
"""
with ops.name_scope(
name or "broadcast_matrix_batch_dims", values=batch_matrices):
check_ops.assert_proper_iterable(batch_matrices)
batch_matrices = list(batch_matrices)
for i, mat in enumerate(batch_matrices):
batch_matrices[i] = ops.convert_to_tensor(mat)
assert_is_batch_matrix(batch_matrices[i])
if len(batch_matrices) < 2:
return batch_matrices
# Try static broadcasting.
# bcast_batch_shape is the broadcast batch shape of ALL matrices.
# E.g. if batch_matrices = [x, y], with
# x.shape = [2, j, k] (batch shape = [2])
# y.shape = [3, 1, l, m] (batch shape = [3, 1])
# ==> bcast_batch_shape = [3, 2]
bcast_batch_shape = tensor_shape.TensorShape(batch_matrices[0].shape)[:-2]
for mat in batch_matrices[1:]:
bcast_batch_shape = _ops.broadcast_static_shape(
bcast_batch_shape,
tensor_shape.TensorShape(mat.shape)[:-2])
if bcast_batch_shape.is_fully_defined():
for i, mat in enumerate(batch_matrices):
if tensor_shape.TensorShape(mat.shape)[:-2] != bcast_batch_shape:
bcast_shape = array_ops.concat(
[bcast_batch_shape.as_list(), array_ops.shape(mat)[-2:]], axis=0)
batch_matrices[i] = _ops.broadcast_to(mat, bcast_shape)
return batch_matrices
# Since static didn't work, do dynamic, which always copies data.
bcast_batch_shape = array_ops.shape(batch_matrices[0])[:-2]
for mat in batch_matrices[1:]:
bcast_batch_shape = array_ops.broadcast_dynamic_shape(
bcast_batch_shape,
array_ops.shape(mat)[:-2])
for i, mat in enumerate(batch_matrices):
batch_matrices[i] = _ops.broadcast_to(
mat,
array_ops.concat(
[bcast_batch_shape, array_ops.shape(mat)[-2:]], axis=0))
return batch_matrices
def matrix_solve_with_broadcast(matrix, rhs, adjoint=False, name=None):
"""Solve systems of linear equations."""
with ops.name_scope(name, "MatrixSolveWithBroadcast", [matrix, rhs]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
rhs = ops.convert_to_tensor(
rhs, name="rhs", dtype=matrix.dtype)
# If either matrix/rhs has extra dims, we can reshape to get rid of them.
matrix, rhs, reshape_inv, still_need_to_transpose = _reshape_for_efficiency(
matrix, rhs, adjoint_a=adjoint)
# This will broadcast by brute force if we still need to.
matrix, rhs = broadcast_matrix_batch_dims([matrix, rhs])
solution = _linalg.solve(
matrix, rhs, adjoint=adjoint and still_need_to_transpose)
return reshape_inv(solution)
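# Illustrative sketch (not part of the original source): batch dims of
# `matrix` and `rhs` are broadcast before the solve.
#
#   matrix = np.random.rand(3, 2, 2)  # batch of three 2x2 systems
#   rhs = np.random.rand(2, 1)        # one right-hand side shared by the batch
#   x = matrix_solve_with_broadcast(matrix, rhs)
#   # x.shape ==> (3, 2, 1)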
def _reshape_for_efficiency(a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False):
"""Maybe reshape a, b, and return an inverse map. For matmul/solve."""
def identity(x):
return x
# At this point, we have not taken transpose/adjoint of a/b.
still_need_to_transpose = True
if tensor_shape.TensorShape(a.shape).ndims is None or tensor_shape.TensorShape(b.shape).ndims is None:
return a, b, identity, still_need_to_transpose
# This could be handled in the future, but seems less common.
if tensor_shape.TensorShape(a.shape).ndims >= tensor_shape.TensorShape(b.shape).ndims:
return a, b, identity, still_need_to_transpose
# From now on, we might modify b, but will not modify a.
# Suppose:
# a.shape =     C + [m, n]
# b.shape = S + C + [n, r]
b_extra_ndims = tensor_shape.TensorShape(b.shape).ndims - tensor_shape.TensorShape(a.shape).ndims
# b_extra_sh = S, b_main_sh = C + [n, r]
b_extra_sh = array_ops.shape(b)[:b_extra_ndims]
b_main_sh = array_ops.shape(b)[b_extra_ndims:]
# No reason to flip unless the extra dims of b are big enough. Why?
# Assume adjoint/transpose = False. Then...
# By not flipping, we have to replicate a to shape
# b_extra_sh + a.shape,
# which could use extra memory. But in all cases, the final output has shape
# b_extra_sh + a.shape[:-1] + [b.shape[-1]],
# So we only end up creating a larger object if the end dim of b is smaller
# than the end dim of a. This often happens, e.g. if b was a vector that was
# expanded to a matrix (by appending a singleton).
# Since adjoint/transpose may not be False, we must make adjustments here.
# The dim of b that holds the multiple equations.
a_domain_sz_ = tensor_shape.TensorShape(a.shape)[-2 if adjoint_a or transpose_a else -1]
b_eq_sz_ = tensor_shape.TensorShape(b.shape)[-2 if adjoint_b or transpose_b else -1]
b_extra_sz_ = (
np.prod(tensor_shape.TensorShape(b.shape)[:b_extra_ndims].as_list())
if tensor_shape.TensorShape(b.shape)[:b_extra_ndims].is_fully_defined() else None)
if (a_domain_sz_ is not None and b_eq_sz_ is not None and
b_extra_sz_ is not None):
if b_extra_sz_ < 2 or a_domain_sz_ <= b_eq_sz_:
return a, b, identity, still_need_to_transpose
# At this point, we're flipping for sure!
# Any transposes/adjoints will happen here explicitly, rather than in calling
# code. Why? To avoid having to write separate complex code for each case.
if adjoint_a:
a = _linalg.matrix_transpose(a, conjugate=True)
elif transpose_a:
a = _linalg.matrix_transpose(a, conjugate=False)
if adjoint_b:
b = _linalg.matrix_transpose(b, conjugate=True)
elif transpose_b:
b = _linalg.matrix_transpose(b, conjugate=False)
still_need_to_transpose = False
# Recompute shapes, since the transpose/adjoint may have changed them.
b_extra_sh = array_ops.shape(b)[:b_extra_ndims]
b_main_sh = array_ops.shape(b)[b_extra_ndims:]
# Permutation to put the extra dims at the end.
perm = (
np.concatenate(
(np.arange(b_extra_ndims, tensor_shape.TensorShape(b.shape).ndims),
np.arange(0, b_extra_ndims)), 0))
b_extra_on_end = array_ops.transpose(b, perm=perm)
# Now squash this end into one long dim.
b_squashed_end = array_ops.reshape(
b_extra_on_end, array_ops.concat((b_main_sh[:-1], [-1]), 0))
def reshape_inv(y):
# Expand the extra dims hanging off the end, "b_extra_sh".
# Note we use y_sh[:-1] + [b_main_sh[-1]] rather than b_main_sh, because y
# could have different batch dims than a and b, due to broadcasting.
y_extra_shape = array_ops.concat(
(array_ops.shape(y)[:-1], [b_main_sh[-1]], b_extra_sh), 0)
y_extra_on_end = array_ops.reshape(y, y_extra_shape)
inverse_perm = np.argsort(perm)
return array_ops.transpose(y_extra_on_end, perm=inverse_perm)
return a, b_squashed_end, reshape_inv, still_need_to_transpose
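# Illustrative shape walkthrough (not part of the original source), assuming
# a.shape = [2, 3, 3] and b.shape = [7, 2, 3, 1] with no transposes:
#   b_extra_ndims = 1, so S = [7] and the main shape C + [n, r] = [2, 3, 1].
#   The flip triggers (b_extra_sz_ = 7 >= 2 and a_domain_sz_ = 3 > b_eq_sz_ = 1):
#   b is permuted to [2, 3, 1, 7] and squashed to [2, 3, 7], the solve runs
#   without replicating a, and reshape_inv restores the result to [7, 2, 3, 1].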
################################################################################
# Helpers for hints.
################################################################################
def use_operator_or_provided_hint_unless_contradicting(
operator, hint_attr_name, provided_hint_value, message):
"""Get combined hint in the case where operator.hint should equal hint.
Args:
operator: LinearOperator that a meta-operator was initialized with.
hint_attr_name: String name for the attribute.
provided_hint_value: Bool or None. Value passed by user in initialization.
message: Error message to print if hints contradict.
Returns:
True, False, or None.
Raises:
ValueError: If hints contradict.
"""
op_hint = getattr(operator, hint_attr_name)
# pylint: disable=g-bool-id-comparison
if op_hint is False and provided_hint_value:
raise ValueError(message)
if op_hint and provided_hint_value is False:
raise ValueError(message)
if op_hint or provided_hint_value:
return True
if op_hint is False or provided_hint_value is False:
return False
# pylint: enable=g-bool-id-comparison
return None
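# Illustrative sketch (not part of the original source), combining an
# operator hint with a user-provided hint:
#   operator hint True,  provided None  ==> True
#   operator hint None,  provided False ==> False
#   operator hint True,  provided False ==> ValueError(message)
#   operator hint None,  provided None  ==> None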
################################################################################
# Utilities for blockwise operators.
################################################################################
def arg_is_blockwise(block_dimensions, arg, arg_split_dim):
"""Detect if input should be interpreted as a list of blocks."""
# Tuples and lists of length equal to the number of operators may be
# blockwise.
if (isinstance(arg, (tuple, list)) and len(arg) == len(block_dimensions)):
# If the elements of the iterable are not nested, interpret the input as
# blockwise.
if not any(nest.is_nested(x) for x in arg):
return True
else:
arg_dims = [ops.convert_to_tensor(
x).shape[arg_split_dim] for x in arg]
self_dims = [dim.value for dim in block_dimensions]
# If none of the operator dimensions are known, interpret the input as
# blockwise if its matching dimensions are unequal.
if all(self_d is None for self_d in self_dims):
# A nested tuple/list with a single outermost element is not blockwise
if len(arg_dims) == 1:
return False
elif any(dim != arg_dims[0] for dim in arg_dims):
return True
else:
raise ValueError(
"Parsing of the input structure is ambiguous. Please input "
"a blockwise iterable of `Tensor`s or a single `Tensor`.")
# If input dimensions equal the respective (known) blockwise operator
# dimensions, then the input is blockwise.
if all(self_d == arg_d or self_d is None
for self_d, arg_d in zip(self_dims, arg_dims)):
return True
# If the input dimensions are all equal, and are greater than or equal to
# the sum of the known operator dimensions, interpret the input as a single
# conformal Tensor, i.e. not blockwise.
self_dim = sum(self_d for self_d in self_dims if self_d is not None)
if all(s == arg_dims[0] for s in arg_dims) and arg_dims[0] >= self_dim:
return False
# If none of these conditions is met, the input shape is mismatched.
raise ValueError("Input dimension does not match operator dimension.")
else:
return False
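# Illustrative sketch (not part of the original source), for an operator with
# block dimensions [2, 3] (total dimension 5), where t_k is a Tensor whose
# size along the split axis is k:
#   arg_is_blockwise(block_dims, [t_2, t_3], -1)  ==> True   (two blocks)
#   arg_is_blockwise(block_dims, t_5, -1)         ==> False  (single Tensor)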
def split_arg_into_blocks(block_dims, block_dims_fn, arg, axis=-1):
"""Split `arg` into blocks matching the operators' `domain_dimension`s.
Specifically, if we have a blockwise lower-triangular matrix, with block
sizes along the diagonal `[M_j, M_j], j = 1, ..., J`, this method splits `arg`
on `axis` into `J` tensors, whose shape at `axis` is `M_j`.
Args:
block_dims: Iterable of `TensorShapes`.
block_dims_fn: Callable returning an iterable of `Tensor`s.
arg: `Tensor`. `arg` is split into `J` tensors.
axis: Python `Integer` representing the axis to split `arg` on.
Returns:
A list of `Tensor`s.
"""
block_sizes = [dim.value for dim in block_dims]
if any(d is None for d in block_sizes):
block_sizes = block_dims_fn()
return array_ops.split(arg, block_sizes, axis=axis)
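# Illustrative sketch (not part of the original source): with block
# dimensions [2, 3], a Tensor of shape [4, 5] splits on the last axis into
# blocks of shape [4, 2] and [4, 3]:
#
#   blocks = split_arg_into_blocks(block_dims, block_dims_fn, arg, axis=-1)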
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg
from tensorflow_probability.python.internal.backend.numpy import ops as _ops
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
JAX_MODE = False
if JAX_MODE:
def shape_tensor(shape, name=None): # pylint: disable=unused-argument,function-redefined
import numpy as onp
try:
return onp.array(tuple(int(x) for x in shape), dtype=np.int32)
except: # JAX raises raw Exception on __array__ # pylint: disable=bare-except
pass
return onp.array(int(shape), dtype=np.int32)
from tensorflow_probability.python.internal.backend.numpy import private
distribution_util = private.LazyLoader(
"distribution_util", globals(),
"tensorflow_probability.substrates.numpy.internal.distribution_util")
tensorshape_util = private.LazyLoader(
"tensorshape_util", globals(),
"tensorflow_probability.substrates.numpy.internal.tensorshape_util")
| apache-2.0 |
lotan/rhythmbox-ampache | AmpacheConfigDialog.py | 1 | 2578 | # vim: expandtab shiftwidth=8 softtabstop=8 tabstop=8
#
# (c) 2010
# envyseapets@gmail.com
# grindlay@gmail.com
# langdalepl@gmail.com
# massimo.mund@googlemail.com
# bethebunny@gmail.com,
# 2012-2015 lotan_rm@gmx.de
#
# This file is part of the Rhythmbox Ampache plugin.
#
# The Rhythmbox Ampache plugin is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# The Rhythmbox Ampache plugin is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Rhythmbox Ampache plugin. If not, see
# <http://www.gnu.org/licenses/>.
import rb
from gi.repository import GObject, Gtk, Gio, PeasGtk
class AmpacheConfigDialog(GObject.Object, PeasGtk.Configurable):
__gtype_name__ = 'AmpacheConfigDialog'
object = GObject.property(type=GObject.Object)
def do_create_configure_widget(self):
self.settings = Gio.Settings('org.gnome.rhythmbox.plugins.ampache')
self.ui = Gtk.Builder()
self.ui.add_from_file(rb.find_plugin_file(self, 'ampache-prefs.ui'))
self.config_dialog = self.ui.get_object('config')
self.url = self.ui.get_object("url_entry")
self.url.set_text(self.settings['url'])
self.username = self.ui.get_object("username_entry")
self.username.set_text(self.settings['username'])
self.password = self.ui.get_object("password_entry")
self.password.set_visibility(False)
self.password.set_text(self.settings['password'])
self.url.connect('changed', self.url_changed_cb)
self.username.connect('changed', self.username_changed_cb)
self.password.connect('changed', self.password_changed_cb)
return self.config_dialog
def url_changed_cb(self, widget):
self.settings['url'] = self.url.get_text()
def username_changed_cb(self, widget):
self.settings['username'] = self.username.get_text()
def password_changed_cb(self, widget):
self.settings['password'] = self.password.get_text()
| gpl-2.0 |
henry-ajere/rad2py | psp2py/controllers/appadmin.py | 14 | 13329 | # -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
# ## critical --- make a copy of the environment
global_env = copy.copy(globals())
global_env['datetime'] = datetime
http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
hosts = (http_host, socket.gethostname(),
socket.gethostbyname(http_host),
'::1','127.0.0.1','::ffff:127.0.0.1')
except:
hosts = (http_host, )
if request.env.http_x_forwarded_for or request.env.wsgi_url_scheme\
in ['https', 'HTTPS']:
session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1"):
raise HTTP(200, T('appadmin is disabled because insecure channel'))
if (request.application=='admin' and not session.authorized) or \
(request.application!='admin' and not gluon.fileutils.check_credentials(request)):
redirect(URL('admin', 'default', 'index'))
ignore_rw = True
response.view = 'appadmin.html'
response.menu = [[T('design'), False, URL('admin', 'default', 'design',
args=[request.application])], [T('db'), False,
URL('index')], [T('state'), False,
URL('state')], [T('cache'), False,
URL('ccache')]]
# ##########################################################
# ## auxiliary functions
# ###########################################################
def get_databases(request):
dbs = {}
for (key, value) in global_env.items():
cond = False
try:
cond = isinstance(value, GQLDB)
except:
cond = isinstance(value, SQLDB)
if cond:
dbs[key] = value
return dbs
databases = get_databases(None)
def eval_in_global_env(text):
exec ('_ret=%s' % text, {}, global_env)
return global_env['_ret']
def get_database(request):
if request.args and request.args[0] in databases:
return eval_in_global_env(request.args[0])
else:
session.flash = T('invalid request')
redirect(URL('index'))
def get_table(request):
db = get_database(request)
if len(request.args) > 1 and request.args[1] in db.tables:
return (db, request.args[1])
else:
session.flash = T('invalid request')
redirect(URL('index'))
def get_query(request):
try:
return eval_in_global_env(request.vars.query)
except Exception:
return None
def query_by_table_type(tablename,db,request=request):
keyed = hasattr(db[tablename],'_primarykey')
if keyed:
firstkey = db[tablename][db[tablename]._primarykey[0]]
cond = '>0'
if firstkey.type in ['string', 'text']:
cond = '!=""'
qry = '%s.%s.%s%s' % (request.args[0], request.args[1], firstkey.name, cond)
else:
qry = '%s.%s.id>0' % tuple(request.args[:2])
return qry
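# Illustrative sketch (not part of the original controller): with
# request.args == ['db', 'person'], an id-keyed table yields the query string
# 'db.person.id>0', while a keyed table whose first primary-key field is a
# string named 'code' yields 'db.person.code!=""'.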
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
return dict(databases=databases)
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
(db, table) = get_table(request)
form = SQLFORM(db[table], ignore_rw=ignore_rw)
if form.accepts(request.vars, session):
response.flash = T('new record inserted')
return dict(form=form,table=db[table])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
import os
db = get_database(request)
return response.download(request,db)
def csv():
import gluon.contenttype
response.headers['Content-Type'] = \
gluon.contenttype.contenttype('.csv')
db = get_database(request)
query = get_query(request)
if not query:
return None
response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\
% tuple(request.vars.query.split('.')[:2])
return str(db(query).select())
def import_csv(table, file):
table.import_from_csv_file(file)
def select():
import re
db = get_database(request)
dbname = request.args[0]
regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
if len(request.args)>1 and hasattr(db[request.args[1]],'_primarykey'):
regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
if request.vars.query:
match = regex.match(request.vars.query)
if match:
request.vars.query = '%s.%s.%s==%s' % (request.args[0],
match.group('table'), match.group('field'),
match.group('value'))
else:
request.vars.query = session.last_query
query = get_query(request)
if request.vars.start:
start = int(request.vars.start)
else:
start = 0
nrows = 0
stop = start + 100
table = None
rows = []
orderby = request.vars.orderby
if orderby:
orderby = dbname + '.' + orderby
if orderby == session.last_orderby:
if orderby[0] == '~':
orderby = orderby[1:]
else:
orderby = '~' + orderby
session.last_orderby = orderby
session.last_query = request.vars.query
form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
_name='query', _value=request.vars.query or '',
requires=IS_NOT_EMPTY(error_message=T("Cannot be empty")))), TR(T('Update:'),
INPUT(_name='update_check', _type='checkbox',
value=False), INPUT(_style='width:400px',
_name='update_fields', _value=request.vars.update_fields
or '')), TR(T('Delete:'), INPUT(_name='delete_check',
_class='delete', _type='checkbox', value=False), ''),
TR('', '', INPUT(_type='submit', _value='submit'))),
_action=URL(r=request,args=request.args))
if request.vars.csvfile != None:
try:
import_csv(db[request.vars.table],
request.vars.csvfile.file)
response.flash = T('data uploaded')
except Exception, e:
response.flash = DIV(T('unable to parse csv file'),PRE(str(e)))
if form.accepts(request.vars, formname=None):
# regex = re.compile(request.args[0] + '\.(?P<table>\w+)\.id\>0')
regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
match = regex.match(form.vars.query.strip())
if match:
table = match.group('table')
try:
nrows = db(query).count()
if form.vars.update_check and form.vars.update_fields:
db(query).update(**eval_in_global_env('dict(%s)'
% form.vars.update_fields))
response.flash = T('%s rows updated', nrows)
elif form.vars.delete_check:
db(query).delete()
response.flash = T('%s rows deleted', nrows)
nrows = db(query).count()
if orderby:
rows = db(query).select(limitby=(start, stop),
orderby=eval_in_global_env(orderby))
else:
rows = db(query).select(limitby=(start, stop))
except Exception, e:
(rows, nrows) = ([], 0)
response.flash = DIV(T('Invalid Query'),PRE(str(e)))
return dict(
form=form,
table=table,
start=start,
stop=stop,
nrows=nrows,
rows=rows,
query=request.vars.query,
)
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
(db, table) = get_table(request)
keyed = hasattr(db[table],'_primarykey')
record = None
if keyed:
key = [f for f in request.vars if f in db[table]._primarykey]
if key:
record = db(db[table][key[0]] == request.vars[key[0]]).select().first()
else:
record = db(db[table].id == request.args(2)).select().first()
if not record:
qry = query_by_table_type(table, db)
session.flash = T('record does not exist')
redirect(URL('select', args=request.args[:1],
vars=dict(query=qry)))
if keyed:
for k in db[table]._primarykey:
db[table][k].writable=False
form = SQLFORM(db[table], record, deletable=True, delete_label=T('Check to delete'),
ignore_rw=ignore_rw and not keyed,
linkto=URL('select',
args=request.args[:1]), upload=URL(r=request,
f='download', args=request.args[:1]))
if form.accepts(request.vars, session):
session.flash = T('done!')
qry = query_by_table_type(table, db)
redirect(URL('select', args=request.args[:1],
vars=dict(query=qry)))
return dict(form=form,table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
return dict()
def ccache():
form = FORM(
P(TAG.BUTTON("Clear CACHE?", _type="submit", _name="yes", _value="yes")),
P(TAG.BUTTON("Clear RAM", _type="submit", _name="ram", _value="ram")),
P(TAG.BUTTON("Clear DISK", _type="submit", _name="disk", _value="disk")),
)
if form.accepts(request.vars, session):
clear_ram = False
clear_disk = False
session.flash = ""
if request.vars.yes:
clear_ram = clear_disk = True
if request.vars.ram:
clear_ram = True
if request.vars.disk:
clear_disk = True
if clear_ram:
cache.ram.clear()
session.flash += "Ram Cleared "
if clear_disk:
cache.disk.clear()
session.flash += "Disk Cleared"
redirect(URL(r=request))
try:
from guppy import hpy; hp=hpy()
except ImportError:
hp = False
import shelve, os, copy, time, math
from gluon import portalocker
ram = {
'bytes': 0,
'objects': 0,
'hits': 0,
'misses': 0,
'ratio': 0,
'oldest': time.time()
}
disk = copy.copy(ram)
total = copy.copy(ram)
for key, value in cache.ram.storage.items():
if isinstance(value, dict):
ram['hits'] = value['hit_total'] - value['misses']
ram['misses'] = value['misses']
try:
ram['ratio'] = ram['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
ram['ratio'] = 0
else:
if hp:
ram['bytes'] += hp.iso(value[1]).size
ram['objects'] += hp.iso(value[1]).count
if value[0] < ram['oldest']:
ram['oldest'] = value[0]
locker = open(os.path.join(request.folder,
'cache/cache.lock'), 'a')
portalocker.lock(locker, portalocker.LOCK_EX)
disk_storage = shelve.open(os.path.join(request.folder, 'cache/cache.shelve'))
try:
for key, value in disk_storage.items():
if isinstance(value, dict):
disk['hits'] = value['hit_total'] - value['misses']
disk['misses'] = value['misses']
try:
disk['ratio'] = disk['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
disk['ratio'] = 0
else:
if hp:
disk['bytes'] += hp.iso(value[1]).size
disk['objects'] += hp.iso(value[1]).count
if value[0] < disk['oldest']:
disk['oldest'] = value[0]
finally:
portalocker.unlock(locker)
locker.close()
disk_storage.close()
total['bytes'] = ram['bytes'] + disk['bytes']
total['objects'] = ram['objects'] + disk['objects']
total['hits'] = ram['hits'] + disk['hits']
total['misses'] = ram['misses'] + disk['misses']
try:
total['ratio'] = total['hits'] * 100 / (total['hits'] + total['misses'])
except (KeyError, ZeroDivisionError):
total['ratio'] = 0
if disk['oldest'] < ram['oldest']:
total['oldest'] = disk['oldest']
else:
total['oldest'] = ram['oldest']
def GetInHMS(seconds):
hours = math.floor(seconds / 3600)
seconds -= hours * 3600
minutes = math.floor(seconds / 60)
seconds -= minutes * 60
seconds = math.floor(seconds)
return (hours, minutes, seconds)
ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
total['oldest'] = GetInHMS(time.time() - total['oldest'])
return dict(form=form, total=total,
ram=ram, disk=disk)
| gpl-3.0 |
xubenben/scikit-learn | sklearn/gaussian_process/regression_models.py | 259 | 2166 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The built-in regression models submodule for the gaussian_process module.
"""
import numpy as np
def constant(x):
"""
Zero order polynomial (constant, p = 1) regression model.
x --> f(x) = 1
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.ones([n_eval, 1])
return f
def linear(x):
"""
First order polynomial (linear, p = n+1) regression model.
x --> f(x) = [ 1, x_1, ..., x_n ].T
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.hstack([np.ones([n_eval, 1]), x])
return f
def quadratic(x):
"""
Second order polynomial (quadratic, p = n*(n+1)/2+n+1) regression model.
x --> f(x) = [ 1, { x_i, i = 1,...,n }, { x_i * x_j, (i,j) = 1,...,n } ].T
i >= j
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float)
n_eval, n_features = x.shape
f = np.hstack([np.ones([n_eval, 1]), x])
for k in range(n_features):
f = np.hstack([f, x[:, k, np.newaxis] * x[:, k:]])
return f
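# Illustrative sketch (not part of the original module): for n_features = 2
# the basis sizes are p = 1, p = n + 1 = 3, and p = n*(n+1)/2 + n + 1 = 6
# (the loop over k includes the squared terms x_k * x_k).
#
#   x = np.array([[1., 2.], [3., 4.]])
#   constant(x).shape   # ==> (2, 1)
#   linear(x).shape     # ==> (2, 3)
#   quadratic(x).shape  # ==> (2, 6)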
| bsd-3-clause |
navotsil/Open-Knesset | events/migrations/0002_event_add_when_done.py | 14 | 12023 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.when_over'
db.add_column('events_event', 'when_over', self.gf('django.db.models.fields.DateTimeField')(null=True), keep_default=False)
# Adding field 'Event.when_over_guessed'
db.add_column('events_event', 'when_over_guessed', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.when_over'
db.delete_column('events_event', 'when_over')
# Deleting field 'Event.when_over_guessed'
db.delete_column('events_event', 'when_over_guessed')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'what': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'when_over': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'when_over_guessed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'where': ('django.db.models.fields.TextField', [], {}),
'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['persons.Person']", 'symmetrical': 'False'})
},
'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"})
},
'persons.title': {
'Meta': {'object_name': 'Title'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
}
}
complete_apps = ['events']
| bsd-3-clause |
robobrobro/ballin-octo-shame | lib/Python-3.4.3/Lib/encodings/cp775.py | 272 | 34476 | """ Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp775',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
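# Usage sketch (not part of the original module): once the encodings package
# resolves 'cp775' to this module, single-byte round trips follow the maps
# below, e.g.
#
#   'Ē'.encode('cp775')      # ==> b'\xed'
#   b'\x80'.decode('cp775')  # ==> 'Ć'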
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0096: 0x00a2, # CENT SIGN
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00a7: 0x00a6, # BROKEN BAR
0x00a8: 0x00a9, # COPYRIGHT SIGN
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
'\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
'\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\xa2' # 0x0096 -> CENT SIGN
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\xa4' # 0x009f -> CURRENCY SIGN
'\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
'\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
'\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
'\xa6' # 0x00a7 -> BROKEN BAR
'\xa9' # 0x00a8 -> COPYRIGHT SIGN
'\xae' # 0x00a9 -> REGISTERED SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
'\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
'\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
'\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
'\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
'\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
'\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
'\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
'\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
'\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
'\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xb5' # 0x00e6 -> MICRO SIGN
'\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
'\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
'\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
'\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
'\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK
'\xad' # 0x00f0 -> SOFT HYPHEN
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
'\xb6' # 0x00f4 -> PILCROW SIGN
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\xb9' # 0x00fb -> SUPERSCRIPT ONE
'\xb3' # 0x00fc -> SUPERSCRIPT THREE
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x0096, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x009f, # CURRENCY SIGN
0x00a6: 0x00a7, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a9: 0x00a8, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON
0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON
0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON
0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON
0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON
0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE
0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK
0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA
0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON
0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON
0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK
0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK
0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA
0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA
0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA
0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA
0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE
0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA
0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA
0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON
0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON
0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA
0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON
0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON
0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON
0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK
0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON
0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK
0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK
0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK
0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK
0x2219: 0x00f9, # BULLET OPERATOR
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit |
hkawasaki/kawasaki-aio8-0 | common/djangoapps/config_models/__init__.py | 220 | 2002 | """
Model-Based Configuration
=========================
This app allows other apps to easily define a configuration model
that can be hooked into the admin site to allow configuration management
with auditing.
Installation
------------
Add ``config_models`` to your ``INSTALLED_APPS`` list.
Usage
-----
Create a subclass of ``ConfigurationModel``, with fields for each
value that needs to be configured::
class MyConfiguration(ConfigurationModel):
frobble_timeout = IntField(default=10)
frazzle_target = TextField(default="debug")
This is a normal django model, so it must be synced and migrated as usual.
The default values for the fields in the ``ConfigurationModel`` will be
used if no configuration has yet been created.
Register that class with the Admin site, using the ``ConfigurationAdminModel``::
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
admin.site.register(MyConfiguration, ConfigurationModelAdmin)
Use the configuration in your code::
def my_view(self, request):
config = MyConfiguration.current()
fire_the_missiles(config.frazzle_target, timeout=config.frobble_timeout)
Use the admin site to add new configuration entries. The most recently created
entry is considered to be ``current``.
Configuration
-------------
The current ``ConfigurationModel`` will be cached in the ``configuration`` django cache,
or in the ``default`` cache if ``configuration`` doesn't exist. You can specify the cache
timeout in each ``ConfigurationModel`` by setting the ``cache_timeout`` property.
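For example, to cache entries for one hour (an illustrative value, assuming
``cache_timeout`` is given in seconds)::
    class MyConfiguration(ConfigurationModel):
        cache_timeout = 60 * 60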
You can change the name of the cache key used by the ``ConfigurationModel`` by overriding
the ``cache_key_name`` function.
Extension
---------
``ConfigurationModels`` are just django models, so they can be extended with new fields
and migrated as usual. Newly added fields must have default values and should be nullable,
so that rollbacks to old versions of configuration work correctly.
"""
| agpl-3.0 |
AMICI-developer/AMICI | python/tests/test_parameter_mapping.py | 3 | 1869 | """Test for ``amici.parameter_mapping``"""
from amici.parameter_mapping import (
ParameterMappingForCondition, ParameterMapping)
def test_parameter_mapping_for_condition_default_args():
"""Check we can initialize the mapping with default arguments."""
par_map_for_condition = ParameterMappingForCondition()
for attr in [
'map_sim_var', 'scale_map_sim_var', 'map_preeq_fix',
'scale_map_preeq_fix', 'map_sim_fix', 'scale_map_sim_fix']:
assert not getattr(par_map_for_condition, attr)
map_sim_var = {'sim_par0': 8, 'sim_par1': 'opt_par0'}
map_preeq_fix = {'sim_par2': 'opt_par1'}
map_sim_fix = {'sim_par2': 'opt_par2'}
par_map_for_condition = ParameterMappingForCondition(
map_sim_var=map_sim_var, map_preeq_fix=map_preeq_fix,
map_sim_fix=map_sim_fix)
expected_scale_map_sim_var = {'sim_par0': 'lin', 'sim_par1': 'lin'}
expected_scale_map_preeq_fix = {'sim_par2': 'lin'}
expected_scale_map_sim_fix = {'sim_par2': 'lin'}
assert par_map_for_condition.scale_map_sim_var == \
expected_scale_map_sim_var
assert par_map_for_condition.scale_map_preeq_fix == \
expected_scale_map_preeq_fix
assert par_map_for_condition.scale_map_sim_fix == \
expected_scale_map_sim_fix
def test_parameter_mapping():
"""Test :class:``amici.parameter_mapping.ParameterMapping``."""
parameter_mapping = ParameterMapping()
assert len(parameter_mapping) == 0
map_sim_var = {'sim_par0': 8, 'sim_par1': 'opt_par0'}
map_preeq_fix = {'sim_par2': 'opt_par1'}
map_sim_fix = {'sim_par2': 'opt_par2'}
par_map_for_condition = ParameterMappingForCondition(
map_sim_var=map_sim_var, map_preeq_fix=map_preeq_fix,
map_sim_fix=map_sim_fix)
parameter_mapping.append(par_map_for_condition)
assert len(parameter_mapping) == 1
| bsd-2-clause |
serverdensity/sd-agent-core-plugins | hdfs_namenode/test_hdfs_namenode.py | 6 | 3525 | # stdlib
import os
# Project
from tests.checks.common import AgentCheckTest, Fixtures
# 3rd Party
import mock
import json
# Namenode URI
NAMENODE_JMX_URI = 'http://localhost:50070/jmx'
# Namesystem state URL
NAME_SYSTEM_STATE_URL = NAMENODE_JMX_URI + '?qry=Hadoop:service=NameNode,name=FSNamesystemState'
# Namesystem url
NAME_SYSTEM_URL = NAMENODE_JMX_URI + '?qry=Hadoop:service=NameNode,name=FSNamesystem'
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'ci')
def requests_get_mock(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
if args[0] == NAME_SYSTEM_STATE_URL:
with open(Fixtures.file('hdfs_namesystem_state', sdk_dir=FIXTURE_DIR), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == NAME_SYSTEM_URL:
with open(Fixtures.file('hdfs_namesystem', sdk_dir=FIXTURE_DIR), 'r') as f:
body = f.read()
return MockResponse(body, 200)
class HDFSNameNode(AgentCheckTest):
CHECK_NAME = 'hdfs_namenode'
HDFS_NAMENODE_CONFIG = {
'hdfs_namenode_jmx_uri': 'http://localhost:50070'
}
HDFS_NAMESYSTEM_STATE_METRICS_VALUES = {
'hdfs.namenode.capacity_total': 41167421440,
'hdfs.namenode.capacity_used': 501932032,
'hdfs.namenode.capacity_remaining': 27878948864,
'hdfs.namenode.capacity_in_use': None, # Don't test the value as it's a float
'hdfs.namenode.total_load': 2,
'hdfs.namenode.fs_lock_queue_length': 0,
'hdfs.namenode.blocks_total': 27661,
'hdfs.namenode.max_objects': 0,
'hdfs.namenode.files_total': 82950,
'hdfs.namenode.pending_replication_blocks': 0,
'hdfs.namenode.under_replicated_blocks': 27661,
'hdfs.namenode.scheduled_replication_blocks': 0,
'hdfs.namenode.pending_deletion_blocks': 0,
'hdfs.namenode.num_live_data_nodes': 1,
'hdfs.namenode.num_dead_data_nodes': 0,
'hdfs.namenode.num_decom_live_data_nodes': 0,
'hdfs.namenode.num_decom_dead_data_nodes': 0,
'hdfs.namenode.volume_failures_total': 0,
'hdfs.namenode.estimated_capacity_lost_total': 0,
'hdfs.namenode.num_decommissioning_data_nodes': 0,
'hdfs.namenode.num_stale_data_nodes': 0,
'hdfs.namenode.num_stale_storages': 0,
}
HDFS_NAMESYSTEM_METRICS_VALUES = {
'hdfs.namenode.missing_blocks': 0,
'hdfs.namenode.corrupt_blocks': 1,
}
HDFS_NAMESYSTEM_METRIC_TAGS = [
'namenode_url:' + HDFS_NAMENODE_CONFIG['hdfs_namenode_jmx_uri']
]
@mock.patch('requests.get', side_effect=requests_get_mock)
def test_check(self, mock_requests):
config = {
'instances': [self.HDFS_NAMENODE_CONFIG]
}
self.run_check(config)
for metric, value in self.HDFS_NAMESYSTEM_STATE_METRICS_VALUES.iteritems():
self.assertMetric(metric, value=value, tags=self.HDFS_NAMESYSTEM_METRIC_TAGS)
for metric, value in self.HDFS_NAMESYSTEM_METRICS_VALUES.iteritems():
self.assertMetric(metric, value=value, tags=self.HDFS_NAMESYSTEM_METRIC_TAGS)
| bsd-3-clause |
valentin-krasontovitsch/ansible | lib/ansible/modules/storage/zfs/zfs.py | 55 | 9554 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs
short_description: Manage zfs
description:
- Manages ZFS file systems, volumes, clones and snapshots
version_added: "1.1"
options:
name:
description:
- File system, snapshot or volume name e.g. C(rpool/myfs).
required: true
state:
description:
- Whether to create (C(present)), or remove (C(absent)) a
file system, snapshot or volume. All parents/children
will be created/destroyed as needed to reach the desired state.
choices: [ absent, present ]
required: true
origin:
description:
- Snapshot from which to create a clone.
key_value:
description:
- (**DEPRECATED**) This will be removed in Ansible-2.9. Set these values in the
- C(extra_zfs_properties) option instead.
- The C(zfs) module takes key=value pairs for zfs properties to be set.
- See the zfs(8) man page for more information.
extra_zfs_properties:
description:
- A dictionary of zfs properties to be set.
- See the zfs(8) man page for more information.
version_added: "2.5"
author:
- Johan Wiren (@johanwiren)
'''
EXAMPLES = '''
- name: Create a new file system called myfs in pool rpool with the setuid property turned off
zfs:
name: rpool/myfs
state: present
extra_zfs_properties:
setuid: off
- name: Create a new volume called myvol in pool rpool.
zfs:
name: rpool/myvol
state: present
extra_zfs_properties:
volsize: 10M
- name: Create a snapshot of rpool/myfs file system.
zfs:
name: rpool/myfs@mysnapshot
state: present
- name: Create a new file system called myfs2 with snapdir enabled
zfs:
name: rpool/myfs2
state: present
extra_zfs_properties:
snapdir: enabled
- name: Create a new file system by cloning a snapshot
zfs:
name: rpool/cloned_fs
state: present
origin: rpool/myfs@mysnapshot
- name: Destroy a filesystem
zfs:
name: rpool/myfs
state: absent
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Zfs(object):
def __init__(self, module, name, properties):
self.module = module
self.name = name
self.properties = properties
self.changed = False
self.zfs_cmd = module.get_bin_path('zfs', True)
self.zpool_cmd = module.get_bin_path('zpool', True)
self.pool = name.split('/')[0]
self.is_solaris = os.uname()[0] == 'SunOS'
self.is_openzfs = self.check_openzfs()
self.enhanced_sharing = self.check_enhanced_sharing()
def check_openzfs(self):
cmd = [self.zpool_cmd]
cmd.extend(['get', 'version'])
cmd.append(self.pool)
(rc, out, err) = self.module.run_command(cmd, check_rc=True)
version = out.splitlines()[-1].split()[2]
if version == '-':
return True
if int(version) == 5000:
return True
return False
def check_enhanced_sharing(self):
if self.is_solaris and not self.is_openzfs:
cmd = [self.zpool_cmd]
cmd.extend(['get', 'version'])
cmd.append(self.pool)
(rc, out, err) = self.module.run_command(cmd, check_rc=True)
version = out.splitlines()[-1].split()[2]
if int(version) >= 34:
return True
return False
def exists(self):
cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
return True
else:
return False
def create(self):
if self.module.check_mode:
self.changed = True
return
properties = self.properties
origin = self.module.params.get('origin', None)
cmd = [self.zfs_cmd]
if "@" in self.name:
action = 'snapshot'
elif origin:
action = 'clone'
else:
action = 'create'
cmd.append(action)
if action in ['create', 'clone']:
cmd += ['-p']
if properties:
for prop, value in properties.items():
if prop == 'volsize':
cmd += ['-V', value]
elif prop == 'volblocksize':
cmd += ['-b', value]
else:
cmd += ['-o', '%s="%s"' % (prop, value)]
if origin and action == 'clone':
cmd.append(origin)
cmd.append(self.name)
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
self.changed = True
else:
self.module.fail_json(msg=err)
def destroy(self):
if self.module.check_mode:
self.changed = True
return
cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
self.changed = True
else:
self.module.fail_json(msg=err)
def set_property(self, prop, value):
if self.module.check_mode:
self.changed = True
return
cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
self.changed = True
else:
self.module.fail_json(msg=err)
def set_properties_if_changed(self):
current_properties = self.get_current_properties()
for prop, value in self.properties.items():
if current_properties.get(prop, None) != value:
self.set_property(prop, value)
def get_current_properties(self):
cmd = [self.zfs_cmd, 'get', '-H']
if self.enhanced_sharing:
cmd += ['-e']
cmd += ['all', self.name]
rc, out, err = self.module.run_command(" ".join(cmd))
properties = dict()
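# Each output line of `zfs get -H` has the form (illustrative values):
#   rpool/myfs<TAB>compression<TAB>on<TAB>local
# i.e. name, property, value and source separated by tabs; the [1:4]
# slice below keeps property, value and source.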
for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
if source == 'local':
properties[prop] = value
# Add alias for enhanced sharing properties
if self.enhanced_sharing:
properties['sharenfs'] = properties.get('share.nfs', None)
properties['sharesmb'] = properties.get('share.smb', None)
return properties
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present']),
origin=dict(type='str', default=None),
# createparent is meaningless after 2.3, but this shouldn't
# be removed until check_invalid_arguments is.
createparent=dict(type='bool', default=None),
extra_zfs_properties=dict(type='dict', default={}),
),
supports_check_mode=True,
# Remove this in Ansible 2.9
check_invalid_arguments=False,
)
state = module.params.get('state')
name = module.params.get('name')
if module.params.get('origin') and '@' in name:
module.fail_json(msg='cannot specify origin when operating on a snapshot')
# The following is deprecated. Remove in Ansible 2.9
# Get all valid zfs-properties
properties = dict()
for prop, value in module.params.items():
# All freestyle params are zfs properties
if prop not in module.argument_spec:
if isinstance(value, bool):
if value is True:
properties[prop] = 'on'
else:
properties[prop] = 'off'
else:
properties[prop] = value
if properties:
module.deprecate('Passing zfs properties as arbitrary parameters to the zfs module is'
' deprecated. Send them as a dictionary in the extra_zfs_properties'
' parameter instead.', version='2.9')
# Merge, giving the module_params precedence
for prop, value in module.params['extra_zfs_properties'].items():
properties[prop] = value
module.params['extra_zfs_properties'] = properties
# End deprecated section
# Reverse the boolification of zfs properties
for prop, value in module.params['extra_zfs_properties'].items():
if isinstance(value, bool):
if value is True:
module.params['extra_zfs_properties'][prop] = 'on'
else:
module.params['extra_zfs_properties'][prop] = 'off'
else:
module.params['extra_zfs_properties'][prop] = value
result = dict(
name=name,
state=state,
)
zfs = Zfs(module, name, module.params['extra_zfs_properties'])
if state == 'present':
if zfs.exists():
zfs.set_properties_if_changed()
else:
zfs.create()
elif state == 'absent':
if zfs.exists():
zfs.destroy()
result.update(zfs.properties)
result['changed'] = zfs.changed
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
zerotk/terraformer | zerotk/fifo.py | 1 | 1072 | from __future__ import unicode_literals
from collections import OrderedDict
class FIFO(OrderedDict):
"""
A first-in, first-out cache: when the maximum size is reached, the oldest
item (the first one added) is evicted.
"""
def __init__(self, maxsize):
"""
:param int maxsize:
The maximum size of this cache.
"""
OrderedDict.__init__(self)
self._maxsize = maxsize
def __setitem__(self, key, value):
"""
Sets an item in the cache. Pops items as needed so that the max size is never passed.
:param object key:
Key to be set
:param object value:
Corresponding value to be set for the given key
"""
l = len(self)
# Note, we must pop items before adding the new one to the cache so that
# the size does not exceed the maximum at any time.
while l >= self._maxsize:
l -= 1
# Pop the first item created
self.popitem(last=False)
OrderedDict.__setitem__(self, key, value)
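# Minimal usage sketch (illustrative keys and values):
#
#   cache = FIFO(maxsize=2)
#   cache['a'] = 1
#   cache['b'] = 2
#   cache['c'] = 3 # evicts 'a', the first item added
#   assert list(cache.keys()) == ['b', 'c']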
| mit |
Dm47021/Holo-a200 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
antgonza/qiime | qiime/denoiser/preprocess.py | 15 | 11459 | #!/usr/bin/env python
"""Preprocess 454 sequencing data."""
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
# remember to add yourself if you make changes
__credits__ = ["Jens Reeder", "Rob Knight", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jens Reeder"
__email__ = "jens.reeder@gmail.com"
from itertools import imap
from os import remove, close
from random import sample
from collections import defaultdict
from string import lowercase
from tempfile import mkstemp
from skbio.parse.sequences import parse_fasta
from bfillings.denoiser import (Flowgram, build_averaged_flowgram,
lazy_parse_sff_handle, build_prefix_map)
from qiime.util import load_qiime_config
from qiime.denoiser.cluster_utils import submit_jobs
from qiime.denoiser.flowgram_filter import cleanup_sff,\
truncate_flowgrams_in_SFF, extract_barcodes_from_mapping
from qiime.denoiser.utils import squeeze_seq, make_stats, get_representatives,\
wait_for_file, store_mapping, invert_mapping, cat_sff_files, files_exist,\
read_denoiser_mapping, get_denoiser_data_dir, write_sff_header
STANDARD_BACTERIAL_PRIMER = "CATGCTGCCTCCCGTAGGAGT"
def make_tmp_name(length=8):
"""Returns a random string of specified length.
length: length of random string
"""
return ("".join(sample(list(lowercase), length)))
def sample_mapped_keys(mapping, min_coverage=50):
"""sample up to min_coverage keys for each key in mapping.
mapping: dictionary of lists.
Note: key is always included in sample
"""
if min_coverage == 0:
return {}
sample_keys = {}
for key in mapping.keys():
if (min_coverage > 1):
sample_keys[key] = sample(mapping[key],
min(min_coverage - 1, len(mapping[key])))
else:
sample_keys[key] = []
sample_keys[key].append(key) # always include the centroid
return sample_keys
def build_averaged_flowgrams(mapping, sff_fp,
min_coverage=50, out_fp=None):
"""Build averaged flowgrams for each cluster in mapping.
mapping: a cluster mapping as dictionary of lists
sff_fp: pointer to sff.txt file, must be consistent with mapping
min_coverage: number of flowgrams to average over for each cluster
out_fp: output file name
NOTE: This function has no test code, since it is mostly IO around tested functions
"""
l = len(mapping)
(flowgrams, header) = lazy_parse_sff_handle(open(sff_fp))
# update some values in the sff header
header["# of Reads"] = l
header["Index Length"] = "NA"
if (out_fp):
out_filename = out_fp
else:
fd, out_filename = mkstemp(dir="/tmp/",
prefix="prefix_dereplicated",
suffix=".sff.txt")
close(fd)
outhandle = open(out_filename, "w")
# write out reduced flogram set
write_sff_header(header, outhandle)
seqs = {}
# get a random sample for each cluster
sample_keys = sample_mapped_keys(mapping, min_coverage)
for ave_f, id in _average_flowgrams(mapping, flowgrams, sample_keys):
outhandle.write(ave_f.createFlowHeader() + "\n")
ave_f.Bases = ave_f.toSeq()
seqs[id] = ave_f.Bases
outhandle.close()
return(out_filename, seqs)
def _average_flowgrams(mapping, flowgrams, sample_keys):
"""average flowgrams according to cluster mapping.
mapping: a dictionary of lists as cluster mapping
flowgrams: an iterable flowgram source, all flowgram ids from this source must be in the mapping
sample_keys: the keys that should be averaged over for each cluster.
"""
# accumulates flowgram for each key until sample for this key is empty
flows = defaultdict(list)
invert_map = invert_mapping(mapping)
for f in flowgrams:
key = invert_map[f.Name]
samples = sample_keys[key]
if (f.Name in samples):
flows[key].append(f.flowgram)
samples.remove(f.Name)
if (len(samples) == 0):
# we gathered all sampled flowgrams for this cluster,
# now average
ave_flowgram = build_averaged_flowgram(flows[key])
ave_f = Flowgram(ave_flowgram, Name=key)
del(flows[key])
yield ave_f, key
def prefix_filter_flowgrams(flowgrams, squeeze=False):
"""Filters flowgrams by common prefixes.
flowgrams: iterable source of flowgrams
squeeze: if True, collapse all poly-X to X
Returns prefix mapping.
"""
# collect flowgram sequences
if squeeze:
seqs = imap(
lambda f: (f.Name, squeeze_seq(str(f.toSeq(truncate=True)))),
flowgrams)
else:
seqs = imap(lambda f: (f.Name, str(f.toSeq(truncate=True))), flowgrams)
# equivalent but more efficient than
#seqs = [(f.Name, str(f.toSeq(truncate=True))) for f in flowgrams]
# get prefix mappings
mapping = build_prefix_map(seqs)
l = len(mapping)
orig_l = sum([len(a) for a in mapping.values()]) + l
return (l, orig_l, mapping)
def print_rep_seqs(mapping, seqs, out_fp):
"""Print the cluster seeds of a mapping to out_fp.
mapping: a cluster mapping
seqs: a list of seqs contained in the mapping
out_fp: output directory
"""
out_fh = open(out_fp + "/prefix_dereplicated.fasta", "w")
for s in (get_representatives(mapping, seqs.iteritems())):
out_fh.write(s.to_fasta())
out_fh.close()
def preprocess(sff_fps, log_fh, fasta_fp=None, out_fp="/tmp/",
verbose=False, squeeze=False,
primer=STANDARD_BACTERIAL_PRIMER):
"""Quality filtering and truncation of flowgrams, followed by denoiser phase I.
sff_fps: List of paths to flowgram files
log_fh: log messages are written to log_fh unless it is None
fasta_fp: Path to fasta file, formatted as from split_libraries.py.
This file is used to filter the flowgrams in sff_fps. Only reads in
fasta_fp are pulled from sff_fps.
out_fp: path to output directory
verbose: a binary verbose flag
squeeze: a flag that controls if sequences are squeezed before phase I.
Squeezing means consecutive identical nucs are collapsed to one.
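For example, the sequence "AAACCGG" is squeezed to "ACG".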
primer: The primer sequence of the amplification process. This sequence will be
removed from all reads during preprocessing
"""
flowgrams, header = cat_sff_files(map(open, sff_fps))
if(fasta_fp):
# remove barcodes and sequences tossed by split_libraries, i.e. not in
# fasta_fp
labels = imap(lambda a_b: a_b[0], parse_fasta(open(fasta_fp)))
barcode_mapping = extract_barcodes_from_mapping(labels)
(trunc_sff_fp, l) = truncate_flowgrams_in_SFF(flowgrams, header,
outdir=out_fp,
barcode_mapping=barcode_mapping,
primer=primer)
if verbose:
log_fh.write(
"Sequences in barcode mapping: %d\n" %
len(barcode_mapping))
log_fh.write("Truncated flowgrams written: %d\n" % l)
else:
# just do a simple clean and truncate
(clean_sff_fp, l) = cleanup_sff(flowgrams, header, outdir=out_fp)
if verbose:
log_fh.write("Cleaned flowgrams written: %d\n" % l)
flowgrams, header = lazy_parse_sff_handle(open(clean_sff_fp))
(trunc_sff_fp, l) = truncate_flowgrams_in_SFF(flowgrams, header,
outdir=out_fp, primer=primer)
if verbose:
log_fh.write("Truncated flowgrams written: %d\n" % l)
remove(clean_sff_fp)
if (l == 0):
raise ValueError("No flowgrams left after preprocesing.\n" +
"Check your primer sequence")
# Phase I - cluster seqs which are exact prefixes
if verbose:
log_fh.write("Filter flowgrams by prefix matching\n")
(flowgrams, header) = lazy_parse_sff_handle(open(trunc_sff_fp))
l, orig_l, mapping =\
prefix_filter_flowgrams(flowgrams, squeeze=squeeze)
averaged_sff_fp, seqs = build_averaged_flowgrams(mapping, trunc_sff_fp,
min_coverage=1,
# averaging produces flowgrams that are
# too clean, which makes the greedy
# clustering over-cluster; using
# min_coverage=1 keeps the cluster
# centroid instead.
out_fp=out_fp + "/prefix_dereplicated.sff.txt")
remove(trunc_sff_fp)
if verbose:
log_fh.write("Prefix matching: removed %d out of %d seqs\n"
% (orig_l - l, orig_l))
log_fh.write("Remaining number of sequences: %d\n" % l)
log_fh.write(make_stats(mapping) + "\n")
# print representative sequences and mapping
print_rep_seqs(mapping, seqs, out_fp)
store_mapping(mapping, out_fp, "prefix")
return (averaged_sff_fp, l, mapping, seqs)
def preprocess_on_cluster(sff_fps, log_fp, fasta_fp=None, out_fp="/tmp/",
squeeze=False, verbose=False,
primer=STANDARD_BACTERIAL_PRIMER):
"""Call preprocess via cluster_jobs_script on the cluster.
sff_fps: List of paths to flowgram files.
log_fp: path to log file
fasta_fp: Path to fasta file, formatted as from split_libraries.py.
This file is used to filter the flowgrams in sff_fps. Only reads in
fasta_fp are pulled from sff_fps.
out_fp: path to output directory
verbose: a binary verbose flag
squeeze: a flag that controls if sequences are squeezed before phase I.
Squeezing means consecutive identical nucs are collapsed to one.
primer: The primer sequence of the amplification process. This sequence will be
removed from all reads during preprocessing
"""
cmd = "denoiser_preprocess.py -i %s -l %s -o %s" %\
(",".join(sff_fps), log_fp, out_fp)
if (fasta_fp):
cmd += " -f %s" % fasta_fp
if(squeeze):
cmd += " -s"
if verbose:
cmd += " -v"
if primer:
cmd += " -p %s" % primer
submit_jobs([cmd], "pp_" + make_tmp_name(6))
wait_for_file(out_fp + "/prefix_mapping.txt", 10)
def read_preprocessed_data(out_fp="/tmp/"):
"""Read data of a previous preprocessing run.
out_fp: output directory of previous preprocess run.
Supposed to contain two files:
- prefix_dereplicated.fasta
- prefix_mapping.txt
"""
# read mapping, and extract seqs
# mapping has fasta_header like this:
# > id: count
seqs = dict([(a.split(':')[0], b) for (a, b) in
(parse_fasta(open(out_fp + "/prefix_dereplicated.fasta")))])
mapping = read_denoiser_mapping(open(out_fp + "/prefix_mapping.txt"))
return(out_fp + "/prefix_dereplicated.sff.txt", len(mapping), mapping, seqs)
| gpl-2.0 |
MrTheodor/espressopp | contrib/mpi4py/mpi4py-2.0.0/conf/mpiconfig.py | 8 | 17349 | import sys, os, platform
from distutils.util import split_quoted
from distutils.spawn import find_executable
from distutils import log as dulog
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
try:
from configparser import ConfigParser
from configparser import Error as ConfigParserError
except ImportError:
from ConfigParser import ConfigParser
from ConfigParser import Error as ConfigParserError
class Config(object):
def __init__(self, logger=None):
self.log = logger or dulog
self.section = None
self.filename = None
self.compiler_info = OrderedDict((
('mpicc' , None),
('mpicxx' , None),
('mpifort', None),
('mpif77' , None),
('mpif90' , None),
('mpif08' , None),
('mpild' , None),
))
self.library_info = OrderedDict((
('define_macros' , []),
('undef_macros' , []),
('include_dirs' , []),
('libraries' , []),
('library_dirs' , []),
('runtime_library_dirs' , []),
('extra_compile_args' , []),
('extra_link_args' , []),
('extra_objects' , []),
))
def __bool__(self):
for v in self.compiler_info.values():
if v:
return True
for v in self.library_info.values():
if v:
return True
return False
__nonzero__ = __bool__
def get(self, k, d=None):
if k in self.compiler_info:
return self.compiler_info[k]
if k in self.library_info:
return self.library_info[k]
return d
def info(self, log=None):
if log is None: log = self.log
mpicc = self.compiler_info.get('mpicc')
mpicxx = self.compiler_info.get('mpicxx')
mpifort = self.compiler_info.get('mpifort')
mpif77 = self.compiler_info.get('mpif77')
mpif90 = self.compiler_info.get('mpif90')
mpif08 = self.compiler_info.get('mpif08')
mpild = self.compiler_info.get('mpild')
if mpicc:
log.info("MPI C compiler: %s", mpicc)
if mpicxx:
log.info("MPI C++ compiler: %s", mpicxx)
if mpifort:
log.info("MPI F compiler: %s", mpifort)
if mpif77:
log.info("MPI F77 compiler: %s", mpif77)
if mpif90:
log.info("MPI F90 compiler: %s", mpif90)
if mpif08:
log.info("MPI F08 compiler: %s", mpif08)
if mpild:
log.info("MPI linker: %s", mpild)
def update(self, config, **more):
if hasattr(config, 'keys'):
config = config.items()
for option, value in config:
if option in self.compiler_info:
self.compiler_info[option] = value
if option in self.library_info:
self.library_info[option] = value
if more:
self.update(more)
def setup(self, options, environ=None):
if environ is None: environ = os.environ
self.setup_library_info(options, environ)
self.setup_compiler_info(options, environ)
def setup_library_info(self, options, environ):
filename = section = None
mpiopt = getattr(options, 'mpi', None)
mpiopt = environ.get('MPICFG', mpiopt)
if mpiopt:
if ',' in mpiopt:
section, filename = mpiopt.split(',', 1)
elif os.path.isfile(mpiopt):
filename = mpiopt
else:
section = mpiopt
if not filename: filename = "mpi.cfg"
if not section: section = "mpi"
mach = platform.machine()
arch = platform.architecture()[0]
plat = sys.platform
osnm = os.name
if 'linux' == plat[:5]: plat = 'linux'
elif 'sunos' == plat[:5]: plat = 'solaris'
elif 'win' == plat[:3]: plat = 'windows'
suffixes = []
suffixes.append(plat+'-'+mach)
suffixes.append(plat+'-'+arch)
suffixes.append(plat)
suffixes.append(osnm+'-'+mach)
suffixes.append(osnm+'-'+arch)
suffixes.append(osnm)
suffixes.append(mach)
suffixes.append(arch)
sections = [section+"-"+s for s in suffixes]
sections += [section]
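# Illustrative (host-dependent): on 64-bit Linux/x86_64 with section "mpi",
# the sections tried are, in order: mpi-linux-x86_64, mpi-linux-64bit,
# mpi-linux, mpi-posix-x86_64, mpi-posix-64bit, mpi-posix, mpi-x86_64,
# mpi-64bit, and finally mpi.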
self.load(filename, sections)
if not self:
if os.name == 'posix':
self._setup_posix()
if sys.platform == 'win32':
self._setup_windows()
def _setup_posix(self):
pass
def _setup_windows(self):
# Microsoft MPI (v6, v5, v4)
def msmpi_ver():
try:
try:
import winreg
except ImportError:
import _winreg as winreg
HKLM = winreg.HKEY_LOCAL_MACHINE
subkey = "SOFTWARE\Microsoft\MPI"
with winreg.OpenKey(HKLM, subkey) as key:
for i in range(winreg.QueryInfoKey(key)[1]):
name, value, type = winreg.EnumValue(key, i)
if name != "Version": continue
major, minor = value.split('.')[:2]
return (int(major), int(minor))
except: pass
return (1, 0)
def setup_msmpi(MSMPI_INC, MSMPI_LIB):
from os.path import join, isfile
ok = (MSMPI_INC and isfile(join(MSMPI_INC, 'mpi.h')) and
MSMPI_LIB and isfile(join(MSMPI_LIB, 'msmpi.lib')))
if not ok: return False
MSMPI_VER = '0x%d%02d' % msmpi_ver()
MSMPI_INC = os.path.normpath(MSMPI_INC)
MSMPI_LIB = os.path.normpath(MSMPI_LIB)
self.library_info.update(
define_macros=[('MSMPI_VER', MSMPI_VER)],
include_dirs=[MSMPI_INC],
library_dirs=[MSMPI_LIB],
libraries=['msmpi'])
self.section = 'msmpi'
self.filename = [os.path.dirname(MSMPI_INC)]
return True
arch = platform.architecture()[0][:2]
# Look for Microsoft MPI in the environment
MSMPI_INC = os.environ.get('MSMPI_INC')
MSMPI_LIB = os.environ.get('MSMPI_LIB'+arch)
if setup_msmpi(MSMPI_INC, MSMPI_LIB): return
# Look for Microsoft MPI v6/v5 in default install path
for ProgramFiles in ('ProgramFiles', 'ProgramFiles(x86)'):
ProgramFiles = os.environ.get(ProgramFiles, '')
archdir = {'32':'x86', '64':'x64'}[arch]
MSMPI_DIR = os.path.join(ProgramFiles, 'Microsoft SDKs', 'MPI')
MSMPI_INC = os.path.join(MSMPI_DIR, 'Include')
MSMPI_LIB = os.path.join(MSMPI_DIR, 'Lib', archdir)
if setup_msmpi(MSMPI_INC, MSMPI_LIB): return
# Look for Microsoft HPC Pack 2012 R2 in default install path
for ProgramFiles in ('ProgramFiles', 'ProgramFiles(x86)'):
ProgramFiles = os.environ.get(ProgramFiles, '')
archdir = {'32':'i386', '64':'amd64'}[arch]
MSMPI_DIR = os.path.join(ProgramFiles, 'Microsoft MPI')
MSMPI_INC = os.path.join(MSMPI_DIR, 'Inc')
MSMPI_LIB = os.path.join(MSMPI_DIR, 'Lib', archdir)
if setup_msmpi(MSMPI_INC, MSMPI_LIB): return
# Microsoft MPI (legacy) and others
from glob import glob
ProgramFiles = os.environ.get('ProgramFiles', '')
CCP_HOME = os.environ.get('CCP_HOME', '')
for (name, prefix, suffix) in (
('msmpi', CCP_HOME, ''),
('msmpi', ProgramFiles, 'Microsoft HPC Pack 2012 R2'),
('msmpi', ProgramFiles, 'Microsoft HPC Pack 2012'),
('msmpi', ProgramFiles, 'Microsoft HPC Pack 2012 SDK'),
('msmpi', ProgramFiles, 'Microsoft HPC Pack 2008 R2'),
('msmpi', ProgramFiles, 'Microsoft HPC Pack 2008'),
('msmpi', ProgramFiles, 'Microsoft HPC Pack 2008 SDK'),
('mpich3', ProgramFiles, 'MPICH'),
('mpich2', ProgramFiles, 'MPICH2'),
('openmpi', ProgramFiles, 'OpenMPI'),
('openmpi', ProgramFiles, 'OpenMPI*'),
('deinompi', ProgramFiles, 'DeinoMPI'),
):
mpi_dir = os.path.join(prefix, suffix)
if '*' in mpi_dir:
dirs = glob(mpi_dir)
if dirs:
mpi_dir = max(dirs)
if not (mpi_dir and os.path.isdir(mpi_dir)):
continue
define_macros = []
include_dir = os.path.join(mpi_dir, 'include')
library = 'mpi'
library_dir = os.path.join(mpi_dir, 'lib')
if name == 'msmpi':
include_dir = os.path.join(mpi_dir, 'inc')
library = 'msmpi'
arch = platform.architecture()[0]
if arch == '32bit':
library_dir = os.path.join(library_dir, 'i386')
if arch == '64bit':
library_dir = os.path.join(library_dir, 'amd64')
if not os.path.isdir(include_dir):
include_dir = os.path.join(mpi_dir, 'include')
if name == 'openmpi':
define_macros.append(('OMPI_IMPORTS', None))
library = 'libmpi'
self.library_info.update(
define_macros=define_macros,
include_dirs=[include_dir],
libraries=[library],
library_dirs=[library_dir],
)
self.section = name
self.filename = [mpi_dir]
break
def setup_compiler_info(self, options, environ):
def find_exe(cmd, path=None):
if not cmd: return None
parts = split_quoted(cmd)
exe, args = parts[0], parts[1:]
if not os.path.isabs(exe) and path:
exe = os.path.basename(exe)
exe = find_executable(exe, path)
if not exe: return None
return ' '.join([exe]+args)
COMPILERS = (
('mpicc', ['mpicc', 'mpcc_r']),
('mpicxx', ['mpicxx', 'mpic++', 'mpiCC', 'mpCC_r']),
('mpifort', ['mpifort', 'mpfort_r']),
('mpif77', ['mpif77', 'mpf77_r']),
('mpif90', ['mpif90', 'mpf90_r']),
('mpif08', ['mpif08', 'mpf08_r']),
('mpild', []),
)
#
compiler_info = {}
PATH = environ.get('PATH', '')
for name, _ in COMPILERS:
cmd = (environ.get(name.upper()) or
getattr(options, name, None) or
self.compiler_info.get(name) or
None)
if cmd:
exe = find_exe(cmd, path=PATH)
if exe:
path = os.path.dirname(exe)
PATH = path + os.path.pathsep + PATH
compiler_info[name] = exe
else:
self.log.error("error: '%s' not found", cmd)
#
if not self and not compiler_info:
for name, candidates in COMPILERS:
for cmd in candidates:
cmd = find_exe(cmd)
if cmd:
compiler_info[name] = cmd
break
#
self.compiler_info.update(compiler_info)
def load(self, filename="mpi.cfg", section='mpi'):
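# Sketch of the expected configuration file format (illustrative paths
# and values, not a definitive template):
#   [mpi]
#   mpicc = /usr/bin/mpicc
#   mpicxx = /usr/bin/mpicxx
#   include_dirs = /usr/include/mpi
#   libraries = mpi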
if isinstance(filename, str):
filenames = filename.split(os.path.pathsep)
else:
filenames = list(filename)
if isinstance(section, str):
sections = section.split(',')
else:
sections = list(section)
#
try:
parser = ConfigParser(dict_type=OrderedDict)
except TypeError:
parser = ConfigParser()
try:
read_ok = parser.read(filenames)
except ConfigParserError:
self.log.error(
"error: parsing configuration file/s '%s'",
os.path.pathsep.join(filenames))
return None
for section in sections:
if parser.has_section(section):
break
section = None
if not section:
self.log.error(
"error: section/s '%s' not found in file/s '%s'",
','.join(sections), os.path.pathsep.join(filenames))
return None
parser_items = list(parser.items(section, vars=None))
#
compiler_info = type(self.compiler_info)()
for option, value in parser_items:
if option in self.compiler_info:
compiler_info[option] = value
#
pathsep = os.path.pathsep
expanduser = os.path.expanduser
expandvars = os.path.expandvars
library_info = type(self.library_info)()
for k, v in parser_items:
if k in ('define_macros',
'undef_macros',
):
macros = [e.strip() for e in v.split(',')]
if k == 'define_macros':
for i, m in enumerate(macros):
try: # -DFOO=bar
idx = m.index('=')
macro = (m[:idx], m[idx+1:] or None)
except ValueError: # -DFOO
macro = (m, None)
macros[i] = macro
library_info[k] = macros
elif k in ('include_dirs',
'library_dirs',
'runtime_dirs',
'runtime_library_dirs',
):
if k == 'runtime_dirs': k = 'runtime_library_dirs'
pathlist = [p.strip() for p in v.split(pathsep)]
library_info[k] = [expanduser(expandvars(p))
for p in pathlist if p]
elif k == 'libraries':
library_info[k] = [e.strip() for e in split_quoted(v)]
elif k in ('extra_compile_args',
'extra_link_args',
):
library_info[k] = split_quoted(v)
elif k == 'extra_objects':
library_info[k] = [expanduser(expandvars(e))
for e in split_quoted(v)]
elif hasattr(self, k):
library_info[k] = v.strip()
else:
pass
#
self.section = section
self.filename = read_ok
self.compiler_info.update(compiler_info)
self.library_info.update(library_info)
return compiler_info, library_info, section, read_ok
def dump(self, filename=None, section='mpi'):
# prepare configuration values
compiler_info = self.compiler_info.copy()
library_info = self.library_info.copy()
for k in library_info:
if k in ('define_macros',
'undef_macros',
):
macros = library_info[k]
if k == 'define_macros':
for i, (m, v) in enumerate(macros):
if v is None:
macros[i] = m
else:
macros[i] = '%s=%s' % (m, v)
library_info[k] = ','.join(macros)
elif k in ('include_dirs',
'library_dirs',
'runtime_library_dirs',
):
library_info[k] = os.path.pathsep.join(library_info[k])
elif isinstance(library_info[k], list):
library_info[k] = ' '.join(library_info[k])
# fill configuration parser
try:
parser = ConfigParser(dict_type=OrderedDict)
except TypeError:
parser = ConfigParser()
parser.add_section(section)
for option, value in compiler_info.items():
if not value: continue
parser.set(section, option, value)
for option, value in library_info.items():
if not value: continue
parser.set(section, option, value)
# save configuration file
if filename is None:
parser.write(sys.stdout)
elif hasattr(filename, 'write'):
parser.write(filename)
elif isinstance(filename, str):
with open(filename, 'w') as f:
parser.write(f)
return parser
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
parser.add_option("--mpi", type="string")
parser.add_option("--mpicc", type="string")
parser.add_option("--mpicxx", type="string")
parser.add_option("--mpifort", type="string")
parser.add_option("--mpif77", type="string")
parser.add_option("--mpif90", type="string")
parser.add_option("--mpif08", type="string")
parser.add_option("--mpild", type="string")
(opts, args) = parser.parse_args()
log = dulog.Log(dulog.INFO)
cfg = Config(log)
cfg.setup(opts)
cfg.dump()
| gpl-3.0 |
hmcmooc/muddx-platform | lms/djangoapps/courseware/courses.py | 3 | 12810 | from collections import defaultdict
from fs.errors import ResourceNotFoundError
import logging
import inspect
from path import path
from django.http import Http404
from django.conf import settings
from edxmako.shortcuts import render_to_string
from xmodule.modulestore import XML_MODULESTORE_TYPE
from xmodule.modulestore.keys import CourseKey
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.exceptions import ItemNotFoundError
from static_replace import replace_static_urls
from xmodule.modulestore import MONGO_MODULESTORE_TYPE
from courseware.access import has_access
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module
import branding
log = logging.getLogger(__name__)
def get_request_for_thread():
"""Walk up the stack, return the nearest first argument named "request"."""
frame = None
try:
for f in inspect.stack()[1:]:
frame = f[0]
code = frame.f_code
if code.co_varnames[:1] == ("request",):
return frame.f_locals["request"]
elif code.co_varnames[:2] == ("self", "request",):
return frame.f_locals["request"]
finally:
del frame
def get_course(course_id, depth=0):
"""
Given a course id, return the corresponding course descriptor.
If the course does not exist, raises a ValueError. This is appropriate
for internal use.
depth: The number of levels of children for the modulestore to cache.
None means infinite depth. Default is to fetch no children.
"""
course = modulestore().get_course(course_id, depth=depth)
if course is None:
raise ValueError(u"Course not found: {0}".format(course_id))
return course
# TODO please rename this function to get_course_by_key at next opportunity!
def get_course_by_id(course_key, depth=0):
"""
Given a course id, return the corresponding course descriptor.
If such a course does not exist, raises a 404.
depth: The number of levels of children for the modulestore to cache. None means infinite depth
"""
course = modulestore().get_course(course_key, depth=depth)
if course:
return course
else:
raise Http404("Course not found.")
def get_course_with_access(user, action, course_key, depth=0):
"""
Given a course_key, look up the corresponding course descriptor,
check that the user has the access to perform the specified action
on the course, and return the descriptor.
Raises a 404 if the course_key is invalid, or the user doesn't have access.
depth: The number of levels of children for the modulestore to cache. None means infinite depth
"""
assert isinstance(course_key, CourseKey)
course = get_course_by_id(course_key, depth=depth)
if not has_access(user, action, course, course_key):
# Deliberately return a non-specific error message to avoid
# leaking info about access control settings
raise Http404("Course not found.")
return course
def get_opt_course_with_access(user, action, course_key):
"""
Same as get_course_with_access, except that if course_key is None,
return None without performing any access checks.
"""
if course_key is None:
return None
return get_course_with_access(user, action, course_key)
def course_image_url(course):
"""Try to look up the image url for the course. If it's not found,
log an error and return the dead link"""
if course.static_asset_path or modulestore().get_modulestore_type(course.id) == XML_MODULESTORE_TYPE:
# If we are a static course with the course_image attribute
# set different than the default, return that path so that
# courses can use custom course image paths, otherwise just
# return the default static path.
url = '/static/' + (course.static_asset_path or getattr(course, 'data_dir', ''))
if hasattr(course, 'course_image') and course.course_image != course.fields['course_image'].default:
url += '/' + course.course_image
else:
url += '/images/course_image.jpg'
else:
loc = StaticContent.compute_location(course.id, course.course_image)
url = loc.to_deprecated_string()
return url
def find_file(filesystem, dirs, filename):
"""
Looks for a filename in a list of dirs on a filesystem, in the specified order.
filesystem: an OSFS filesystem
dirs: a list of path objects
filename: a string
Returns d / filename if found in dir d, else raises ResourceNotFoundError.
"""
for directory in dirs:
filepath = path(directory) / filename
if filesystem.exists(filepath):
return filepath
raise ResourceNotFoundError(u"Could not find {0}".format(filename))
def get_course_about_section(course, section_key):
"""
This returns the snippet of html to be rendered on the course about page,
given the key for the section.
Valid keys:
- overview
- title
- university
- number
- short_description
- description
- key_dates (includes start, end, exams, etc)
- video
- course_staff_short
- course_staff_extended
- requirements
- syllabus
- textbook
- faq
- more_info
- ocw_links
"""
# Many of these are stored as html files instead of some semantic
# markup. This can change without effecting this interface when we find a
# good format for defining so many snippets of text/html.
# TODO: Remove number, instructors from this list
if section_key in ['short_description', 'description', 'key_dates', 'video',
'course_staff_short', 'course_staff_extended',
'requirements', 'syllabus', 'textbook', 'faq', 'more_info',
'number', 'instructors', 'overview',
'effort', 'end_date', 'prerequisites', 'ocw_links']:
try:
request = get_request_for_thread()
loc = course.location.replace(category='about', name=section_key)
# Use an empty cache
field_data_cache = FieldDataCache([], course.id, request.user)
about_module = get_module(
request.user,
request,
loc,
field_data_cache,
course.id,
not_found_ok=True,
wrap_xmodule_display=False,
static_asset_path=course.static_asset_path
)
html = ''
if about_module is not None:
try:
html = about_module.render('student_view').content
except Exception: # pylint: disable=broad-except
html = render_to_string('courseware/error-message.html', None)
log.exception(
u"Error rendering course={course}, section_key={section_key}".format(
course=course, section_key=section_key
))
return html
except ItemNotFoundError:
log.warning(
u"Missing about section {key} in course {url}".format(key=section_key, url=course.location.to_deprecated_string())
)
return None
elif section_key == "title":
return course.display_name_with_default
elif section_key == "university":
return course.display_org_with_default
elif section_key == "number":
return course.display_number_with_default
raise KeyError("Invalid about key " + str(section_key))
def get_course_info_section(request, course, section_key):
"""
This returns the snippet of html to be rendered on the course info page,
given the key for the section.
Valid keys:
- handouts
- guest_handouts
- updates
- guest_updates
"""
usage_key = course.id.make_usage_key('course_info', section_key)
# Use an empty cache
field_data_cache = FieldDataCache([], course.id, request.user)
info_module = get_module(
request.user,
request,
usage_key,
field_data_cache,
course.id,
wrap_xmodule_display=False,
static_asset_path=course.static_asset_path
)
html = ''
if info_module is not None:
try:
html = info_module.render('student_view').content
except Exception: # pylint: disable=broad-except
html = render_to_string('courseware/error-message.html', None)
log.exception(
u"Error rendering course={course}, section_key={section_key}".format(
course=course, section_key=section_key
))
return html
# TODO: Fix this such that these are pulled in as extra course-specific tabs.
# arjun will address this by the end of October if no one does so prior to
# then.
def get_course_syllabus_section(course, section_key):
"""
This returns the snippet of html to be rendered on the syllabus page,
given the key for the section.
Valid keys:
- syllabus
- guest_syllabus
"""
# Many of these are stored as html files instead of some semantic
# markup. This can change without effecting this interface when we find a
# good format for defining so many snippets of text/html.
if section_key in ['syllabus', 'guest_syllabus']:
try:
filesys = course.system.resources_fs
# first look for a run-specific version
dirs = [path("syllabus") / course.url_name, path("syllabus")]
filepath = find_file(filesys, dirs, section_key + ".html")
with filesys.open(filepath) as html_file:
return replace_static_urls(
html_file.read().decode('utf-8'),
getattr(course, 'data_dir', None),
course_id=course.id,
static_asset_path=course.static_asset_path,
)
except ResourceNotFoundError:
log.exception(
u"Missing syllabus section {key} in course {url}".format(key=section_key, url=course.location.to_deprecated_string())
)
return "! Syllabus missing !"
raise KeyError("Invalid about key " + str(section_key))
def get_courses_by_university(user, domain=None):
'''
Returns dict of lists of courses available, keyed by course.org (ie university).
Courses are sorted by course.number.
'''
# TODO: Clean up how 'error' is done.
# filter out any courses that errored.
visible_courses = get_courses(user, domain)
universities = defaultdict(list)
for course in visible_courses:
universities[course.org].append(course)
return universities
def get_courses(user, domain=None):
'''
Returns a list of courses available, sorted by course.number
'''
courses = branding.get_visible_courses()
courses = [c for c in courses if has_access(user, 'see_exists', c)]
courses = sorted(courses, key=lambda course: course.number)
return courses
def sort_by_announcement(courses):
"""
Sorts a list of courses by their announcement date. If the date is
not available, sort them by their start date.
"""
    # Sort courses by how far they are from their start day
key = lambda course: course.sorting_score
courses = sorted(courses, key=key)
return courses
def get_cms_course_link(course, page='course'):
"""
Returns a link to course_index for editing the course in cms,
assuming that the course is actually cms-backed.
"""
# This is fragile, but unfortunately the problem is that within the LMS we
# can't use the reverse calls from the CMS
return u"//{}/{}/{}".format(settings.CMS_BASE, page, unicode(course.id))
def get_cms_block_link(block, page):
"""
Returns a link to block_index for editing the course in cms,
assuming that the block is actually cms-backed.
"""
# This is fragile, but unfortunately the problem is that within the LMS we
# can't use the reverse calls from the CMS
return u"//{}/{}/{}".format(settings.CMS_BASE, page, block.location)
def get_studio_url(course_key, page):
"""
Get the Studio URL of the page that is passed in.
"""
assert(isinstance(course_key, CourseKey))
course = get_course_by_id(course_key)
is_studio_course = course.course_edit_method == "Studio"
is_mongo_course = modulestore().get_modulestore_type(course_key) == MONGO_MODULESTORE_TYPE
studio_link = None
if is_studio_course and is_mongo_course:
studio_link = get_cms_course_link(course, page)
return studio_link
| agpl-3.0 |
timopulkkinen/BubbleFish | tools/json_schema_compiler/h_generator.py | 3 | 14191 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType, Type
import cpp_util
import schema_util
class HGenerator(object):
def __init__(self, type_generator, cpp_namespace):
self._type_generator = type_generator
self._cpp_namespace = cpp_namespace
def Generate(self, namespace):
return _Generator(namespace,
self._type_generator,
self._cpp_namespace).Generate()
class _Generator(object):
"""A .h generator for a namespace.
"""
def __init__(self, namespace, cpp_type_generator, cpp_namespace):
self._namespace = namespace
self._type_helper = cpp_type_generator
self._cpp_namespace = cpp_namespace
self._target_namespace = (
self._type_helper.GetCppNamespaceName(self._namespace))
def Generate(self):
"""Generates a Code object with the .h for a single namespace.
"""
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
.Append()
)
ifndef_name = cpp_util.GenerateIfndefName(self._namespace.source_file_dir,
self._target_namespace)
(c.Append('#ifndef %s' % ifndef_name)
.Append('#define %s' % ifndef_name)
.Append()
.Append('#include <map>')
.Append('#include <string>')
.Append('#include <vector>')
.Append()
.Append('#include "base/basictypes.h"')
.Append('#include "base/logging.h"')
.Append('#include "base/memory/linked_ptr.h"')
.Append('#include "base/memory/scoped_ptr.h"')
.Append('#include "base/values.h"')
.Cblock(self._type_helper.GenerateIncludes())
.Append()
)
c.Concat(cpp_util.OpenNamespace(self._cpp_namespace))
# TODO(calamity): These forward declarations should be #includes to allow
# $ref types from other files to be used as required params. This requires
# some detangling of windows and tabs which will currently lead to circular
# #includes.
forward_declarations = (
self._type_helper.GenerateForwardDeclarations())
if not forward_declarations.IsEmpty():
(c.Append()
.Cblock(forward_declarations)
)
c.Concat(self._type_helper.GetNamespaceStart())
c.Append()
if self._namespace.properties:
(c.Append('//')
.Append('// Properties')
.Append('//')
.Append()
)
for property in self._namespace.properties.values():
property_code = self._type_helper.GeneratePropertyValues(
property,
'extern const %(type)s %(name)s;')
if property_code:
c.Cblock(property_code)
if self._namespace.types:
(c.Append('//')
.Append('// Types')
.Append('//')
.Append()
.Cblock(self._GenerateTypes(self._FieldDependencyOrder(),
is_toplevel=True,
generate_typedefs=True))
)
if self._namespace.functions:
(c.Append('//')
.Append('// Functions')
.Append('//')
.Append()
)
for function in self._namespace.functions.values():
c.Cblock(self._GenerateFunction(function))
if self._namespace.events:
(c.Append('//')
.Append('// Events')
.Append('//')
.Append()
)
for event in self._namespace.events.values():
c.Cblock(self._GenerateEvent(event))
(c.Concat(self._type_helper.GetNamespaceEnd())
.Concat(cpp_util.CloseNamespace(self._cpp_namespace))
.Append('#endif // %s' % ifndef_name)
.Append()
)
return c
def _FieldDependencyOrder(self):
"""Generates the list of types in the current namespace in an order in which
depended-upon types appear before types which depend on them.
"""
dependency_order = []
def ExpandType(path, type_):
if type_ in path:
raise ValueError("Illegal circular dependency via cycle " +
", ".join(map(lambda x: x.name, path + [type_])))
for prop in type_.properties.values():
if (prop.type_ == PropertyType.REF and
schema_util.GetNamespace(prop.ref_type) == self._namespace.name):
ExpandType(path + [type_], self._namespace.types[prop.ref_type])
if not type_ in dependency_order:
dependency_order.append(type_)
for type_ in self._namespace.types.values():
ExpandType([], type_)
return dependency_order
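  # For example, if TypeB in this namespace has a $ref property pointing at
  # TypeA (also in this namespace), TypeA is appended to dependency_order --
  # and therefore declared -- before TypeB.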
def _GenerateEnumDeclaration(self, enum_name, type_):
"""Generate the declaration of a C++ enum.
"""
c = Code()
c.Sblock('enum %s {' % enum_name)
c.Append(self._type_helper.GetEnumNoneValue(type_) + ',')
for value in type_.enum_values:
c.Append(self._type_helper.GetEnumValue(type_, value) + ',')
return c.Eblock('};')
def _GenerateFields(self, props):
"""Generates the field declarations when declaring a type.
"""
c = Code()
needs_blank_line = False
for prop in props:
if needs_blank_line:
c.Append()
needs_blank_line = True
if prop.description:
c.Comment(prop.description)
# ANY is a base::Value which is abstract and cannot be a direct member, so
# we always need to wrap it in a scoped_ptr.
is_ptr = prop.optional or prop.type_.property_type == PropertyType.ANY
(c.Append('%s %s;' % (
self._type_helper.GetCppType(prop.type_, is_ptr=is_ptr),
prop.unix_name))
)
return c
def _GenerateType(self, type_, is_toplevel=False, generate_typedefs=False):
"""Generates a struct for |type_|.
|is_toplevel| implies that the type was declared in the "types" field
of an API schema. This determines the correct function
modifier(s).
|generate_typedefs| controls whether primitive types should be generated as
a typedef. This may not always be desired. If false,
primitive types are ignored.
"""
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
if type_.functions:
# Wrap functions within types in the type's namespace.
(c.Append('namespace %s {' % classname)
.Append()
)
for function in type_.functions.values():
c.Cblock(self._GenerateFunction(function))
c.Append('} // namespace %s' % classname)
elif type_.property_type == PropertyType.ARRAY:
if generate_typedefs and type_.description:
c.Comment(type_.description)
c.Cblock(self._GenerateType(type_.item_type))
if generate_typedefs:
(c.Append('typedef std::vector<%s > %s;' % (
self._type_helper.GetCppType(type_.item_type),
classname))
)
elif type_.property_type == PropertyType.STRING:
if generate_typedefs:
if type_.description:
c.Comment(type_.description)
c.Append('typedef std::string %(classname)s;')
elif type_.property_type == PropertyType.ENUM:
if type_.description:
c.Comment(type_.description)
c.Sblock('enum %(classname)s {')
c.Append('%s,' % self._type_helper.GetEnumNoneValue(type_))
for value in type_.enum_values:
c.Append('%s,' % self._type_helper.GetEnumValue(type_, value))
      # Top level enums are in a namespace scope so the methods shouldn't be
      # static. On the other hand, those declared inline (e.g. in an object) should be.
maybe_static = '' if is_toplevel else 'static '
(c.Eblock('};')
.Append()
.Append('%sstd::string ToString(%s as_enum);' %
(maybe_static, classname))
.Append('%s%s Parse%s(const std::string& as_string);' %
(maybe_static, classname, classname))
)
elif type_.property_type == PropertyType.OBJECT:
if type_.description:
c.Comment(type_.description)
(c.Sblock('struct %(classname)s {')
.Append('%(classname)s();')
.Append('~%(classname)s();')
)
if type_.origin.from_json:
(c.Append()
.Comment('Populates a %s object from a base::Value. Returns'
' whether |out| was successfully populated.' % classname)
.Append('static bool Populate(const base::Value& value, '
'%(classname)s* out);')
)
if type_.origin.from_client:
(c.Append()
.Comment('Returns a new base::DictionaryValue representing the'
' serialized form of this %s object.' % classname)
.Append('scoped_ptr<base::DictionaryValue> ToValue() const;')
)
properties = type_.properties.values()
(c.Append()
.Cblock(self._GenerateTypes(p.type_ for p in properties))
.Cblock(self._GenerateFields(properties)))
if type_.additional_properties is not None:
# Most additionalProperties actually have type "any", which is better
# modelled as a DictionaryValue rather than a map of string -> Value.
if type_.additional_properties.property_type == PropertyType.ANY:
c.Append('base::DictionaryValue additional_properties;')
else:
(c.Cblock(self._GenerateType(type_.additional_properties))
.Append('std::map<std::string, %s> additional_properties;' %
cpp_util.PadForGenerics(
self._type_helper.GetCppType(type_.additional_properties,
is_in_container=True)))
)
(c.Eblock()
.Append()
.Sblock(' private:')
.Append('DISALLOW_COPY_AND_ASSIGN(%(classname)s);')
.Eblock('};')
)
elif type_.property_type == PropertyType.CHOICES:
if type_.description:
c.Comment(type_.description)
# Choices are modelled with optional fields for each choice. Exactly one
# field of the choice is guaranteed to be set by the compiler.
(c.Sblock('struct %(classname)s {')
.Append('%(classname)s();')
.Append('~%(classname)s();')
.Append())
c.Cblock(self._GenerateTypes(type_.choices))
if type_.origin.from_json:
(c.Comment('Populates a %s object from a base::Value. Returns'
' whether |out| was successfully populated.' % classname)
.Append('static bool Populate(const base::Value& value, '
'%(classname)s* out);')
.Append()
)
if type_.origin.from_client:
(c.Comment('Returns a new base::Value representing the'
' serialized form of this %s object.' % classname)
.Append('scoped_ptr<base::Value> ToValue() const;')
.Append()
)
c.Append('// Choices:')
for choice_type in type_.choices:
c.Append('%s as_%s;' % (
self._type_helper.GetCppType(choice_type, is_ptr=True),
choice_type.unix_name))
c.Eblock('};')
c.Substitute({'classname': classname})
return c
def _GenerateEvent(self, event):
"""Generates the namespaces for an event.
"""
c = Code()
# TODO(kalman): use event.unix_name not Classname.
event_namespace = cpp_util.Classname(event.name)
(c.Append('namespace %s {' % event_namespace)
.Append()
.Concat(self._GenerateCreateCallbackArguments(event))
.Eblock('} // namespace %s' % event_namespace)
)
return c
def _GenerateFunction(self, function):
"""Generates the namespaces and structs for a function.
"""
c = Code()
# TODO(kalman): Use function.unix_name not Classname here.
function_namespace = cpp_util.Classname(function.name)
(c.Append('namespace %s {' % function_namespace)
.Append()
.Cblock(self._GenerateFunctionParams(function))
)
if function.callback:
c.Cblock(self._GenerateFunctionResults(function.callback))
c.Append('} // namespace %s' % function_namespace)
return c
def _GenerateFunctionParams(self, function):
"""Generates the struct for passing parameters from JSON to a function.
"""
if not function.params:
return Code()
c = Code()
(c.Sblock('struct Params {')
.Append('static scoped_ptr<Params> Create(const base::ListValue& args);')
.Append('~Params();')
.Append()
.Cblock(self._GenerateTypes(p.type_ for p in function.params))
.Cblock(self._GenerateFields(function.params))
.Eblock()
.Append()
.Sblock(' private:')
.Append('Params();')
.Append()
.Append('DISALLOW_COPY_AND_ASSIGN(Params);')
.Eblock('};')
)
return c
def _GenerateTypes(self, types, is_toplevel=False, generate_typedefs=False):
"""Generate the structures required by a property such as OBJECT classes
and enums.
"""
c = Code()
for type_ in types:
c.Cblock(self._GenerateType(type_,
is_toplevel=is_toplevel,
generate_typedefs=generate_typedefs))
return c
def _GenerateCreateCallbackArguments(self, function):
"""Generates functions for passing parameters to a callback.
"""
c = Code()
params = function.params
c.Cblock(self._GenerateTypes((p.type_ for p in params), is_toplevel=True))
declaration_list = []
for param in params:
if param.description:
c.Comment(param.description)
declaration_list.append(cpp_util.GetParameterDeclaration(
param, self._type_helper.GetCppType(param.type_)))
c.Append('scoped_ptr<base::ListValue> Create(%s);' %
', '.join(declaration_list))
return c
def _GenerateFunctionResults(self, callback):
"""Generates namespace for passing a function's result back.
"""
c = Code()
(c.Append('namespace Results {')
.Append()
.Concat(self._GenerateCreateCallbackArguments(callback))
.Append('} // namespace Results')
)
return c
| bsd-3-clause |
quantmind/pulsar-cloud | setup.py | 2 | 1891 | import sys
import os
from setuptools import setup, find_packages
import cloud
def read(name):
filename = os.path.join(os.path.dirname(__file__), name)
with open(filename) as fp:
return fp.read()
def requirements(name):
install_requires = []
dependency_links = []
for line in read(name).split('\n'):
if line.startswith('-e '):
link = line[3:].strip()
if link == '.':
continue
dependency_links.append(link)
line = link.split('=')[1]
line = line.strip()
if line:
install_requires.append(line)
return install_requires, dependency_links
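# For illustration, requirements() understands lines such as (hypothetical):
#     pulsar
#     -e git+https://github.com/quantmind/pulsar.git#egg=pulsar
# Plain lines go straight into install_requires; '-e' lines are collected as
# dependency links, and the name after '=' is added to install_requires.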
meta = dict(
version=cloud.__version__,
description=cloud.__doc__,
name='pulsar-cloud',
author="Luca Sbardella",
author_email="luca@quantmind.com",
url="https://github.com/quantmind/pulsar-cloud",
zip_safe=False,
license='BSD',
long_description=read('README.rst'),
setup_requires=['pulsar', 'wheel'],
install_requires=requirements('requirements.txt')[0],
packages=find_packages(include=['cloud', 'cloud.*']),
scripts=['bin/s3upload.py'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities']
)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'agile':
from agile.app import AgileManager
AgileManager(description='Release manager for pulsar-cloud',
argv=sys.argv[2:]).start()
else:
setup(**meta)
| bsd-3-clause |
mvj3/leetcode | 206-reverse-linked-list.py | 1 | 1626 | """
Question:
Reverse Linked List
Reverse a singly linked list.
click to show more hints.
Hint:
A linked list can be reversed either iteratively or recursively. Could you implement both?
Performance:
1. Total Accepted: 48424 Total Submissions: 141893 Difficulty: Easy
2. Your runtime beats 98.99% of python submissions.
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
        The most important thing is to keep hold of two variables and to get the reversal steps right.
"""
if head is None:
return None
        orig_next_node = head.next
        last_reversing_node = head
        last_reversing_node.next = None
        while orig_next_node:
            # hold a reference to the next node before re-pointing
            _orig_next_node = orig_next_node.next
            # switch: point the current node back at the reversed part
            orig_next_node.next = last_reversing_node
            # advance to the next round
            last_reversing_node = orig_next_node
            orig_next_node = _orig_next_node
        return last_reversing_node
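# The hint above asks for a recursive variant as well; a minimal sketch
# (assumed helper, not part of the accepted submission above):
def reverse_list_recursive(head, prev=None):
    # Re-point each node at its predecessor, carrying the new head forward.
    if head is None:
        return prev
    next_node = head.next
    head.next = prev
    return reverse_list_recursive(next_node, head)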
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n5 = ListNode(5)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
assert Solution().reverseList(n1) == n5
assert n1.next is None
assert n2.next is n1
assert n3.next is n2
assert n4.next is n3
assert n5.next is n4
m1 = ListNode(1)
assert Solution().reverseList(m1) == m1
assert Solution().reverseList(None) is None
| mit |
ruguevara/neon | neon/experiments/experiment.py | 13 | 1600 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Defines how to collect components together to run an experiment.
"""
from neon.util.persist import YAMLable
class Experiment(YAMLable):
"""
Abstract base class definining the required interface for each concrete
experiment.
All items that are required to define an experiment (models, datasets, and
so forth) should be passed in as keyword arguments to the constructor.
This inherits configuration file handling via `yaml.YAMLObject
<http://pyyaml.org/wiki/PyYAMLDocumentation#YAMLObject>`_
"""
def run(self):
"""
The method that gets called to actually carry out all the steps of an
experiment.
Raises:
NotImplementedError: Create a concrete child class and implement
this method there.
"""
raise NotImplementedError()
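# A minimal sketch of a concrete subclass (names hypothetical; real neon
# experiments are typically wired together from a YAML configuration):
#     class FitExperiment(Experiment):
#         def __init__(self, model=None, dataset=None, **kwargs):
#             self.model = model
#             self.dataset = dataset
#         def run(self):
#             self.dataset.load()
#             self.model.fit(self.dataset)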
| apache-2.0 |
jdemaeyer/scrapy | tests/test_webclient.py | 20 | 14435 | """
Tests borrowed from the twisted.web.client tests.
"""
import os
import six
from six.moves.urllib.parse import urlparse
from twisted.trial import unittest
from twisted.web import server, static, util, resource
from twisted.internet import reactor, defer
from twisted.test.proto_helpers import StringTransport
from twisted.python.filepath import FilePath
from twisted.protocols.policies import WrappingFactory
from scrapy.core.downloader import webclient as client
from scrapy.http import Request, Headers
from scrapy.utils.python import to_bytes, to_unicode
def getPage(url, contextFactory=None, response_transform=None, *args, **kwargs):
"""Adapted version of twisted.web.client.getPage"""
def _clientfactory(url, *args, **kwargs):
url = to_unicode(url)
timeout = kwargs.pop('timeout', 0)
f = client.ScrapyHTTPClientFactory(
Request(url, *args, **kwargs), timeout=timeout)
f.deferred.addCallback(response_transform or (lambda r: r.body))
return f
from twisted.web.client import _makeGetterFactory
return _makeGetterFactory(to_bytes(url), _clientfactory,
contextFactory=contextFactory, *args, **kwargs).deferred
class ParseUrlTestCase(unittest.TestCase):
"""Test URL parsing facility and defaults values."""
def _parse(self, url):
f = client.ScrapyHTTPClientFactory(Request(url))
return (f.scheme, f.netloc, f.host, f.port, f.path)
def testParse(self):
lip = '127.0.0.1'
tests = (
("http://127.0.0.1?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
("http://127.0.0.1/?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
("http://127.0.0.1/foo?c=v&c2=v2#frag", ('http', lip, lip, 80, '/foo?c=v&c2=v2')),
("http://127.0.0.1:100?c=v&c2=v2#fragment", ('http', lip+':100', lip, 100, '/?c=v&c2=v2')),
("http://127.0.0.1:100/?c=v&c2=v2#frag", ('http', lip+':100', lip, 100, '/?c=v&c2=v2')),
("http://127.0.0.1:100/foo?c=v&c2=v2#frag", ('http', lip+':100', lip, 100, '/foo?c=v&c2=v2')),
("http://127.0.0.1", ('http', lip, lip, 80, '/')),
("http://127.0.0.1/", ('http', lip, lip, 80, '/')),
("http://127.0.0.1/foo", ('http', lip, lip, 80, '/foo')),
("http://127.0.0.1?param=value", ('http', lip, lip, 80, '/?param=value')),
("http://127.0.0.1/?param=value", ('http', lip, lip, 80, '/?param=value')),
("http://127.0.0.1:12345/foo", ('http', lip+':12345', lip, 12345, '/foo')),
("http://spam:12345/foo", ('http', 'spam:12345', 'spam', 12345, '/foo')),
("http://spam.test.org/foo", ('http', 'spam.test.org', 'spam.test.org', 80, '/foo')),
("https://127.0.0.1/foo", ('https', lip, lip, 443, '/foo')),
("https://127.0.0.1/?param=value", ('https', lip, lip, 443, '/?param=value')),
("https://127.0.0.1:12345/", ('https', lip+':12345', lip, 12345, '/')),
("http://scrapytest.org/foo ", ('http', 'scrapytest.org', 'scrapytest.org', 80, '/foo')),
("http://egg:7890 ", ('http', 'egg:7890', 'egg', 7890, '/')),
)
for url, test in tests:
test = tuple(
to_bytes(x) if not isinstance(x, int) else x for x in test)
self.assertEquals(client._parse(url), test, url)
def test_externalUnicodeInterference(self):
"""
L{client._parse} should return C{str} for the scheme, host, and path
        elements of its return tuple, even when passed a URL which has
previously been passed to L{urlparse} as a C{unicode} string.
"""
if not six.PY2:
raise unittest.SkipTest(
"Applies only to Py2, as urls can be ONLY unicode on Py3")
badInput = u'http://example.com/path'
goodInput = badInput.encode('ascii')
self._parse(badInput) # cache badInput in urlparse_cached
scheme, netloc, host, port, path = self._parse(goodInput)
self.assertTrue(isinstance(scheme, str))
self.assertTrue(isinstance(netloc, str))
self.assertTrue(isinstance(host, str))
self.assertTrue(isinstance(path, str))
self.assertTrue(isinstance(port, int))
class ScrapyHTTPPageGetterTests(unittest.TestCase):
def test_earlyHeaders(self):
# basic test stolen from twisted HTTPageGetter
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
body="some data",
headers={
'Host': 'example.net',
'User-Agent': 'fooble',
'Cookie': 'blah blah',
'Content-Length': '12981',
'Useful': 'value'}))
self._test(factory,
b"GET /bar HTTP/1.0\r\n"
b"Content-Length: 9\r\n"
b"Useful: value\r\n"
b"Connection: close\r\n"
b"User-Agent: fooble\r\n"
b"Host: example.net\r\n"
b"Cookie: blah blah\r\n"
b"\r\n"
b"some data")
# test minimal sent headers
factory = client.ScrapyHTTPClientFactory(Request('http://foo/bar'))
self._test(factory,
b"GET /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"\r\n")
# test a simple POST with body and content-type
factory = client.ScrapyHTTPClientFactory(Request(
method='POST',
url='http://foo/bar',
body='name=value',
headers={'Content-Type': 'application/x-www-form-urlencoded'}))
self._test(factory,
b"POST /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"Connection: close\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"name=value")
# test a POST method with no body provided
factory = client.ScrapyHTTPClientFactory(Request(
method='POST',
url='http://foo/bar'
))
self._test(factory,
b"POST /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"Content-Length: 0\r\n"
b"\r\n")
# test with single and multivalued headers
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
headers={
'X-Meta-Single': 'single',
'X-Meta-Multivalued': ['value1', 'value2'],
}))
self._test(factory,
b"GET /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"X-Meta-Multivalued: value1\r\n"
b"X-Meta-Multivalued: value2\r\n"
b"X-Meta-Single: single\r\n"
b"\r\n")
# same test with single and multivalued headers but using Headers class
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
headers=Headers({
'X-Meta-Single': 'single',
'X-Meta-Multivalued': ['value1', 'value2'],
})))
self._test(factory,
b"GET /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"X-Meta-Multivalued: value1\r\n"
b"X-Meta-Multivalued: value2\r\n"
b"X-Meta-Single: single\r\n"
b"\r\n")
def _test(self, factory, testvalue):
transport = StringTransport()
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.makeConnection(transport)
self.assertEqual(
set(transport.value().splitlines()),
set(testvalue.splitlines()))
return testvalue
def test_non_standard_line_endings(self):
# regression test for: http://dev.scrapy.org/ticket/258
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar'))
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.headers = Headers()
protocol.dataReceived(b"HTTP/1.0 200 OK\n")
protocol.dataReceived(b"Hello: World\n")
protocol.dataReceived(b"Foo: Bar\n")
protocol.dataReceived(b"\n")
self.assertEqual(protocol.headers,
Headers({'Hello': ['World'], 'Foo': ['Bar']}))
from twisted.web.test.test_webclient import ForeverTakingResource, \
ErrorResource, NoLengthResource, HostHeaderResource, \
PayloadResource, BrokenDownloadResource
class EncodingResource(resource.Resource):
out_encoding = 'cp1251'
def render(self, request):
body = to_unicode(request.content.read())
request.setHeader(b'content-encoding', self.out_encoding)
return body.encode(self.out_encoding)
class WebClientTestCase(unittest.TestCase):
def _listen(self, site):
return reactor.listenTCP(0, site, interface="127.0.0.1")
def setUp(self):
name = self.mktemp()
os.mkdir(name)
FilePath(name).child("file").setContent(b"0123456789")
r = static.File(name)
r.putChild(b"redirect", util.Redirect(b"/file"))
r.putChild(b"wait", ForeverTakingResource())
r.putChild(b"error", ErrorResource())
r.putChild(b"nolength", NoLengthResource())
r.putChild(b"host", HostHeaderResource())
r.putChild(b"payload", PayloadResource())
r.putChild(b"broken", BrokenDownloadResource())
r.putChild(b"encoding", EncodingResource())
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
self.port = self._listen(self.wrapper)
self.portno = self.port.getHost().port
def tearDown(self):
return self.port.stopListening()
def getURL(self, path):
return "http://127.0.0.1:%d/%s" % (self.portno, path)
def testPayload(self):
s = "0123456789" * 10
return getPage(self.getURL("payload"), body=s).addCallback(
self.assertEquals, to_bytes(s))
def testHostHeader(self):
        # if we pass the Host header explicitly, it should be used; otherwise
        # it should be extracted from the url
return defer.gatherResults([
getPage(self.getURL("host")).addCallback(
self.assertEquals, to_bytes("127.0.0.1:%d" % self.portno)),
getPage(self.getURL("host"), headers={"Host": "www.example.com"}).addCallback(
self.assertEquals, to_bytes("www.example.com"))])
def test_getPage(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the body of the response if the default method B{GET} is used.
"""
d = getPage(self.getURL("file"))
d.addCallback(self.assertEquals, b"0123456789")
return d
def test_getPageHead(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the empty string if the method is C{HEAD} and there is a successful
response code.
"""
def _getPage(method):
return getPage(self.getURL("file"), method=method)
return defer.gatherResults([
_getPage("head").addCallback(self.assertEqual, b""),
_getPage("HEAD").addCallback(self.assertEqual, b"")])
def test_timeoutNotTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and the page is
retrieved before the timeout period elapses, the L{Deferred} is
called back with the contents of the page.
"""
d = getPage(self.getURL("host"), timeout=100)
d.addCallback(
self.assertEquals, to_bytes("127.0.0.1:%d" % self.portno))
return d
def test_timeoutTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and that many
        seconds elapse before the server responds to the request, the
L{Deferred} is errbacked with a L{error.TimeoutError}.
"""
finished = self.assertFailure(
getPage(self.getURL("wait"), timeout=0.000001),
defer.TimeoutError)
def cleanup(passthrough):
# Clean up the server which is hanging around not doing
# anything.
connected = list(six.iterkeys(self.wrapper.protocols))
# There might be nothing here if the server managed to already see
# that the connection was lost.
if connected:
connected[0].transport.loseConnection()
return passthrough
finished.addBoth(cleanup)
return finished
def testNotFound(self):
return getPage(self.getURL('notsuchfile')).addCallback(self._cbNoSuchFile)
def _cbNoSuchFile(self, pageData):
self.assert_(b'404 - No Such Resource' in pageData)
def testFactoryInfo(self):
url = self.getURL('file')
_, _, host, port, _ = client._parse(url)
factory = client.ScrapyHTTPClientFactory(Request(url))
reactor.connectTCP(to_unicode(host), port, factory)
return factory.deferred.addCallback(self._cbFactoryInfo, factory)
def _cbFactoryInfo(self, ignoredResult, factory):
self.assertEquals(factory.status, b'200')
self.assert_(factory.version.startswith(b'HTTP/'))
self.assertEquals(factory.message, b'OK')
self.assertEquals(factory.response_headers[b'content-length'], b'10')
def testRedirect(self):
return getPage(self.getURL("redirect")).addCallback(self._cbRedirect)
def _cbRedirect(self, pageData):
self.assertEquals(pageData,
b'\n<html>\n <head>\n <meta http-equiv="refresh" content="0;URL=/file">\n'
b' </head>\n <body bgcolor="#FFFFFF" text="#000000">\n '
b'<a href="/file">click here</a>\n </body>\n</html>\n')
def test_Encoding(self):
""" Test that non-standart body encoding matches
Content-Encoding header """
body = b'\xd0\x81\xd1\x8e\xd0\xaf'
return getPage(
self.getURL('encoding'), body=body, response_transform=lambda r: r)\
.addCallback(self._check_Encoding, body)
def _check_Encoding(self, response, original_body):
content_encoding = to_unicode(response.headers[b'Content-Encoding'])
self.assertEquals(content_encoding, EncodingResource.out_encoding)
self.assertEquals(
response.body.decode(content_encoding), to_unicode(original_body))
| bsd-3-clause |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Tools/scripts/diff.py | 9 | 2051 | """ Command line interface to difflib.py providing diffs in four formats:
* ndiff: lists every line and highlights interline changes.
* context: highlights clusters of changes in a before/after format.
* unified: highlights clusters of changes in an inline format.
* html: generates side by side comparison with change highlights.
"""
import sys, os, time, difflib, optparse
def main():
usage = "usage: %prog [options] fromfile tofile"
parser = optparse.OptionParser(usage)
parser.add_option("-c", action="store_true", default=False, help='Produce a context format diff (default)')
parser.add_option("-u", action="store_true", default=False, help='Produce a unified format diff')
parser.add_option("-m", action="store_true", default=False, help='Produce HTML side by side diff (can use -c and -l in conjunction)')
parser.add_option("-n", action="store_true", default=False, help='Produce a ndiff format diff')
parser.add_option("-l", "--lines", type="int", default=3, help='Set number of context lines (default 3)')
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
if len(args) != 2:
parser.error("need to specify both a fromfile and tofile")
n = options.lines
fromfile, tofile = args
fromdate = time.ctime(os.stat(fromfile).st_mtime)
todate = time.ctime(os.stat(tofile).st_mtime)
fromlines = open(fromfile, 'U').readlines()
tolines = open(tofile, 'U').readlines()
if options.u:
diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
elif options.n:
diff = difflib.ndiff(fromlines, tolines)
elif options.m:
diff = difflib.HtmlDiff().make_file(fromlines,tolines,fromfile,tofile,context=options.c,numlines=n)
else:
diff = difflib.context_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
sys.stdout.writelines(diff)
if __name__ == '__main__':
main()
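# Example invocations (file names hypothetical):
#     python diff.py -u old.txt new.txt        # unified format
#     python diff.py -m -l 5 old.txt new.txt   # HTML side-by-side, 5 context lines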
| bsd-2-clause |
stenix71/izpack | src/doc-reST/confluencize.py | 14 | 3562 | #!/usr/bin/env python
import string
from glob import glob
from xml.etree.ElementTree import ElementTree, tostring
def warn_missing(element):
    if element.tag not in action:
        print '============( %s )============' % element.tag
action = {
'title': lambda element, context: ('\nh%s. %s\n\n' % (context['header_depth'], decode_inline(element, context)), False),
'paragraph': lambda element, context: ('%s%s' % (decode_inline(element, context), context['paragraph_newlines']), False),
'list_item': lambda element, context: ('%s ' % context['list_prefix'], True),
'reference': lambda element, context: ('[%s|%s]' % (element.text, element.get('refuri', '')), True),
'literal_block': lambda element, context: ('\n{code}\n%s\n{code}\n\n' % element.text, False),
'block_quote': lambda element, context: ('\n{quote}\n%s\n{quote}\n\n' % element.text, True),
'strong': lambda element, context: ('*%s*' % element.text, True),
'literal': lambda element, context: ('{{%s}}' % element.text, True),
'emphasis': lambda element, context: ('_%s_' % element.text, True),
'image': lambda element, context: ('\n{note:title=There was an image}%s{note}\n\n' % element.attrib['uri'], False),
'term': lambda element, context: ('%s ' % context['list_prefix'], True),
'table': lambda element, context: ('\n{note:title=Table to format}{code}%s{code}{note}\n\n' % tostring(element), False)
}
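# For example, a docutils <strong> node containing "Note" renders as "*Note*"
# and an <emphasis> node as "_Note_", i.e. Confluence wiki markup.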
initial_context = {
'header_depth': 1,
'list_prefix' : '',
'paragraph_newlines': '\n\n'
}
def decode_inline(element, context):
warn_missing(element)
text = element.text
if text is None: text = ''
tail = element.tail
if tail is None: tail = ''
subs = []
for child in element.getchildren():
if child.tag in action:
output = action[child.tag](child, context)[0]
subs.append(output)
else:
subs.append(decode_inline(child, context))
sub = string.join(subs, '')
return '%s%s%s' % (text, sub, tail)
def convert(source, target):
tree = ElementTree()
tree.parse(source)
out = open(target, "w")
def walk(element, context):
warn_missing(element)
if element.tag in action:
output, walk_children = action[element.tag](element, context)
if output is not None:
print output
out.write(output)
if not walk_children:
return
for child in element.getchildren():
new_context = dict(context)
if element.tag == 'section':
new_context['header_depth'] = new_context['header_depth'] + 1
elif element.tag == 'bullet_list':
new_context['list_prefix'] = new_context['list_prefix'] + '*'
new_context['paragraph_newlines'] = '\n'
elif element.tag == 'enumerated_list':
new_context['list_prefix'] = new_context['list_prefix'] + '#'
new_context['paragraph_newlines'] = '\n'
elif element.tag == 'definition_list':
new_context['list_prefix'] = new_context['list_prefix'] + '*'
new_context['paragraph_newlines'] = '\n'
walk(child, new_context)
for child in tree.getroot().getchildren():
walk(child, initial_context)
out.close()
if __name__ == '__main__':
for xml_file in glob('xml/*.xml'):
convert(xml_file, xml_file[0:-4] + '.txt')
| apache-2.0 |
mtconley/turntable | test/lib/python2.7/site-packages/numpy/distutils/mingw32ccompiler.py | 57 | 22435 | """
Support code for building Python extensions on Windows.
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# 3. Force windows to use g77
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import subprocess
import re
# Overwrite certain distutils.ccompiler functions:
import numpy.distutils.ccompiler
if sys.version_info[0] < 3:
from . import log
else:
from numpy.distutils import log
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# --> this is done in numpy/distutils/ccompiler.py
# 3. Force windows to use g77
import distutils.cygwinccompiler
from distutils.version import StrictVersion
from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
from numpy.distutils.misc_util import msvc_runtime_library, get_build_architecture
# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
""" A modified MingW32 compiler compatible with an MSVC built Python.
"""
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
distutils.cygwinccompiler.CygwinCCompiler.__init__ (self,
verbose, dry_run, force)
# we need to support 3.2 which doesn't match the standard
# get_versions methods regex
if self.gcc_version is None:
import re
p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
stdout=subprocess.PIPE)
out_string = p.stdout.read()
p.stdout.close()
result = re.search('(\d+\.\d+)', out_string)
if result:
self.gcc_version = StrictVersion(result.group(1))
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
if self.linker_dll == 'dllwrap':
# Commented out '--driver-name g++' part that fixes weird
# g++.exe: g++: No such file or directory
# error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).
# If the --driver-name part is required for some environment
# then make the inclusion of this part specific to that environment.
self.linker = 'dllwrap' # --driver-name g++'
elif self.linker_dll == 'gcc':
self.linker = 'g++'
# **changes: eric jones 4/11/01
# 1. Check for import library on Windows. Build if it doesn't exist.
build_import_library()
# Check for custom msvc runtime library on Windows. Build if it doesn't exist.
msvcr_success = build_msvcr_library()
msvcr_dbg_success = build_msvcr_library(debug=True)
if msvcr_success or msvcr_dbg_success:
# add preprocessor statement for using customized msvcr lib
self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')
# Define the MSVC version as hint for MinGW
msvcr_version = '0x%03i0' % int(msvc_runtime_library().lstrip('msvcr'))
self.define_macro('__MSVCRT_VERSION__', msvcr_version)
# **changes: eric jones 4/11/01
# 2. increased optimization and turned off all warnings
# 3. also added --driver-name g++
#self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
# compiler_so='gcc -mno-cygwin -mdll -O2 -w',
# linker_exe='gcc -mno-cygwin',
# linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s'
# % (self.linker, entry_point))
# MS_WIN64 should be defined when building for amd64 on windows, but
# python headers define it only for MS compilers, which has all kind of
# bad consequences, like using Py_ModuleInit4 instead of
# Py_ModuleInit4_64, etc... So we add it here
if get_build_architecture() == 'AMD64':
if self.gcc_version < "4.0":
self.set_executables(
compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',
compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall -Wstrict-prototypes',
linker_exe='gcc -g -mno-cygwin',
linker_so='gcc -g -mno-cygwin -shared')
else:
# gcc-4 series releases do not support -mno-cygwin option
self.set_executables(
compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes',
linker_exe='gcc -g',
linker_so='gcc -g -shared')
else:
if self.gcc_version <= "3.0.0":
self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='%s -mno-cygwin -mdll -static %s'
% (self.linker, entry_point))
elif self.gcc_version < "4.0":
self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall',
compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='g++ -mno-cygwin -shared')
else:
# gcc-4 series releases do not support -mno-cygwin option
self.set_executables(compiler='gcc -O2 -Wall',
compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
linker_exe='g++ ',
linker_so='g++ -shared')
# added for python2.3 support
# we can't pass it through set_executables because pre 2.2 would fail
self.compiler_cxx = ['g++']
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
#self.dll_libraries=[]
return
# __init__ ()
def link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
export_symbols = None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
        # Include the appropriate MSVC runtime library if Python was built
# with MSVC >= 7.0 (MinGW standard is msvcrt)
runtime_library = msvc_runtime_library()
if runtime_library:
if not libraries:
libraries = []
libraries.append(runtime_library)
args = (self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, #export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
if self.gcc_version < "3.0.0":
func = distutils.cygwinccompiler.CygwinCCompiler.link
else:
func = UnixCCompiler.link
func(*args[:func.__code__.co_argcount])
return
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
# added these lines to strip off windows drive letters
# without it, .o files are placed next to .c files
# instead of the build directory
drv, base = os.path.splitdrive(base)
if drv:
base = base[1:]
if ext not in (self.src_extensions + ['.rc', '.res']):
raise UnknownFileError(
"unknown file type '%s' (from '%s')" % \
(ext, src_name))
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def find_python_dll():
maj, min, micro = [int(i) for i in sys.version_info[:3]]
dllname = 'python%d%d.dll' % (maj, min)
print("Looking for %s" % dllname)
# We can't do much here:
# - find it in python main dir
# - in system32,
    # - otherwise (SxS), I don't know how to get it.
lib_dirs = []
lib_dirs.append(sys.prefix)
lib_dirs.append(os.path.join(sys.prefix, 'lib'))
try:
lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32'))
except KeyError:
pass
for d in lib_dirs:
dll = os.path.join(d, dllname)
if os.path.exists(dll):
return dll
raise ValueError("%s not found in %s" % (dllname, lib_dirs))
def dump_table(dll):
st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE)
return st.stdout.readlines()
def generate_def(dll, dfile):
"""Given a dll file location, get all its exported symbols and dump them
into the given def file.
The .def file will be overwritten"""
dump = dump_table(dll)
for i in range(len(dump)):
if _START.match(dump[i].decode()):
break
else:
raise ValueError("Symbol table not found")
syms = []
for j in range(i+1, len(dump)):
m = _TABLE.match(dump[j].decode())
if m:
syms.append((int(m.group(1).strip()), m.group(2)))
else:
break
if len(syms) == 0:
log.warn('No symbols found in %s' % dll)
d = open(dfile, 'w')
d.write('LIBRARY %s\n' % os.path.basename(dll))
d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
d.write(';DATA PRELOAD SINGLE\n')
d.write('\nEXPORTS\n')
for s in syms:
#d.write('@%d %s\n' % (s[0], s[1]))
d.write('%s\n' % s[1])
d.close()
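# The resulting .def file looks roughly like this (symbol names illustrative):
#     LIBRARY python27.dll
#     ;CODE PRELOAD MOVEABLE DISCARDABLE
#     ;DATA PRELOAD SINGLE
#
#     EXPORTS
#     Py_Initialize
#     Py_Finalize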
def find_dll(dll_name):
arch = {'AMD64' : 'amd64',
'Intel' : 'x86'}[get_build_architecture()]
def _find_dll_in_winsxs(dll_name):
# Walk through the WinSxS directory to find the dll.
winsxs_path = os.path.join(os.environ['WINDIR'], 'winsxs')
if not os.path.exists(winsxs_path):
return None
for root, dirs, files in os.walk(winsxs_path):
if dll_name in files and arch in root:
return os.path.join(root, dll_name)
return None
def _find_dll_in_path(dll_name):
# First, look in the Python directory, then scan PATH for
# the given dll name.
for path in [sys.prefix] + os.environ['PATH'].split(';'):
filepath = os.path.join(path, dll_name)
if os.path.exists(filepath):
return os.path.abspath(filepath)
return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)
def build_msvcr_library(debug=False):
if os.name != 'nt':
return False
msvcr_name = msvc_runtime_library()
# Skip using a custom library for versions < MSVC 8.0
if int(msvcr_name.lstrip('msvcr')) < 80:
log.debug('Skip building msvcr library: custom functionality not present')
return False
if debug:
msvcr_name += 'd'
# Skip if custom library already exists
out_name = "lib%s.a" % msvcr_name
out_file = os.path.join(sys.prefix, 'libs', out_name)
if os.path.isfile(out_file):
log.debug('Skip building msvcr library: "%s" exists' % (out_file))
return True
# Find the msvcr dll
msvcr_dll_name = msvcr_name + '.dll'
dll_file = find_dll(msvcr_dll_name)
if not dll_file:
log.warn('Cannot build msvcr library: "%s" not found' % msvcr_dll_name)
return False
def_name = "lib%s.def" % msvcr_name
def_file = os.path.join(sys.prefix, 'libs', def_name)
log.info('Building msvcr library: "%s" (from %s)' \
% (out_file, dll_file))
# Generate a symbol definition file from the msvcr dll
generate_def(dll_file, def_file)
# Create a custom mingw library for the given symbol definitions
cmd = ['dlltool', '-d', def_file, '-l', out_file]
retcode = subprocess.call(cmd)
# Clean up symbol definitions
os.remove(def_file)
return (not retcode)
def build_import_library():
if os.name != 'nt':
return
arch = get_build_architecture()
if arch == 'AMD64':
return _build_import_library_amd64()
elif arch == 'Intel':
return _build_import_library_x86()
else:
raise ValueError("Unhandled arch %s" % arch)
def _build_import_library_amd64():
dll_file = find_python_dll()
out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
out_file = os.path.join(sys.prefix, 'libs', out_name)
if os.path.isfile(out_file):
log.debug('Skip building import library: "%s" exists' % (out_file))
return
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
log.info('Building import library (arch=AMD64): "%s" (from %s)' \
% (out_file, dll_file))
generate_def(dll_file, def_file)
cmd = ['dlltool', '-d', def_file, '-l', out_file]
subprocess.Popen(cmd)
def _build_import_library_x86():
""" Build the import libraries for Mingw32-gcc on Windows
"""
lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
lib_file = os.path.join(sys.prefix, 'libs', lib_name)
out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
out_file = os.path.join(sys.prefix, 'libs', out_name)
if not os.path.isfile(lib_file):
log.warn('Cannot build import library: "%s" not found' % (lib_file))
return
if os.path.isfile(out_file):
log.debug('Skip building import library: "%s" exists' % (out_file))
return
log.info('Building import library (ARCH=x86): "%s"' % (out_file))
from numpy.distutils import lib2def
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
nm_output = lib2def.getnm(nm_cmd)
dlist, flist = lib2def.parse_nm(nm_output)
lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
dll_name = "python%d%d.dll" % tuple(sys.version_info[:2])
args = (dll_name, def_file, out_file)
cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args
status = os.system(cmd)
# for now, fail silently
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
#if not success:
# msg = "Couldn't find import library, and failed to build it."
# raise DistutilsPlatformError(msg)
return
#=====================================
# Dealing with Visual Studio MANIFESTS
#=====================================
# Functions to deal with visual studio manifests. Manifests are a mechanism to
# enforce strong DLL versioning on windows, and have nothing to do with
# distutils MANIFEST. Manifests are XML files with version info, and are used
# by the OS loader; they are necessary when linking against a DLL not in the
# system path; in particular, official python 2.6 binary is built against the
# MS runtime 9 (the one from VS 2008), which is not available on most windows
# systems; python 2.6 installer does install it in the Win SxS (Side by side)
# directory, but this requires the manifest for this to work. This is a big
# mess, thanks MS for a wonderful system.
# XXX: ideally, we should use exactly the same version as used by python. I
# submitted a patch to get this version, but it was only included for python
# 2.6.1 and above. So for versions below, we use a "best guess".
_MSVCRVER_TO_FULLVER = {}
if sys.platform == 'win32':
try:
import msvcrt
# I took one version in my SxS directory: no idea if it is the good
# one, and we can't retrieve it from python
_MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
_MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
# Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 on Windows XP:
_MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
_MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
del major, minor, rest
except ImportError:
# If we are here, means python was not built with MSVC. Not sure what to do
# in that case: manifest building will fail, but it should not be used in
# that case anyway
log.warn('Cannot import msvcrt: using manifest will not be possible')
def msvc_manifest_xml(maj, min):
"""Given a major and minor version of the MSVCR, returns the
corresponding XML file."""
try:
fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
except KeyError:
raise ValueError("Version %d,%d of MSVCRT not supported yet" \
% (maj, min))
# Don't be fooled, it looks like an XML, but it is not. In particular, it
# should not have any space before starting, and its size should be
    # divisible by 4, most likely for alignment constraints when the xml is
# embedded in the binary...
# This template was copied directly from the python 2.6 binary (using
# strings.exe from mingw on python.exe).
template = """\
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>"""
return template % {'fullver': fullver, 'maj': maj, 'min': min}
def manifest_rc(name, type='dll'):
"""Return the rc file used to generate the res file which will be embedded
as manifest for given manifest file name, of given type ('dll' or
'exe').
Parameters
----------
name : str
name of the manifest file to embed
type : str {'dll', 'exe'}
type of the binary which will embed the manifest
"""
if type == 'dll':
rctype = 2
elif type == 'exe':
rctype = 1
else:
raise ValueError("Type %s not supported" % type)
return """\
#include "winuser.h"
%d RT_MANIFEST %s""" % (rctype, name)
def check_embedded_msvcr_match_linked(msver):
"""msver is the ms runtime version used for the MANIFEST."""
# check msvcr major version are the same for linking and
# embedding
msvcv = msvc_runtime_library()
if msvcv:
assert msvcv.startswith("msvcr"), msvcv
        # Dealing with something like "msvcr90" or "msvcr100", the last
        # digit is the minor release; we want int("9") or int("10"):
maj = int(msvcv[5:-1])
if not maj == int(msver):
raise ValueError(
"Discrepancy between linked msvcr " \
"(%d) and the one about to be embedded " \
"(%d)" % (int(msver), maj))
def configtest_name(config):
base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c"))
return os.path.splitext(base)[0]
def manifest_name(config):
    # Get configtest name (including suffix)
root = configtest_name(config)
exext = config.compiler.exe_extension
return root + exext + ".manifest"
def rc_name(config):
    # Get configtest name (including suffix)
root = configtest_name(config)
return root + ".rc"
def generate_manifest(config):
msver = get_build_msvc_version()
if msver is not None:
if msver >= 8:
check_embedded_msvcr_match_linked(msver)
ma = int(msver)
mi = int((msver - ma) * 10)
# Write the manifest file
manxml = msvc_manifest_xml(ma, mi)
man = open(manifest_name(config), "w")
config.temp_files.append(manifest_name(config))
man.write(manxml)
man.close()
# # Write the rc file
# manrc = manifest_rc(manifest_name(self), "exe")
# rc = open(rc_name(self), "w")
# self.temp_files.append(manrc)
# rc.write(manrc)
# rc.close()
| mit |
andrewor14/iolap | examples/src/main/python/hbase_outputformat.py | 8 | 2885 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
from pyspark import SparkContext
"""
Create test table in HBase first:
hbase(main):001:0> create 'test', 'f1'
0 row(s) in 0.7840 seconds
> hbase_outputformat <host> test row1 f1 q1 value1
> hbase_outputformat <host> test row2 f1 q1 value2
> hbase_outputformat <host> test row3 f1 q1 value3
> hbase_outputformat <host> test row4 f1 q1 value4
hbase(main):002:0> scan 'test'
ROW COLUMN+CELL
row1 column=f1:q1, timestamp=1405659615726, value=value1
row2 column=f1:q1, timestamp=1405659626803, value=value2
row3 column=f1:q1, timestamp=1405659640106, value=value3
row4 column=f1:q1, timestamp=1405659650292, value=value4
4 row(s) in 0.0780 seconds
"""
if __name__ == "__main__":
if len(sys.argv) != 7:
print("""
Usage: hbase_outputformat <host> <table> <row> <family> <qualifier> <value>
Run with example jar:
./bin/spark-submit --driver-class-path /path/to/example/jar \
/path/to/examples/hbase_outputformat.py <args>
Assumes you have created <table> with column family <family> in HBase
running on <host> already
""", file=sys.stderr)
exit(-1)
host = sys.argv[1]
table = sys.argv[2]
sc = SparkContext(appName="HBaseOutputFormat")
conf = {"hbase.zookeeper.quorum": host,
"hbase.mapred.outputtable": table,
"mapreduce.outputformat.class": "org.apache.hadoop.hbase.mapreduce.TableOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.Writable"}
keyConv = "org.apache.spark.examples.pythonconverters.StringToImmutableBytesWritableConverter"
valueConv = "org.apache.spark.examples.pythonconverters.StringListToPutConverter"
sc.parallelize([sys.argv[3:]]).map(lambda x: (x[0], x)).saveAsNewAPIHadoopDataset(
conf=conf,
keyConverter=keyConv,
valueConverter=valueConv)
sc.stop()
| apache-2.0 |
jmcarp/django | tests/m2m_signals/tests.py | 271 | 15982 | """
Testing signals emitted on changing m2m relations.
"""
from django.db import models
from django.test import TestCase
from .models import Car, Part, Person, SportsCar
class ManyToManySignalsTest(TestCase):
def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
message = {
'instance': kwargs['instance'],
'action': kwargs['action'],
'reverse': kwargs['reverse'],
'model': kwargs['model'],
}
if kwargs['pk_set']:
message['objects'] = list(
kwargs['model'].objects.filter(pk__in=kwargs['pk_set'])
)
self.m2m_changed_messages.append(message)
def setUp(self):
self.m2m_changed_messages = []
self.vw = Car.objects.create(name='VW')
self.bmw = Car.objects.create(name='BMW')
self.toyota = Car.objects.create(name='Toyota')
self.wheelset = Part.objects.create(name='Wheelset')
self.doors = Part.objects.create(name='Doors')
self.engine = Part.objects.create(name='Engine')
self.airbag = Part.objects.create(name='Airbag')
self.sunroof = Part.objects.create(name='Sunroof')
self.alice = Person.objects.create(name='Alice')
self.bob = Person.objects.create(name='Bob')
self.chuck = Person.objects.create(name='Chuck')
self.daisy = Person.objects.create(name='Daisy')
def tearDown(self):
# disconnect all signal handlers
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def _initialize_signal_car(self, add_default_parts_before_set_signal=False):
""" Install a listener on the two m2m relations. """
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
if add_default_parts_before_set_signal:
# adding a default part to our car - no signal listener installed
self.vw.default_parts.add(self.sunroof)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
def test_m2m_relations_add_remove_clear(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the BMW and Toyota some doors as well
self.doors.car_set.add(self.bmw, self.toyota)
expected_messages.append({
'instance': self.doors,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.doors,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_remove_relation(self):
self._initialize_signal_car()
# remove the engine from the self.vw and the airbag (which is not set
# but is returned)
self.vw.default_parts.remove(self.engine, self.airbag)
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
}, {
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
}
])
def test_m2m_relations_signals_give_the_self_vw_some_optional_parts(self):
expected_messages = []
self._initialize_signal_car()
# give the self.vw some optional parts (second relation to same model)
self.vw.optional_parts.add(self.airbag, self.sunroof)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# add airbag to all the cars (even though the self.vw already has one)
self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_reverse_relation_with_custom_related_name(self):
self._initialize_signal_car()
# remove airbag from the self.vw (reverse relation with custom
# related_name)
self.airbag.cars_optional.remove(self.vw)
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.airbag,
'action': 'pre_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
}, {
'instance': self.airbag,
'action': 'post_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
}
])
def test_m2m_relations_signals_clear_all_parts_of_the_self_vw(self):
self._initialize_signal_car()
# clear all parts of the self.vw
self.vw.default_parts.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
}, {
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
}
])
def test_m2m_relations_signals_all_the_doors_off_of_cars(self):
self._initialize_signal_car()
# take all the doors off of cars
self.doors.car_set.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.doors,
'action': 'pre_clear',
'reverse': True,
'model': Car,
}, {
'instance': self.doors,
'action': 'post_clear',
'reverse': True,
'model': Car,
}
])
def test_m2m_relations_signals_reverse_relation(self):
self._initialize_signal_car()
# take all the airbags off of cars (clear reverse relation with custom
# related_name)
self.airbag.cars_optional.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.airbag,
'action': 'pre_clear',
'reverse': True,
'model': Car,
}, {
'instance': self.airbag,
'action': 'post_clear',
'reverse': True,
'model': Car,
}
])
def test_m2m_relations_signals_alternative_ways(self):
expected_messages = []
self._initialize_signal_car()
# alternative ways of setting relation:
self.vw.default_parts.create(name='Windows')
p6 = Part.objects.get(name='Windows')
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# direct assignment clears the set first, then adds
self.vw.default_parts = [self.wheelset, self.doors, self.engine]
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_clearing_removing(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# set by clearing.
self.vw.default_parts.set([self.wheelset, self.doors, self.engine], clear=True)
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# set by only removing what's necessary.
self.vw.default_parts.set([self.wheelset, self.doors], clear=False)
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.engine],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.engine],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_when_inheritance(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# Check that signals still work when model inheritance is involved
c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
c4b = Car.objects.get(name='Bugatti')
c4.default_parts = [self.doors]
expected_messages.append({
'instance': c4,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
expected_messages.append({
'instance': c4,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.engine.car_set.add(c4)
expected_messages.append({
'instance': self.engine,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
expected_messages.append({
'instance': self.engine,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def _initialize_signal_person(self):
# Install a listener on the two m2m relations.
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def test_m2m_relations_with_self_add_friends(self):
self._initialize_signal_person()
self.alice.friends = [self.bob, self.chuck]
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
}, {
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
}
])
def test_m2m_relations_with_self_add_fan(self):
self._initialize_signal_person()
self.alice.fans = [self.daisy]
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
}, {
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
}
])
def test_m2m_relations_with_self_add_idols(self):
self._initialize_signal_person()
self.chuck.idols = [self.alice, self.bob]
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.chuck,
'action': 'pre_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
}, {
'instance': self.chuck,
'action': 'post_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
}
])
| bsd-3-clause |
ShanghaiTimes/Audacity2015 | lib-src/lv2/lv2/plugins/eg-sampler.lv2/waflib/extras/autowaf.py | 176 | 22430 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import glob
import os
import subprocess
import sys
from waflib import Configure,Context,Logs,Node,Options,Task,Utils
from waflib.TaskGen import feature,before,after
global g_is_child
g_is_child=False
global g_step
g_step=0
@feature('c','cxx')
@after('apply_incpaths')
def include_config_h(self):
self.env.append_value('INCPATHS',self.bld.bldnode.abspath())
def set_options(opt,debug_by_default=False):
global g_step
if g_step>0:
return
dirs_options=opt.add_option_group('Installation directories','')
for k in('--prefix','--destdir'):
option=opt.parser.get_option(k)
if option:
opt.parser.remove_option(k)
dirs_options.add_option(option)
dirs_options.add_option('--bindir',type='string',help="Executable programs [Default: PREFIX/bin]")
dirs_options.add_option('--configdir',type='string',help="Configuration data [Default: PREFIX/etc]")
dirs_options.add_option('--datadir',type='string',help="Shared data [Default: PREFIX/share]")
dirs_options.add_option('--includedir',type='string',help="Header files [Default: PREFIX/include]")
dirs_options.add_option('--libdir',type='string',help="Libraries [Default: PREFIX/lib]")
dirs_options.add_option('--mandir',type='string',help="Manual pages [Default: DATADIR/man]")
dirs_options.add_option('--docdir',type='string',help="HTML documentation [Default: DATADIR/doc]")
if debug_by_default:
opt.add_option('--optimize',action='store_false',default=True,dest='debug',help="Build optimized binaries")
else:
opt.add_option('--debug',action='store_true',default=False,dest='debug',help="Build debuggable binaries")
opt.add_option('--pardebug',action='store_true',default=False,dest='pardebug',help="Build parallel-installable debuggable libraries with D suffix")
opt.add_option('--grind',action='store_true',default=False,dest='grind',help="Run tests in valgrind")
opt.add_option('--strict',action='store_true',default=False,dest='strict',help="Use strict compiler flags and show all warnings")
opt.add_option('--ultra-strict',action='store_true',default=False,dest='ultra_strict',help="Use even stricter compiler flags (likely to trigger many warnings in library headers)")
opt.add_option('--docs',action='store_true',default=False,dest='docs',help="Build documentation - requires doxygen")
opt.add_option('--lv2-user',action='store_true',default=False,dest='lv2_user',help="Install LV2 bundles to user location")
opt.add_option('--lv2-system',action='store_true',default=False,dest='lv2_system',help="Install LV2 bundles to system location")
dirs_options.add_option('--lv2dir',type='string',help="LV2 bundles [Default: LIBDIR/lv2]")
g_step=1
def check_header(conf,lang,name,define='',mandatory=True):
includes=''
if sys.platform=="darwin":
includes='/opt/local/include'
if lang=='c':
check_func=conf.check_cc
elif lang=='cxx':
check_func=conf.check_cxx
else:
Logs.error("Unknown header language `%s'"%lang)
return
if define!='':
check_func(header_name=name,includes=includes,define_name=define,mandatory=mandatory)
else:
check_func(header_name=name,includes=includes,mandatory=mandatory)
def nameify(name):
return name.replace('/','_').replace('++','PP').replace('-','_').replace('.','_')
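# For example, nameify('lv2/lv2plug.in') returns 'lv2_lv2plug_in' and
# nameify('libstdc++') returns 'libstdcPP'.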
def define(conf,var_name,value):
conf.define(var_name,value)
conf.env[var_name]=value
def check_pkg(conf,name,**args):
if args['uselib_store'].lower()in conf.env['AUTOWAF_LOCAL_LIBS']:
return
class CheckType:
OPTIONAL=1
MANDATORY=2
var_name='CHECKED_'+nameify(args['uselib_store'])
check=not var_name in conf.env
mandatory=not'mandatory'in args or args['mandatory']
if not check and'atleast_version'in args:
checked_version=conf.env['VERSION_'+name]
if checked_version and checked_version<args['atleast_version']:
check=True;
if not check and mandatory and conf.env[var_name]==CheckType.OPTIONAL:
check=True;
if check:
found=None
pkg_var_name='PKG_'+name.replace('-','_')
pkg_name=name
if conf.env.PARDEBUG:
args['mandatory']=False
found=conf.check_cfg(package=pkg_name+'D',args="--cflags --libs",**args)
if found:
pkg_name+='D'
if mandatory:
args['mandatory']=True
if not found:
found=conf.check_cfg(package=pkg_name,args="--cflags --libs",**args)
if found:
conf.env[pkg_var_name]=pkg_name
if'atleast_version'in args:
conf.env['VERSION_'+name]=args['atleast_version']
if mandatory:
conf.env[var_name]=CheckType.MANDATORY
else:
conf.env[var_name]=CheckType.OPTIONAL
def normpath(path):
if sys.platform=='win32':
return os.path.normpath(path).replace('\\','/')
else:
return os.path.normpath(path)
def configure(conf):
global g_step
if g_step>1:
return
def append_cxx_flags(flags):
conf.env.append_value('CFLAGS',flags)
conf.env.append_value('CXXFLAGS',flags)
print('')
display_header('Global Configuration')
if Options.options.docs:
conf.load('doxygen')
conf.env['DOCS']=Options.options.docs
conf.env['DEBUG']=Options.options.debug or Options.options.pardebug
conf.env['PARDEBUG']=Options.options.pardebug
conf.env['PREFIX']=normpath(os.path.abspath(os.path.expanduser(conf.env['PREFIX'])))
def config_dir(var,opt,default):
if opt:
conf.env[var]=normpath(opt)
else:
conf.env[var]=normpath(default)
opts=Options.options
prefix=conf.env['PREFIX']
config_dir('BINDIR',opts.bindir,os.path.join(prefix,'bin'))
config_dir('SYSCONFDIR',opts.configdir,os.path.join(prefix,'etc'))
config_dir('DATADIR',opts.datadir,os.path.join(prefix,'share'))
config_dir('INCLUDEDIR',opts.includedir,os.path.join(prefix,'include'))
config_dir('LIBDIR',opts.libdir,os.path.join(prefix,'lib'))
config_dir('MANDIR',opts.mandir,os.path.join(conf.env['DATADIR'],'man'))
config_dir('DOCDIR',opts.docdir,os.path.join(conf.env['DATADIR'],'doc'))
if Options.options.lv2dir:
conf.env['LV2DIR']=Options.options.lv2dir
elif Options.options.lv2_user:
if sys.platform=="darwin":
conf.env['LV2DIR']=os.path.join(os.getenv('HOME'),'Library/Audio/Plug-Ins/LV2')
elif sys.platform=="win32":
conf.env['LV2DIR']=os.path.join(os.getenv('APPDATA'),'LV2')
else:
conf.env['LV2DIR']=os.path.join(os.getenv('HOME'),'.lv2')
elif Options.options.lv2_system:
if sys.platform=="darwin":
conf.env['LV2DIR']='/Library/Audio/Plug-Ins/LV2'
elif sys.platform=="win32":
conf.env['LV2DIR']=os.path.join(os.getenv('COMMONPROGRAMFILES'),'LV2')
else:
conf.env['LV2DIR']=os.path.join(conf.env['LIBDIR'],'lv2')
else:
conf.env['LV2DIR']=os.path.join(conf.env['LIBDIR'],'lv2')
conf.env['LV2DIR']=normpath(conf.env['LV2DIR'])
if Options.options.docs:
doxygen=conf.find_program('doxygen')
if not doxygen:
conf.fatal("Doxygen is required to build with --docs")
dot=conf.find_program('dot')
if not dot:
conf.fatal("Graphviz (dot) is required to build with --docs")
if Options.options.debug:
if conf.env['MSVC_COMPILER']:
conf.env['CFLAGS']=['/Od','/Zi','/MTd']
conf.env['CXXFLAGS']=['/Od','/Zi','/MTd']
conf.env['LINKFLAGS']=['/DEBUG']
else:
conf.env['CFLAGS']=['-O0','-g']
conf.env['CXXFLAGS']=['-O0','-g']
else:
if conf.env['MSVC_COMPILER']:
conf.env['CFLAGS']=['/MD']
conf.env['CXXFLAGS']=['/MD']
append_cxx_flags(['-DNDEBUG'])
if Options.options.ultra_strict:
Options.options.strict=True
conf.env.append_value('CFLAGS',['-Wredundant-decls','-Wstrict-prototypes','-Wmissing-prototypes','-Wcast-qual'])
conf.env.append_value('CXXFLAGS',['-Wcast-qual'])
if Options.options.strict:
conf.env.append_value('CFLAGS',['-pedantic','-Wshadow'])
conf.env.append_value('CXXFLAGS',['-ansi','-Wnon-virtual-dtor','-Woverloaded-virtual'])
append_cxx_flags(['-Wall','-Wcast-align','-Wextra','-Wmissing-declarations','-Wno-unused-parameter','-Wstrict-overflow','-Wundef','-Wwrite-strings','-fstrict-overflow'])
if not conf.check_cc(fragment='''
#ifndef __clang__
#error
#endif
int main() { return 0; }''',features='c',mandatory=False,execute=False,msg='Checking for clang'):
append_cxx_flags(['-Wlogical-op','-Wsuggest-attribute=noreturn','-Wunsafe-loop-optimizations'])
if not conf.env['MSVC_COMPILER']:
append_cxx_flags(['-fshow-column'])
conf.env.prepend_value('CFLAGS','-I'+os.path.abspath('.'))
conf.env.prepend_value('CXXFLAGS','-I'+os.path.abspath('.'))
display_msg(conf,"Install prefix",conf.env['PREFIX'])
display_msg(conf,"Debuggable build",str(conf.env['DEBUG']))
display_msg(conf,"Build documentation",str(conf.env['DOCS']))
print('')
g_step=2
def set_c99_mode(conf):
if conf.env.MSVC_COMPILER:
conf.env.append_unique('CFLAGS',['-TP'])
else:
conf.env.append_unique('CFLAGS',['-std=c99'])
def set_local_lib(conf,name,has_objects):
var_name='HAVE_'+nameify(name.upper())
define(conf,var_name,1)
if has_objects:
if type(conf.env['AUTOWAF_LOCAL_LIBS'])!=dict:
conf.env['AUTOWAF_LOCAL_LIBS']={}
conf.env['AUTOWAF_LOCAL_LIBS'][name.lower()]=True
else:
if type(conf.env['AUTOWAF_LOCAL_HEADERS'])!=dict:
conf.env['AUTOWAF_LOCAL_HEADERS']={}
conf.env['AUTOWAF_LOCAL_HEADERS'][name.lower()]=True
def append_property(obj,key,val):
if hasattr(obj,key):
setattr(obj,key,getattr(obj,key)+val)
else:
setattr(obj,key,val)
def use_lib(bld,obj,libs):
abssrcdir=os.path.abspath('.')
libs_list=libs.split()
for l in libs_list:
in_headers=l.lower()in bld.env['AUTOWAF_LOCAL_HEADERS']
in_libs=l.lower()in bld.env['AUTOWAF_LOCAL_LIBS']
if in_libs:
append_property(obj,'use',' lib%s '%l.lower())
append_property(obj,'framework',bld.env['FRAMEWORK_'+l])
if in_headers or in_libs:
inc_flag='-iquote '+os.path.join(abssrcdir,l.lower())
for f in['CFLAGS','CXXFLAGS']:
if not inc_flag in bld.env[f]:
bld.env.prepend_value(f,inc_flag)
else:
append_property(obj,'uselib',' '+l)
@feature('c','cxx')
@before('apply_link')
def version_lib(self):
if sys.platform=='win32':
self.vnum=None
if self.env['PARDEBUG']:
applicable=['cshlib','cxxshlib','cstlib','cxxstlib']
if[x for x in applicable if x in self.features]:
self.target=self.target+'D'
def set_lib_env(conf,name,version):
'Set up environment for local library as if found via pkg-config.'
NAME=name.upper()
major_ver=version.split('.')[0]
pkg_var_name='PKG_'+name.replace('-','_')+'_'+major_ver
lib_name='%s-%s'%(name,major_ver)
if conf.env.PARDEBUG:
lib_name+='D'
conf.env[pkg_var_name]=lib_name
conf.env['INCLUDES_'+NAME]=['${INCLUDEDIR}/%s-%s'%(name,major_ver)]
conf.env['LIBPATH_'+NAME]=[conf.env.LIBDIR]
conf.env['LIB_'+NAME]=[lib_name]
def display_header(title):
Logs.pprint('BOLD',title)
def display_msg(conf,msg,status=None,color=None):
color='CYAN'
if type(status)==bool and status or status=="True":
color='GREEN'
elif type(status)==bool and not status or status=="False":
color='YELLOW'
Logs.pprint('BOLD'," *",sep='')
Logs.pprint('NORMAL',"%s"%msg.ljust(conf.line_just-3),sep='')
Logs.pprint('BOLD',":",sep='')
Logs.pprint(color,status)
def link_flags(env,lib):
return' '.join(map(lambda x:env['LIB_ST']%x,env['LIB_'+lib]))
def compile_flags(env,lib):
return' '.join(map(lambda x:env['CPPPATH_ST']%x,env['INCLUDES_'+lib]))
def set_recursive():
global g_is_child
g_is_child=True
def is_child():
global g_is_child
return g_is_child
def build_pc(bld,name,version,version_suffix,libs,subst_dict={}):
'''Build a pkg-config file for a library.
name -- uppercase variable name (e.g. 'SOMENAME')
version -- version string (e.g. '1.2.3')
version_suffix -- name version suffix (e.g. '2')
libs -- string/list of dependencies (e.g. 'LIBFOO GLIB')
'''
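# Minimal usage sketch (hypothetical names; assumes a 'somename.pc.in' template
# exists): build_pc(bld, 'SOMENAME', '1.2.3', '2', 'GLIB') writes
# somename-2.pc to LIBDIR/pkgconfig with GLIB_LIBS/GLIB_CFLAGS substituted.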
pkg_prefix=bld.env['PREFIX']
if pkg_prefix[-1]=='/':
pkg_prefix=pkg_prefix[:-1]
target=name.lower()
if version_suffix!='':
target+='-'+version_suffix
if bld.env['PARDEBUG']:
target+='D'
target+='.pc'
libdir=bld.env['LIBDIR']
if libdir.startswith(pkg_prefix):
libdir=libdir.replace(pkg_prefix,'${exec_prefix}')
includedir=bld.env['INCLUDEDIR']
if includedir.startswith(pkg_prefix):
includedir=includedir.replace(pkg_prefix,'${prefix}')
obj=bld(features='subst',source='%s.pc.in'%name.lower(),target=target,install_path=os.path.join(bld.env['LIBDIR'],'pkgconfig'),exec_prefix='${prefix}',PREFIX=pkg_prefix,EXEC_PREFIX='${prefix}',LIBDIR=libdir,INCLUDEDIR=includedir)
if type(libs)!=list:
libs=libs.split()
subst_dict[name+'_VERSION']=version
subst_dict[name+'_MAJOR_VERSION']=version[0:version.find('.')]
for i in libs:
subst_dict[i+'_LIBS']=link_flags(bld.env,i)
lib_cflags=compile_flags(bld.env,i)
if lib_cflags=='':
lib_cflags=' '
subst_dict[i+'_CFLAGS']=lib_cflags
obj.__dict__.update(subst_dict)
def build_dir(name,subdir):
if is_child():
return os.path.join('build',name,subdir)
else:
return os.path.join('build',subdir)
def make_simple_dox(name):
name=name.lower()
NAME=name.upper()
try:
top=os.getcwd()
os.chdir(build_dir(name,'doc/html'))
page='group__%s.html'%name
if not os.path.exists(page):
return
for i in[['%s_API '%NAME,''],['%s_DEPRECATED '%NAME,''],['group__%s.html'%name,''],[' ',''],['<script.*><\/script>',''],['<hr\/><a name="details" id="details"><\/a><h2>.*<\/h2>',''],['<link href=\"tabs.css\" rel=\"stylesheet\" type=\"text\/css\"\/>',''],['<img class=\"footer\" src=\"doxygen.png\" alt=\"doxygen\"\/>','Doxygen']]:
os.system("sed -i 's/%s/%s/g' %s"%(i[0],i[1],page))
os.rename('group__%s.html'%name,'index.html')
for i in(glob.glob('*.png')+glob.glob('*.html')+glob.glob('*.js')+glob.glob('*.css')):
if i!='index.html'and i!='style.css':
os.remove(i)
os.chdir(top)
os.chdir(build_dir(name,'doc/man/man3'))
for i in glob.glob('*.3'):
os.system("sed -i 's/%s_API //' %s"%(NAME,i))
for i in glob.glob('_*'):
os.remove(i)
os.chdir(top)
except Exception as e:
Logs.error("Failed to fix up %s documentation: %s"%(name,e))
def build_dox(bld,name,version,srcdir,blddir,outdir='',versioned=True):
if not bld.env['DOCS']:
return
if is_child():
src_dir=os.path.join(srcdir,name.lower())
doc_dir=os.path.join(blddir,name.lower(),'doc')
else:
src_dir=srcdir
doc_dir=os.path.join(blddir,'doc')
subst_tg=bld(features='subst',source='doc/reference.doxygen.in',target='doc/reference.doxygen',install_path='',name='doxyfile')
subst_dict={name+'_VERSION':version,name+'_SRCDIR':os.path.abspath(src_dir),name+'_DOC_DIR':os.path.abspath(doc_dir)}
subst_tg.__dict__.update(subst_dict)
subst_tg.post()
docs=bld(features='doxygen',doxyfile='doc/reference.doxygen')
docs.post()
outname=name.lower()
if versioned:
outname+='-%d'%int(version[0:version.find('.')])
bld.install_files(os.path.join('${DOCDIR}',outname,outdir,'html'),bld.path.get_bld().ant_glob('doc/html/*'))
for i in range(1,8):
bld.install_files('${MANDIR}/man%d'%i,bld.path.get_bld().ant_glob('doc/man/man%d/*'%i,excl='**/_*'))
def build_version_files(header_path,source_path,domain,major,minor,micro):
header_path=os.path.abspath(header_path)
source_path=os.path.abspath(source_path)
text="int "+domain+"_major_version = "+str(major)+";\n"
text+="int "+domain+"_minor_version = "+str(minor)+";\n"
text+="int "+domain+"_micro_version = "+str(micro)+";\n"
try:
o=open(source_path,'w')
o.write(text)
o.close()
except IOError:
Logs.error('Failed to open %s for writing\n'%source_path)
sys.exit(-1)
text="#ifndef __"+domain+"_version_h__\n"
text+="#define __"+domain+"_version_h__\n"
text+="extern const char* "+domain+"_revision;\n"
text+="extern int "+domain+"_major_version;\n"
text+="extern int "+domain+"_minor_version;\n"
text+="extern int "+domain+"_micro_version;\n"
text+="#endif /* __"+domain+"_version_h__ */\n"
try:
o=open(header_path,'w')
o.write(text)
o.close()
except IOError:
Logs.warn('Failed to open %s for writing\n'%header_path)
sys.exit(-1)
return None
def build_i18n_pot(bld,srcdir,dir,name,sources,copyright_holder=None):
Logs.info('Generating pot file from %s'%name)
pot_file='%s.pot'%name
cmd=['xgettext','--keyword=_','--keyword=N_','--keyword=S_','--from-code=UTF-8','-o',pot_file]
if copyright_holder:
cmd+=['--copyright-holder="%s"'%copyright_holder]
cmd+=sources
Logs.info('Updating '+pot_file)
subprocess.call(cmd,cwd=os.path.join(srcdir,dir))
def build_i18n_po(bld,srcdir,dir,name,sources,copyright_holder=None):
pwd=os.getcwd()
os.chdir(os.path.join(srcdir,dir))
pot_file='%s.pot'%name
po_files=glob.glob('po/*.po')
for po_file in po_files:
cmd=['msgmerge','--update',po_file,pot_file]
Logs.info('Updating '+po_file)
subprocess.call(cmd)
os.chdir(pwd)
def build_i18n_mo(bld,srcdir,dir,name,sources,copyright_holder=None):
pwd=os.getcwd()
os.chdir(os.path.join(srcdir,dir))
pot_file='%s.pot'%name
po_files=glob.glob('po/*.po')
for po_file in po_files:
mo_file=po_file.replace('.po','.mo')
cmd=['msgfmt','-c','-f','-o',mo_file,po_file]
Logs.info('Generating '+po_file)
subprocess.call(cmd)
os.chdir(pwd)
def build_i18n(bld,srcdir,dir,name,sources,copyright_holder=None):
build_i18n_pot(bld,srcdir,dir,name,sources,copyright_holder)
build_i18n_po(bld,srcdir,dir,name,sources,copyright_holder)
build_i18n_mo(bld,srcdir,dir,name,sources,copyright_holder)
def cd_to_build_dir(ctx,appname):
orig_dir=os.path.abspath(os.curdir)
top_level=(len(ctx.stack_path)>1)
if top_level:
os.chdir(os.path.join('build',appname))
else:
os.chdir('build')
Logs.pprint('GREEN',"Waf: Entering directory `%s'"%os.path.abspath(os.getcwd()))
def cd_to_orig_dir(ctx,child):
if child:
os.chdir(os.path.join('..','..'))
else:
os.chdir('..')
def pre_test(ctx,appname,dirs=['src']):
diropts=''
for i in dirs:
diropts+=' -d '+i
cd_to_build_dir(ctx,appname)
clear_log=open('lcov-clear.log','w')
try:
try:
subprocess.call(('lcov %s -z'%diropts).split(),stdout=clear_log,stderr=clear_log)
except:
Logs.warn('Failed to run lcov, no coverage report will be generated')
finally:
clear_log.close()
def post_test(ctx,appname,dirs=['src'],remove=['*boost*','c++*']):
diropts=''
for i in dirs:
diropts+=' -d '+i
coverage_log=open('lcov-coverage.log','w')
coverage_lcov=open('coverage.lcov','w')
coverage_stripped_lcov=open('coverage-stripped.lcov','w')
try:
try:
base='.'
if g_is_child:
base='..'
subprocess.call(('lcov -c %s -b %s'%(diropts,base)).split(),stdout=coverage_lcov,stderr=coverage_log)
subprocess.call(['lcov','--remove','coverage.lcov']+remove,stdout=coverage_stripped_lcov,stderr=coverage_log)
if not os.path.isdir('coverage'):
os.makedirs('coverage')
subprocess.call('genhtml -o coverage coverage-stripped.lcov'.split(),stdout=coverage_log,stderr=coverage_log)
except:
Logs.warn('Failed to run lcov, no coverage report will be generated')
finally:
coverage_stripped_lcov.close()
coverage_lcov.close()
coverage_log.close()
print('')
Logs.pprint('GREEN',"Waf: Leaving directory `%s'"%os.path.abspath(os.getcwd()))
top_level=(len(ctx.stack_path)>1)
if top_level:
cd_to_orig_dir(ctx,top_level)
print('')
Logs.pprint('BOLD','Coverage:',sep='')
print('<file://%s>\n\n'%os.path.abspath('coverage/index.html'))
def run_test(ctx,appname,test,desired_status=0,dirs=['src'],name='',header=False):
s=test
if type(test)==type([]):
s=' '.join(test)
if header:
Logs.pprint('BOLD','** Test',sep='')
Logs.pprint('NORMAL','%s'%s)
cmd=test
if Options.options.grind:
cmd='valgrind '+test
if subprocess.call(cmd,shell=True)==desired_status:
Logs.pprint('GREEN','** Pass %s'%name)
return True
else:
Logs.pprint('RED','** FAIL %s'%name)
return False
def run_tests(ctx,appname,tests,desired_status=0,dirs=['src'],name='*',headers=False):
failures=0
diropts=''
for i in dirs:
diropts+=' -d '+i
for i in tests:
if not run_test(ctx,appname,i,desired_status,dirs,i,headers):
failures+=1
print('')
if failures==0:
Logs.pprint('GREEN','** Pass: All %s.%s tests passed'%(appname,name))
else:
Logs.pprint('RED','** FAIL: %d %s.%s tests failed'%(failures,appname,name))
def run_ldconfig(ctx):
if(ctx.cmd=='install'and not ctx.env['RAN_LDCONFIG']and ctx.env['LIBDIR']and not'DESTDIR'in os.environ and not Options.options.destdir):
try:
Logs.info("Waf: Running `/sbin/ldconfig %s'"%ctx.env['LIBDIR'])
subprocess.call(['/sbin/ldconfig',ctx.env['LIBDIR']])
ctx.env['RAN_LDCONFIG']=True
except:
pass
def write_news(name,in_files,out_file,top_entries=None,extra_entries=None):
import rdflib
import textwrap
from time import strftime,strptime
doap=rdflib.Namespace('http://usefulinc.com/ns/doap#')
dcs=rdflib.Namespace('http://ontologi.es/doap-changeset#')
rdfs=rdflib.Namespace('http://www.w3.org/2000/01/rdf-schema#')
foaf=rdflib.Namespace('http://xmlns.com/foaf/0.1/')
rdf=rdflib.Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
m=rdflib.ConjunctiveGraph()
try:
for i in in_files:
m.parse(i,format='n3')
except:
Logs.warn('Error parsing data, unable to generate NEWS')
return
proj=m.value(None,rdf.type,doap.Project)
for f in m.triples([proj,rdfs.seeAlso,None]):
if f[2].endswith('.ttl'):
m.parse(f[2],format='n3')
entries={}
for r in m.triples([proj,doap.release,None]):
release=r[2]
revision=m.value(release,doap.revision,None)
date=m.value(release,doap.created,None)
blamee=m.value(release,dcs.blame,None)
changeset=m.value(release,dcs.changeset,None)
dist=m.value(release,doap['file-release'],None)
if revision and date and blamee and changeset:
entry='%s (%s) stable;\n'%(name,revision)
for i in m.triples([changeset,dcs.item,None]):
item=textwrap.wrap(m.value(i[2],rdfs.label,None),width=79)
entry+='\n * '+'\n '.join(item)
if dist and top_entries is not None:
if not str(dist)in top_entries:
top_entries[str(dist)]=[]
top_entries[str(dist)]+=['%s: %s'%(name,'\n '.join(item))]
if extra_entries:
for i in extra_entries[str(dist)]:
entry+='\n * '+i
entry+='\n\n --'
blamee_name=m.value(blamee,foaf.name,None)
blamee_mbox=m.value(blamee,foaf.mbox,None)
if blamee_name and blamee_mbox:
entry+=' %s <%s>'%(blamee_name,blamee_mbox.replace('mailto:',''))
entry+=' %s\n\n'%(strftime('%a, %d %b %Y %H:%M:%S +0000',strptime(date,'%Y-%m-%d')))
entries[(date,revision)]=entry
else:
Logs.warn('Ignored incomplete %s release description'%name)
if len(entries)>0:
news=open(out_file,'w')
for e in sorted(entries.keys(),reverse=True):
news.write(entries[e])
news.close()
| gpl-2.0 |
miurahr/translate | translate/tools/pocount.py | 24 | 12909 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2003-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Count strings and words for supported localization files.
These include: XLIFF, TMX, Gettext PO and MO, Qt .ts and .qm, Wordfast TM, etc.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/pocount.html
for examples and usage instructions.
"""
from __future__ import print_function
import logging
import os
import sys
from argparse import ArgumentParser
from translate.storage import factory, statsdb
logger = logging.getLogger(__name__)
# define style constants
style_full, style_csv, style_short_strings, style_short_words = range(4)
# default output style
default_style = style_full
def calcstats_old(filename):
"""This is the previous implementation of calcstats() and is left for
comparison and debuging purposes."""
# ignore totally blank or header units
try:
store = factory.getobject(filename)
except ValueError as e:
logger.warning(e)
return {}
units = filter(lambda unit: unit.istranslatable(), store.units)
translated = translatedmessages(units)
fuzzy = fuzzymessages(units)
review = filter(lambda unit: unit.isreview(), units)
untranslated = untranslatedmessages(units)
wordcounts = dict(map(lambda unit: (unit, statsdb.wordsinunit(unit)), units))
sourcewords = lambda elementlist: sum(map(lambda unit: wordcounts[unit][0], elementlist))
targetwords = lambda elementlist: sum(map(lambda unit: wordcounts[unit][1], elementlist))
stats = {}
# units
stats["translated"] = len(translated)
stats["fuzzy"] = len(fuzzy)
stats["untranslated"] = len(untranslated)
stats["review"] = len(review)
stats["total"] = stats["translated"] + \
stats["fuzzy"] + \
stats["untranslated"]
# words
stats["translatedsourcewords"] = sourcewords(translated)
stats["translatedtargetwords"] = targetwords(translated)
stats["fuzzysourcewords"] = sourcewords(fuzzy)
stats["untranslatedsourcewords"] = sourcewords(untranslated)
stats["reviewsourcewords"] = sourcewords(review)
stats["totalsourcewords"] = stats["translatedsourcewords"] + \
stats["fuzzysourcewords"] + \
stats["untranslatedsourcewords"]
return stats
def calcstats(filename):
statscache = statsdb.StatsCache()
return statscache.filetotals(filename, extended=True)
def summarize(title, stats, style=style_full, indent=8, incomplete_only=False):
"""Print summary for a .po file in specified format.
:param title: name of .po file
:param stats: array with translation statistics for the file specified
:param indent: indentation of the 2nd column (length of longest filename)
:param incomplete_only: omit fully translated files
:type incomplete_only: Boolean
:rtype: Boolean
:return: 1 if counting incomplete files (incomplete_only=True) and the
file is completely translated, 0 otherwise
"""
def percent(numerator, denominator):
if denominator == 0:
return 0
else:
return numerator * 100 / denominator
if incomplete_only and (stats["total"] == stats["translated"]):
return 1
if (style == style_csv):
print("%s, " % title, end=' ')
print("%d, %d, %d," % (stats["translated"],
stats["translatedsourcewords"],
stats["translatedtargetwords"]), end=' ')
print("%d, %d," % (stats["fuzzy"], stats["fuzzysourcewords"]), end=' ')
print("%d, %d," % (stats["untranslated"],
stats["untranslatedsourcewords"]), end=' ')
print("%d, %d" % (stats["total"], stats["totalsourcewords"]), end=' ')
if stats["review"] > 0:
print(", %d, %d" % (stats["review"], stats["reviewsourdcewords"]), end=' ')
print()
elif (style == style_short_strings):
spaces = " " * (indent - len(title))
print("%s%s strings: total: %d\t| %dt\t%df\t%du\t| %d%%t\t%d%%f\t%d%%u" % (
title, spaces,
stats["total"], stats["translated"], stats["fuzzy"], stats["untranslated"],
percent(stats["translated"], stats["total"]),
percent(stats["fuzzy"], stats["total"]),
percent(stats["untranslated"], stats["total"])))
elif (style == style_short_words):
spaces = " " * (indent - len(title))
print("%s%s source words: total: %d\t| %dt\t%df\t%du\t| %d%%t\t%d%%f\t%d%%u" % (
title, spaces,
stats["totalsourcewords"], stats["translatedsourcewords"], stats["fuzzysourcewords"], stats["untranslatedsourcewords"],
percent(stats["translatedsourcewords"], stats["totalsourcewords"]),
percent(stats["fuzzysourcewords"], stats["totalsourcewords"]),
percent(stats["untranslatedsourcewords"], stats["totalsourcewords"])))
else: # style == style_full
print(title)
print("type strings words (source) words (translation)")
print("translated: %5d (%3d%%) %10d (%3d%%) %15d" % (
stats["translated"],
percent(stats["translated"], stats["total"]),
stats["translatedsourcewords"],
percent(stats["translatedsourcewords"], stats["totalsourcewords"]),
stats["translatedtargetwords"]))
print("fuzzy: %5d (%3d%%) %10d (%3d%%) n/a" % (
stats["fuzzy"],
percent(stats["fuzzy"], stats["total"]),
stats["fuzzysourcewords"],
percent(stats["fuzzysourcewords"], stats["totalsourcewords"])))
print("untranslated: %5d (%3d%%) %10d (%3d%%) n/a" % (
stats["untranslated"],
percent(stats["untranslated"], stats["total"]),
stats["untranslatedsourcewords"],
percent(stats["untranslatedsourcewords"], stats["totalsourcewords"])))
print("Total: %5d %17d %22d" % (
stats["total"],
stats["totalsourcewords"],
stats["translatedtargetwords"]))
if "extended" in stats:
print("")
for state, e_stats in stats["extended"].iteritems():
print("%s: %5d (%3d%%) %10d (%3d%%) %15d" % (
state, e_stats["units"], percent(e_stats["units"], stats["total"]),
e_stats["sourcewords"], percent(e_stats["sourcewords"], stats["totalsourcewords"]),
e_stats["targetwords"]))
if stats["review"] > 0:
print("review: %5d %17d n/a" % (
stats["review"], stats["reviewsourcewords"]))
print()
return 0
def fuzzymessages(units):
return filter(lambda unit: unit.isfuzzy() and unit.target, units)
def translatedmessages(units):
return filter(lambda unit: unit.istranslated(), units)
def untranslatedmessages(units):
return filter(lambda unit: not (unit.istranslated() or unit.isfuzzy()) and unit.source, units)
class summarizer:
def __init__(self, filenames, style=default_style, incomplete_only=False):
self.totals = {}
self.filecount = 0
self.longestfilename = 0
self.style = style
self.incomplete_only = incomplete_only
self.complete_count = 0
if (self.style == style_csv):
print("""Filename, Translated Messages, Translated Source Words, Translated
Target Words, Fuzzy Messages, Fuzzy Source Words, Untranslated Messages,
Untranslated Source Words, Total Message, Total Source Words,
Review Messages, Review Source Words""")
if (self.style == style_short_strings or self.style == style_short_words):
for filename in filenames: # find longest filename
if (len(filename) > self.longestfilename):
self.longestfilename = len(filename)
for filename in filenames:
if not os.path.exists(filename):
logger.error("cannot process %s: does not exist", filename)
continue
elif os.path.isdir(filename):
self.handledir(filename)
else:
self.handlefile(filename)
if self.filecount > 1 and (self.style == style_full):
if self.incomplete_only:
summarize("TOTAL (incomplete only):", self.totals,
incomplete_only=True)
print("File count (incomplete): %5d" % (self.filecount - self.complete_count))
else:
summarize("TOTAL:", self.totals, incomplete_only=False)
print("File count: %5d" % (self.filecount))
print()
def updatetotals(self, stats):
"""Update self.totals with the statistics in stats."""
for key in stats.keys():
if key == "extended":
# FIXME: calculate extended totals
continue
if not key in self.totals:
self.totals[key] = 0
self.totals[key] += stats[key]
def handlefile(self, filename):
try:
stats = calcstats(filename)
self.updatetotals(stats)
self.complete_count += summarize(filename, stats, self.style,
self.longestfilename,
self.incomplete_only)
self.filecount += 1
except Exception: # This happens if we have a broken file.
logger.error(sys.exc_info()[1])
def handlefiles(self, dirname, filenames):
for filename in filenames:
pathname = os.path.join(dirname, filename)
if os.path.isdir(pathname):
self.handledir(pathname)
else:
self.handlefile(pathname)
def handledir(self, dirname):
path, name = os.path.split(dirname)
if name in ["CVS", ".svn", "_darcs", ".git", ".hg", ".bzr"]:
return
entries = os.listdir(dirname)
self.handlefiles(dirname, entries)
def main():
parser = ArgumentParser()
parser.add_argument("--incomplete", action="store_true", default=False,
dest="incomplete_only",
help="skip 100%% translated files.")
if sys.version_info[:2] <= (2, 6):
# Python 2.6 using argparse from PyPI cannot define a mutually
# exclusive group as a child of a group, but it works if it is a child
# of the parser. We lose the group title but the functionality works.
# See https://code.google.com/p/argparse/issues/detail?id=90
megroup = parser.add_mutually_exclusive_group()
else:
output_group = parser.add_argument_group("Output format")
megroup = output_group.add_mutually_exclusive_group()
megroup.add_argument("--full", action="store_const", const=style_full,
dest="style", default=style_full,
help="(default) statistics in full, verbose format")
megroup.add_argument("--csv", action="store_const", const=style_csv,
dest="style",
help="statistics in CSV format")
megroup.add_argument("--short", action="store_const", const=style_short_strings,
dest="style",
help="same as --short-strings")
megroup.add_argument("--short-strings", action="store_const",
const=style_short_strings, dest="style",
help="statistics of strings in short format - one line per file")
megroup.add_argument("--short-words", action="store_const",
const=style_short_words, dest="style",
help="statistics of words in short format - one line per file")
parser.add_argument("files", nargs="+")
args = parser.parse_args()
logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s")
summarizer(args.files, args.style, args.incomplete_only)
if __name__ == '__main__':
main()
| gpl-2.0 |
chenjiancan/FeelUOwn | src/base/player.py | 2 | 7398 | # -*- coding:utf8 -*-
from PyQt5.QtMultimedia import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from base.common import singleton
from base.logger import LOG
@singleton
class Player(QMediaPlayer):
"""allows the playing of a media source
The Ui interact with the player with specification.
make each Mediacontent correspond to a certain music model data
它也需要维护一个 已下载歌曲的数据库,防止重复下载或者缓存歌曲(暂时这样)
"""
signal_player_media_changed = pyqtSignal([dict], [QMediaContent])
signal_playlist_is_empty = pyqtSignal()
signal_playback_mode_changed = pyqtSignal([QMediaPlaylist.PlaybackMode])
signal_player_error = pyqtSignal([str])
def __init__(self, parent=None):
super().__init__(parent)
self.__music_list = list() # kept in sync with the playlist; stores song name, artist, etc. (items are music_model dicts)
self.__cache_list = list() # {id: music_id, content: media_content}
self.__playlist = QMediaPlaylist() # the playlist itself; items are QMediaContent objects
self.setPlaylist(self.__playlist)
self.init()
def init(self):
self.set_play_mode()
self.init_signal_binding()
def init_signal_binding(self):
self.__playlist.currentIndexChanged.connect(self.on_current_index_changed)
self.__playlist.playbackModeChanged.connect(self.on_playback_mode_changed)
self.error.connect(self.on_error_occured)
def set_play_mode(self, mode=4):
# item once: 0
# item in loop: 1
# sequential: 2
# loop: 3
# random: 4
self.__playlist.setPlaybackMode(mode)
def add_music(self, music_model):
"""向当前播放列表中添加一首音乐
1. 如果这首音乐已经存在于列表当中,返回Fasle 和 index.(添加失败)
2. 如果不存在,返回True 和 index=length-1.(添加成功)
这个函数保证了当前播放列表的歌曲不会重复
:param music_model:
:return:
"""
for i, music in enumerate(self.__music_list):
if music_model['id'] == music['id']:
return False, i
self.__music_list.append(music_model)
media_content = self.get_media_content_from_model(music_model)
self.__playlist.addMedia(media_content)
length = len(self.__music_list)
index = length - 1
return True, index
def remove_music(self, mid):
for i, music_model in enumerate(self.__music_list):
if mid == music_model['id']:
if self.__playlist.currentIndex() == i:
self.__playlist.removeMedia(i)
self.__music_list.pop(i)
LOG.info(u'Removing the currently playing song')
self.__playlist.next()
break
self.__playlist.removeMedia(i)
self.__music_list.pop(i)
break
for cache in self.__cache_list:
if mid == cache['id']:
self.__cache_list.remove(cache)
return True
def get_media_content_from_model(self, music_model):
# if music_model['id'] in downloaded
mid = music_model['id']
# check whether the song was played before and is already cached; the caching strategy should be improved later
for i, each in enumerate(self.__cache_list):
if mid == each['id']:
LOG.info(music_model['name'] + ' has been cached')
return self.__cache_list[i]['content']
return self.cache_music(music_model)
def cache_music(self, music_model):
url = music_model['url']
media_content = QMediaContent(QUrl(url))
cache = dict()
cache['id'] = music_model['id']
cache['content'] = media_content
self.__cache_list.append(cache)
return media_content
def set_music_list(self, music_list):
self.__music_list = []
self.__playlist.clear()
self.play(music_list[0])
for music in music_list:
self.add_music(music)
def is_music_in_list(self, mid):
"""
:param mid: the music ID
:return:
"""
for music in self.__music_list:
if mid == music['id']:
return True
return False
def play(self, music_model=None):
"""播放一首音乐
1. 如果music_model 不是None的话,就尝试将它加入当前播放列表,加入成功返回True, 否则返回False
:param music_model:
:return:
"""
if music_model is None:
super().play()
return False
# play a specific song
flag, index = self.add_music(music_model)
super().stop()
self.__playlist.setCurrentIndex(index)
super().play()
return flag
def when_playlist_empty(func):
def wrapper(self, *args, **kwargs):
if self.__playlist.isEmpty():
self.signal_playlist_is_empty.emit()
return
return func(self, *args, **kwargs)
return wrapper
def set_play_mode_random(self):
self.__playlist.setPlaybackMode(4)
def set_play_mode_loop(self):
self.__playlist.setPlaybackMode(3)
def set_play_mode_one_in_loop(self):
self.__playlist.setPlaybackMode(1)
@when_playlist_empty
def play_or_pause(self, flag=True):
if self.state() == QMediaPlayer.PlayingState:
self.pause()
elif self.state() == QMediaPlayer.PausedState:
self.play()
else:
self.__playlist.next()
@when_playlist_empty
@pyqtSlot()
def play_next(self, flag=True):
# self.stop()
self.__playlist.next()
# self.play()
@when_playlist_empty
@pyqtSlot()
def play_last(self, flag=True):
self.__playlist.previous()
@pyqtSlot(int)
def on_current_index_changed(self, index):
music_model = self.__music_list[index]
self.signal_player_media_changed.emit(music_model)
@pyqtSlot(QMediaPlayer.Error)
def on_error_occured(self, error):
self.pause()
if error == 2 or error == 5:
m = QMessageBox(QMessageBox.Warning, u"Error", u"If this error occurs on the first run, it is probably caused by \
missing codecs; please see the project homepage https://github.com/cosven/FeelUOwn to install the dependencies.\n\
If this is not the first run, the network may be down; please check your network connection.", QMessageBox.Yes | QMessageBox.No)
if m.exec() == QMessageBox.Yes:
QApplication.quit()
else:
LOG.error(u'Player error, type: ' + str(error))
if error == 3 or error == 1:
LOG.error(u'Player error: possibly a network failure or a missing codec')
return
@pyqtSlot(QMediaPlaylist.PlaybackMode)
def on_playback_mode_changed(self, playback_mode):
self.signal_playback_mode_changed.emit(playback_mode) | mit |
OsirisSPS/osiris-sps | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/plat-os2emx/IN.py | 77 | 1875 | # Generated by h2py from f:/emx/include/netinet/in.h
# Included from sys/param.h
PAGE_SIZE = 0x1000
HZ = 100
MAXNAMLEN = 260
MAXPATHLEN = 260
def htonl(X): return _swapl(X)
def ntohl(X): return _swapl(X)
def htons(X): return _swaps(X)
def ntohs(X): return _swaps(X)
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_EON = 80
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
INADDR_ANY = 0x00000000
INADDR_LOOPBACK = 0x7f000001
INADDR_BROADCAST = 0xffffffff
INADDR_NONE = 0xffffffff
INADDR_UNSPEC_GROUP = 0xe0000000
INADDR_ALLHOSTS_GROUP = 0xe0000001
INADDR_MAX_LOCAL_GROUP = 0xe00000ff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_MULTICAST_IF = 2
IP_MULTICAST_TTL = 3
IP_MULTICAST_LOOP = 4
IP_ADD_MEMBERSHIP = 5
IP_DROP_MEMBERSHIP = 6
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
| gpl-3.0 |
joshuajan/odoo | addons/account/installer.py | 381 | 8404 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from dateutil.relativedelta import relativedelta
import logging
from operator import itemgetter
import time
import urllib2
import urlparse
try:
import simplejson as json
except ImportError:
import json # noqa
from openerp.release import serie
from openerp.tools.translate import _
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class account_installer(osv.osv_memory):
_name = 'account.installer'
_inherit = 'res.config.installer'
def _get_charts(self, cr, uid, context=None):
modules = self.pool.get('ir.module.module')
# try get the list on apps server
try:
apps_server = self.pool.get('ir.module.module').get_apps_server(cr, uid, context=context)
up = urlparse.urlparse(apps_server)
url = '{0.scheme}://{0.netloc}/apps/charts?serie={1}'.format(up, serie)
j = urllib2.urlopen(url, timeout=3).read()
apps_charts = json.loads(j)
charts = dict(apps_charts)
except Exception:
charts = dict()
# Looking for the module with the 'Account Charts' category
category_name, category_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'module_category_localization_account_charts')
ids = modules.search(cr, uid, [('category_id', '=', category_id)], context=context)
if ids:
charts.update((m.name, m.shortdesc) for m in modules.browse(cr, uid, ids, context=context))
charts = sorted(charts.items(), key=itemgetter(1))
charts.insert(0, ('configurable', _('Custom')))
return charts
_columns = {
# Accounting
'charts': fields.selection(_get_charts, 'Accounting Package',
required=True,
help="Installs localized accounting charts to match as closely as "
"possible the accounting needs of your company based on your "
"country."),
'date_start': fields.date('Start Date', required=True),
'date_stop': fields.date('End Date', required=True),
'period': fields.selection([('month', 'Monthly'), ('3months', '3 Monthly')], 'Periods', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'has_default_company': fields.boolean('Has Default Company', readonly=True),
}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id and user.company_id.id or False
def _default_has_default_company(self, cr, uid, context=None):
count = self.pool.get('res.company').search_count(cr, uid, [], context=context)
return bool(count == 1)
_defaults = {
'date_start': lambda *a: time.strftime('%Y-01-01'),
'date_stop': lambda *a: time.strftime('%Y-12-31'),
'period': 'month',
'company_id': _default_company,
'has_default_company': _default_has_default_company,
'charts': 'configurable'
}
def get_unconfigured_cmp(self, cr, uid, context=None):
""" get the list of companies that have not been configured yet
but don't care about the demo chart of accounts """
company_ids = self.pool.get('res.company').search(cr, uid, [], context=context)
cr.execute("SELECT company_id FROM account_account WHERE active = 't' AND account_account.parent_id IS NULL AND name != %s", ("Chart For Automated Tests",))
configured_cmp = [r[0] for r in cr.fetchall()]
return list(set(company_ids)-set(configured_cmp))
def check_unconfigured_cmp(self, cr, uid, context=None):
""" check if there are still unconfigured companies """
if not self.get_unconfigured_cmp(cr, uid, context=context):
raise osv.except_osv(_('No Unconfigured Company!'), _("There is currently no company without chart of account. The wizard will therefore not be executed."))
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None: context = {}
res = super(account_installer, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
cmp_select = []
# display in the widget selection only the companies that haven't been configured yet
unconfigured_cmp = self.get_unconfigured_cmp(cr, uid, context=context)
for field in res['fields']:
if field == 'company_id':
res['fields'][field]['domain'] = [('id', 'in', unconfigured_cmp)]
res['fields'][field]['selection'] = [('', '')]
if unconfigured_cmp:
cmp_select = [(line.id, line.name) for line in self.pool.get('res.company').browse(cr, uid, unconfigured_cmp)]
res['fields'][field]['selection'] = cmp_select
return res
def on_change_start_date(self, cr, uid, id, start_date=False):
if start_date:
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = (start_date + relativedelta(months=12)) - relativedelta(days=1)
return {'value': {'date_stop': end_date.strftime('%Y-%m-%d')}}
return {}
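# e.g. a start date of 2014-01-01 yields date_stop 2014-12-31
# (start + 12 months - 1 day).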
def execute(self, cr, uid, ids, context=None):
self.execute_simple(cr, uid, ids, context)
return super(account_installer, self).execute(cr, uid, ids, context=context)
def execute_simple(self, cr, uid, ids, context=None):
if context is None:
context = {}
fy_obj = self.pool.get('account.fiscalyear')
for res in self.read(cr, uid, ids, context=context):
if 'date_start' in res and 'date_stop' in res:
f_ids = fy_obj.search(cr, uid, [('date_start', '<=', res['date_start']), ('date_stop', '>=', res['date_stop']), ('company_id', '=', res['company_id'][0])], context=context)
if not f_ids:
name = code = res['date_start'][:4]
if int(name) != int(res['date_stop'][:4]):
name = res['date_start'][:4] + '-' + res['date_stop'][:4]
code = res['date_start'][2:4] + '-' + res['date_stop'][2:4]
vals = {
'name': name,
'code': code,
'date_start': res['date_start'],
'date_stop': res['date_stop'],
'company_id': res['company_id'][0]
}
fiscal_id = fy_obj.create(cr, uid, vals, context=context)
if res['period'] == 'month':
fy_obj.create_period(cr, uid, [fiscal_id])
elif res['period'] == '3months':
fy_obj.create_period3(cr, uid, [fiscal_id])
def modules_to_install(self, cr, uid, ids, context=None):
modules = super(account_installer, self).modules_to_install(
cr, uid, ids, context=context)
chart = self.read(cr, uid, ids, ['charts'],
context=context)[0]['charts']
_logger.debug('Installing chart of accounts %s', chart)
return (modules | set([chart])) - set(['has_default_company', 'configurable'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
apple/llvm-project | llvm/utils/lit/tests/selecting.py | 4 | 6440 | # RUN: %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-BASIC %s
# CHECK-BASIC: Testing: 5 tests
# Check that we exit with an error if we do not discover any tests, even with --allow-empty-runs.
#
# RUN: not %{lit} %{inputs}/nonexistent 2>&1 | FileCheck --check-prefix=CHECK-BAD-PATH %s
# RUN: not %{lit} %{inputs}/nonexistent --allow-empty-runs 2>&1 | FileCheck --check-prefix=CHECK-BAD-PATH %s
# CHECK-BAD-PATH: error: did not discover any tests for provided path(s)
# Check that we exit with an error if we filter out all tests, but allow it with --allow-empty-runs.
# Check that we exit with an error if we skip all tests, but allow it with --allow-empty-runs.
#
# RUN: not %{lit} --filter 'nonexistent' %{inputs}/discovery 2>&1 | FileCheck --check-prefixes=CHECK-BAD-FILTER,CHECK-BAD-FILTER-ERROR %s
# RUN: %{lit} --filter 'nonexistent' --allow-empty-runs %{inputs}/discovery 2>&1 | FileCheck --check-prefixes=CHECK-BAD-FILTER,CHECK-BAD-FILTER-ALLOW %s
# RUN: not %{lit} --filter-out '.*' %{inputs}/discovery 2>&1 | FileCheck --check-prefixes=CHECK-BAD-FILTER,CHECK-BAD-FILTER-ERROR %s
# RUN: %{lit} --filter-out '.*' --allow-empty-runs %{inputs}/discovery 2>&1 | FileCheck --check-prefixes=CHECK-BAD-FILTER,CHECK-BAD-FILTER-ALLOW %s
# CHECK-BAD-FILTER: error: filter did not match any tests (of 5 discovered).
# CHECK-BAD-FILTER-ERROR: Use '--allow-empty-runs' to suppress this error.
# CHECK-BAD-FILTER-ALLOW: Suppressing error because '--allow-empty-runs' was specified.
# Check that regex-filtering works, is case-insensitive, and can be configured via env var.
#
# RUN: %{lit} --filter 'o[a-z]e' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# RUN: %{lit} --filter 'O[A-Z]E' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# RUN: env LIT_FILTER='o[a-z]e' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# RUN: %{lit} --filter-out 'test-t[a-z]' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# RUN: %{lit} --filter-out 'test-t[A-Z]' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# RUN: env LIT_FILTER_OUT='test-t[a-z]' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# CHECK-FILTER: Testing: 2 of 5 tests
# CHECK-FILTER: Excluded: 3
# Check that maximum counts work
#
# RUN: %{lit} --max-tests 3 %{inputs}/discovery | FileCheck --check-prefix=CHECK-MAX %s
# CHECK-MAX: Testing: 3 of 5 tests
# CHECK-MAX: Excluded: 2
# Check that sharding partitions the testsuite in a way that distributes the
# rounding error nicely (i.e. 5/3 => 2 2 1, not 1 1 3 or whatever)
#
# RUN: %{lit} --num-shards 3 --run-shard 1 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD0-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD0-OUT < %t.out %s
# CHECK-SHARD0-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
# CHECK-SHARD0-OUT: Testing: 2 of 5 tests
# CHECK-SHARD0-OUT: Excluded: 3
#
# RUN: %{lit} --num-shards 3 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD1-OUT < %t.out %s
# CHECK-SHARD1-ERR: note: Selecting shard 2/3 = size 2/5 = tests #(3*k)+2 = [2, 5]
# CHECK-SHARD1-OUT: Testing: 2 of 5 tests
#
# RUN: %{lit} --num-shards 3 --run-shard 3 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD2-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD2-OUT < %t.out %s
# CHECK-SHARD2-ERR: note: Selecting shard 3/3 = size 1/5 = tests #(3*k)+3 = [3]
# CHECK-SHARD2-OUT: Testing: 1 of 5 tests
# Check that sharding via env vars works.
#
# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=1 %{lit} %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD0-ENV-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD0-ENV-OUT < %t.out %s
# CHECK-SHARD0-ENV-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
# CHECK-SHARD0-ENV-OUT: Testing: 2 of 5 tests
#
# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=2 %{lit} %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ENV-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ENV-OUT < %t.out %s
# CHECK-SHARD1-ENV-ERR: note: Selecting shard 2/3 = size 2/5 = tests #(3*k)+2 = [2, 5]
# CHECK-SHARD1-ENV-OUT: Testing: 2 of 5 tests
#
# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=3 %{lit} %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD2-ENV-ERR < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD2-ENV-OUT < %t.out %s
# CHECK-SHARD2-ENV-ERR: note: Selecting shard 3/3 = size 1/5 = tests #(3*k)+3 = [3]
# CHECK-SHARD2-ENV-OUT: Testing: 1 of 5 tests
# Check that providing more shards than tests results in 1 test per shard
# until we run out, then 0.
#
# RUN: %{lit} --num-shards 100 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR1 < %t.err %s
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-OUT1 < %t.out %s
# CHECK-SHARD-BIG-ERR1: note: Selecting shard 2/100 = size 1/5 = tests #(100*k)+2 = [2]
# CHECK-SHARD-BIG-OUT1: Testing: 1 of 5 tests
#
# RUN: %{lit} --num-shards 100 --run-shard 6 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR2 < %t.err %s
# CHECK-SHARD-BIG-ERR2: note: Selecting shard 6/100 = size 0/5 = tests #(100*k)+6 = []
# CHECK-SHARD-BIG-ERR2: warning: shard does not contain any tests. Consider decreasing the number of shards.
#
# RUN: %{lit} --num-shards 100 --run-shard 50 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR3 < %t.err %s
# CHECK-SHARD-BIG-ERR3: note: Selecting shard 50/100 = size 0/5 = tests #(100*k)+50 = []
# CHECK-SHARD-BIG-ERR3: warning: shard does not contain any tests. Consider decreasing the number of shards.
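#
# A minimal sketch (assumption, inferred from the shard contents checked
# above) of the stride-based selection: shard i of n keeps tests i-1, i-1+n,
# ..., which spreads the rounding error across shards (5 tests over 3 shards
# gives sizes 2, 2, 1).
#
#   def select_shard(tests, run_shard, num_shards):
#       return tests[run_shard - 1::num_shards]
#
#   select_shard([1, 2, 3, 4, 5], 1, 3)  # -> [1, 4]
#   select_shard([1, 2, 3, 4, 5], 3, 3)  # -> [3]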
# Check that range constraints are enforced
#
# RUN: not %{lit} --num-shards 0 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-ERR < %t.err %s
# CHECK-SHARD-ERR: error: argument --num-shards: requires positive integer, but found '0'
#
# RUN: not %{lit} --num-shards 3 --run-shard 4 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD-ERR2 < %t.err %s
# CHECK-SHARD-ERR2: error: --run-shard must be between 1 and --num-shards (inclusive)
| apache-2.0 |
VShangxiao/v2ex | html5lib/serializer/htmlserializer.py | 97 | 12663 | try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import ImmutableSet as frozenset
import gettext
_ = gettext.gettext
from html5lib.constants import voidElements, booleanAttributes, spaceCharacters
from html5lib.constants import rcdataElements, entities, xmlEntities
from html5lib import utils
from xml.sax.saxutils import escape
spaceCharacters = u"".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
from html5lib.constants import entities
encode_entity_map = {}
is_ucs4 = len(u"\U0010FFFF") == 1
for k, v in entities.items():
#skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
try:
v = ord(v)
except:
print v
raise
        if v not in encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index+2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index+2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;"%(hex(cp)[2:]))
return (u"".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
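# A minimal usage sketch (illustrative; the input string is hypothetical):
# once the handler above is registered, unencodable characters become named
# entities where one exists, and numeric character references otherwise.
#
#     >>> u"caf\xe9 \u2603".encode("ascii", unicode_encode_errors)
#     'caf&eacute; &#x2603;'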
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = u'"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"minimize_boolean_attributes", "use_trailing_solidus",
"space_before_trailing_solidus", "omit_optional_tags",
"strip_whitespace", "inject_meta_charset", "escape_lt_in_attrs",
"escape_rcdata", "resolve_entities", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
        Whether to insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
        source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
        if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
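    # A minimal usage sketch (illustrative; assumes a tree walker produced by
    # html5lib.treewalkers over an already-parsed document):
    #
    #     serializer = HTMLSerializer(omit_optional_tags=False)
    #     output = serializer.render(treewalker)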
def encode(self, string):
assert(isinstance(string, unicode))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, unicode))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from html5lib.filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# XXX: WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of this latter filter
if self.strip_whitespace:
from html5lib.filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from html5lib.filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from html5lib.filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = u"<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += u' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += u" SYSTEM"
if token["systemId"]:
if token["systemId"].find(u'"') >= 0:
if token["systemId"].find(u"'") >= 0:
                            self.serializeError(_("System identifier contains both single and double quote characters"))
quote_char = u"'"
else:
quote_char = u'"'
doctype += u" %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += u">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict(u"<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
attributes = []
for (attr_namespace,attr_name),attr_value in sorted(token["data"].items()):
#TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(u' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) \
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict(u"=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x,y: x or (y in v),
spaceCharacters + u">\"'=", False)
                    v = v.replace(u"&", u"&amp;")
                    if self.escape_lt_in_attrs: v = v.replace(u"<", u"&lt;")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if u"'" in v and u'"' not in v:
quote_char = u'"'
elif u'"' in v and u"'" not in v:
quote_char = u"'"
if quote_char == u"'":
                            v = v.replace(u"'", u"&#39;")
else:
                            v = v.replace(u'"', u"&quot;")
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(u" /")
else:
yield self.encodeStrict(u"/")
yield self.encode(u">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict(u"</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict(u"<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
                if key not in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = u"&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return "".join(list(self.serialize(treewalker, encoding)))
else:
return u"".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
| bsd-3-clause |
GarySparrow/mFlaskWeb | venv/Lib/site-packages/markdown/extensions/admonition.py | 110 | 3158 | """
Admonition extension for Python-Markdown
========================================
Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions # noqa
See <https://pythonhosted.org/Markdown/extensions/admonition.html>
for documentation.
Original code Copyright [Tiago Serafim](http://www.tiagoserafim.com/).
All changes Copyright The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor
from ..util import etree
import re
class AdmonitionExtension(Extension):
""" Admonition extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add Admonition to Markdown instance. """
md.registerExtension(self)
md.parser.blockprocessors.add('admonition',
AdmonitionProcessor(md.parser),
'_begin')
class AdmonitionProcessor(BlockProcessor):
CLASSNAME = 'admonition'
CLASSNAME_TITLE = 'admonition-title'
RE = re.compile(r'(?:^|\n)!!!\ ?([\w\-]+)(?:\ "(.*?)")?')
def test(self, parent, block):
sibling = self.lastChild(parent)
return self.RE.search(block) or \
(block.startswith(' ' * self.tab_length) and sibling is not None and
sibling.get('class', '').find(self.CLASSNAME) != -1)
def run(self, parent, blocks):
sibling = self.lastChild(parent)
block = blocks.pop(0)
m = self.RE.search(block)
if m:
block = block[m.end() + 1:] # removes the first line
block, theRest = self.detab(block)
if m:
klass, title = self.get_class_and_title(m)
div = etree.SubElement(parent, 'div')
div.set('class', '%s %s' % (self.CLASSNAME, klass))
if title:
p = etree.SubElement(div, 'p')
p.text = title
p.set('class', self.CLASSNAME_TITLE)
else:
div = sibling
self.parser.parseChunk(div, block)
if theRest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, theRest)
def get_class_and_title(self, match):
klass, title = match.group(1).lower(), match.group(2)
if title is None:
# no title was provided, use the capitalized classname as title
# e.g.: `!!! note` will render
# `<p class="admonition-title">Note</p>`
title = klass.capitalize()
elif title == '':
# an explicit blank title should not be rendered
# e.g.: `!!! warning ""` will *not* render `p` with a title
title = None
return klass, title
def makeExtension(*args, **kwargs):
return AdmonitionExtension(*args, **kwargs)
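# A minimal usage sketch (illustrative; the extension path may vary between
# Python-Markdown versions and the input text is hypothetical):
#
#     import markdown
#     text = '!!! note "Did you know?"\n    Admonitions render as styled divs.'
#     html = markdown.markdown(text, extensions=['markdown.extensions.admonition'])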
| mit |
Thermi/ocfs2-tools | ocfs2console/ocfs2interface/terminal.py | 8 | 1913 | # OCFS2Console - GUI frontend for OCFS2 management and debugging
# Copyright (C) 2005 Oracle. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import gtk
from guiutil import set_props
try:
import vte
except ImportError:
terminal_ok = False
else:
terminal_ok = True
class TerminalDialog(gtk.Dialog):
def __init__(self, parent=None, title='Terminal'):
gtk.Dialog.__init__(self, parent=parent, title=title,
buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
label = gtk.Label(title)
label.set_alignment(xalign=0.0, yalign=0.5)
self.vbox.pack_start(label)
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_IN)
self.vbox.pack_end(frame)
hbox = gtk.HBox()
frame.add(hbox)
self.terminal = vte.Terminal()
self.terminal.set_scrollback_lines(8192)
#self.terminal.set_font_from_string('monospace 12')
hbox.pack_start(self.terminal)
scrollbar = gtk.VScrollbar()
scrollbar.set_adjustment(self.terminal.get_adjustment())
hbox.pack_end(scrollbar)
self.show_all()
def main():
dialog = TerminalDialog()
dialog.run()
if __name__ == '__main__':
main()
| gpl-2.0 |
Lujeni/ansible | lib/ansible/plugins/cache/memory.py | 159 | 1272 | # (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: memory
short_description: RAM backed, non persistent
description:
- RAM backed cache that is not persistent.
- This is the default used if no other plugin is specified.
- There are no options to configure.
version_added: historical
author: core team (@ansible-core)
'''
from ansible.plugins.cache import BaseCacheModule
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
self._cache = {}
def get(self, key):
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
def keys(self):
return self._cache.keys()
def contains(self, key):
return key in self._cache
def delete(self, key):
del self._cache[key]
def flush(self):
self._cache = {}
def copy(self):
return self._cache.copy()
def __getstate__(self):
return self.copy()
def __setstate__(self, data):
self._cache = data
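# A minimal usage sketch (illustrative; host name and facts are hypothetical):
#
#     cache = CacheModule()
#     cache.set('host1', {'ansible_facts': {'os_family': 'Debian'}})
#     assert cache.contains('host1')
#     facts = cache.get('host1')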
| gpl-3.0 |
iivic/BoiseStateX | lms/djangoapps/certificates/tests/factories.py | 8 | 3016 | # Factories are self documenting
# pylint: disable=missing-docstring
import factory
from django.core.files.base import ContentFile
from factory.django import DjangoModelFactory, ImageField
from student.models import LinkedInAddToProfileConfiguration
from certificates.models import (
GeneratedCertificate, CertificateStatuses, CertificateHtmlViewConfiguration, CertificateWhitelist, BadgeAssertion,
BadgeImageConfiguration,
)
class GeneratedCertificateFactory(DjangoModelFactory):
class Meta(object):
model = GeneratedCertificate
course_id = None
status = CertificateStatuses.unavailable
mode = GeneratedCertificate.MODES.honor
name = ''
class CertificateWhitelistFactory(DjangoModelFactory):
class Meta(object):
model = CertificateWhitelist
course_id = None
whitelist = True
notes = None
class BadgeAssertionFactory(DjangoModelFactory):
class Meta(object):
model = BadgeAssertion
mode = 'honor'
class BadgeImageConfigurationFactory(DjangoModelFactory):
class Meta(object):
model = BadgeImageConfiguration
mode = 'honor'
icon = factory.LazyAttribute(
lambda _: ContentFile(
ImageField()._make_data( # pylint: disable=protected-access
{'color': 'blue', 'width': 50, 'height': 50, 'format': 'PNG'}
), 'test.png'
)
)
class CertificateHtmlViewConfigurationFactory(DjangoModelFactory):
class Meta(object):
model = CertificateHtmlViewConfiguration
enabled = True
configuration = """{
"default": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "edX",
"company_about_url": "http://www.edx.org/about-us",
"company_privacy_url": "http://www.edx.org/edx-privacy-policy",
"company_tos_url": "http://www.edx.org/edx-terms-service",
"company_verified_certificate_url": "http://www.edx.org/verified-certificate",
"document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css",
"logo_src": "/static/certificates/images/logo-edx.png",
"logo_url": "http://www.edx.org"
},
"honor": {
"certificate_type": "Honor Code",
"certificate_title": "Certificate of Achievement"
},
"verified": {
"certificate_type": "Verified",
"certificate_title": "Verified Certificate of Achievement"
},
"xseries": {
"certificate_title": "XSeries Certificate of Achievement",
"certificate_type": "XSeries"
}
}"""
class LinkedInAddToProfileConfigurationFactory(DjangoModelFactory):
class Meta(object):
model = LinkedInAddToProfileConfiguration
enabled = True
company_identifier = "0_0dPSPyS070e0HsE9HNz_13_d11_"
trk_partner_name = 'unittest'
| agpl-3.0 |
elit3ge/SickRage | lib/github/GitCommit.py | 74 | 5226 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.GitAuthor
import github.GitTree
class GitCommit(github.GithubObject.CompletableGithubObject):
"""
This class represents GitCommits as returned for example by http://developer.github.com/v3/todo
"""
@property
def author(self):
"""
:type: :class:`github.GitAuthor.GitAuthor`
"""
self._completeIfNotSet(self._author)
return self._author.value
@property
def committer(self):
"""
:type: :class:`github.GitAuthor.GitAuthor`
"""
self._completeIfNotSet(self._committer)
return self._committer.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def message(self):
"""
:type: string
"""
self._completeIfNotSet(self._message)
return self._message.value
@property
def parents(self):
"""
:type: list of :class:`github.GitCommit.GitCommit`
"""
self._completeIfNotSet(self._parents)
return self._parents.value
@property
def sha(self):
"""
:type: string
"""
self._completeIfNotSet(self._sha)
return self._sha.value
@property
def tree(self):
"""
:type: :class:`github.GitTree.GitTree`
"""
self._completeIfNotSet(self._tree)
return self._tree.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def _identity(self):
return self.sha
def _initAttributes(self):
self._author = github.GithubObject.NotSet
self._committer = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._message = github.GithubObject.NotSet
self._parents = github.GithubObject.NotSet
self._sha = github.GithubObject.NotSet
self._tree = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "author" in attributes: # pragma no branch
self._author = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes["author"])
if "committer" in attributes: # pragma no branch
self._committer = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes["committer"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "message" in attributes: # pragma no branch
self._message = self._makeStringAttribute(attributes["message"])
if "parents" in attributes: # pragma no branch
self._parents = self._makeListOfClassesAttribute(GitCommit, attributes["parents"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "tree" in attributes: # pragma no branch
self._tree = self._makeClassAttribute(github.GitTree.GitTree, attributes["tree"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
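# A minimal usage sketch (assumption: an authenticated github.Github client
# with network access; repository name and sha are hypothetical):
#
#     g = github.Github("access_token")
#     commit = g.get_repo("owner/repo").get_git_commit("0123abcd...")
#     print(commit.message, commit.author.name, commit.tree.sha)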
| gpl-3.0 |
JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/io/castep.py | 3 | 23945 | # -*- coding: utf-8 -*-
"""This module defines I/O routines with CASTEP files.
The key idea is that all functions accept or return atoms objects.
CASTEP specific parameters will be returned through the <atoms>.calc
attribute.
"""
from numpy import sqrt, radians, sin, cos, matrix, array, cross, float32, dot
import ase
from ase.constraints import FixAtoms, FixCartesian
from ase.parallel import paropen
import os
__all__ = [
'read_castep',
'read_cell',
'read_geom',
'read_param',
'read_seed',
'write_cell',
'write_param',
]
def write_cell(filename, atoms, positions_frac=False, castep_cell=None,
force_write=False):
"""This CASTEP export function write minimal information to
a .cell file. If the atoms object is a trajectory, it will
take the last image.
"""
if atoms is None:
print("Atoms object not initialized")
return False
if isinstance(atoms, list):
if len(atoms) > 1:
atoms = atoms[-1]
if os.path.isfile(filename) and not force_write:
print('ase.io.castep.write_param: Set optional argument')
print('force_write=True to overwrite %s.' % filename)
return False
fd = open(filename, 'w')
fd.write('#######################################################\n')
fd.write('#CASTEP cell file: %s\n' % filename)
fd.write('#Created using the Atomic Simulation Environment (ASE)#\n')
fd.write('#######################################################\n\n')
fd.write('%BLOCK LATTICE_CART\n')
cell = matrix(atoms.get_cell())
for line in atoms.get_cell():
fd.write(' %.10f %.10f %.10f\n' % tuple(line))
fd.write('%ENDBLOCK LATTICE_CART\n\n\n')
if positions_frac:
keyword = 'POSITIONS_FRAC'
positions = array(atoms.get_positions() * cell.I)
else:
keyword = 'POSITIONS_ABS'
positions = atoms.get_positions()
if atoms.get_initial_magnetic_moments().any():
pos_block = [('%s %8.6f %8.6f %8.6f SPIN=%4.2f' %
(x, y[0], y[1], y[2], m)) for (x, y, m)
in zip(atoms.get_chemical_symbols(),
positions,
atoms.get_initial_magnetic_moments())]
else:
pos_block = [('%s %8.6f %8.6f %8.6f' %
(x, y[0], y[1], y[2])) for (x, y)
in zip(atoms.get_chemical_symbols(),
positions)]
fd.write('%%BLOCK %s\n' % keyword)
for line in pos_block:
fd.write(' %s\n' % line)
fd.write('%%ENDBLOCK %s\n\n' % keyword)
    # if atoms has a CASTEP calculator attached, then only
# write constraints if really necessary
if hasattr(atoms, 'calc')\
and hasattr(atoms.calc, 'param')\
and hasattr(atoms.calc.param, 'task'):
task = atoms.calc.param.task
if atoms.calc.param.task.value is None:
suppress_constraints = True
elif task.value.lower() not in [
'geometryoptimization',
'moleculardynamics',
'transitionstatesearch',
'phonon']:
suppress_constraints = True
else:
suppress_constraints = False
else:
suppress_constraints = True
constraints = atoms.constraints
if len(constraints) and not suppress_constraints:
fd.write("%BLOCK IONIC_CONSTRAINTS \n")
count = 0
for constr in constraints:
if not isinstance(constr, FixAtoms)\
and not isinstance(constr, FixCartesian)\
and not suppress_constraints:
print('Warning: you have constraints in your atoms, that are')
print(' not supported by CASTEP')
break
if isinstance(constr, FixAtoms):
# sorry, for this complicated block
# reason is that constraint.index can either
# hold booleans or integers and in both cases
# it is an numpy array, so no simple comparison works
for n, val in enumerate(constr.index):
if val.dtype.name.startswith('bool'):
if not val:
continue
symbol = atoms.get_chemical_symbols()[n]
nis = atoms.calc._get_number_in_species(n)
elif val.dtype.name.startswith('int'):
symbol = atoms.get_chemical_symbols()[val]
nis = atoms.calc._get_number_in_species(val)
else:
raise UserWarning('Unrecognized index in' + \
' constraint %s' % constr)
fd.write("%6d %3s %3d 1 0 0 \n" % (count + 1,
symbol,
nis))
fd.write("%6d %3s %3d 0 1 0 \n" % (count + 2,
symbol,
nis))
fd.write("%6d %3s %3d 0 0 1 \n" % (count + 3,
symbol,
nis))
count += 3
elif isinstance(constr, FixCartesian):
n = constr.a
symbol = atoms.get_chemical_symbols()[n]
nis = atoms.calc._get_number_in_species(n)
fix_cart = - constr.mask + 1
if fix_cart[0]:
count += 1
fd.write("%6d %3s %3d 1 0 0 \n" % (count, symbol, nis))
if fix_cart[1]:
count += 1
fd.write("%6d %3s %3d 0 1 0 \n" % (count, symbol, nis))
if fix_cart[2]:
count += 1
fd.write("%6d %3s %3d 0 0 1 \n" % (count, symbol, nis))
fd.write("%ENDBLOCK IONIC_CONSTRAINTS \n")
if castep_cell is None:
if hasattr(atoms, 'calc') and hasattr(atoms.calc, 'cell'):
castep_cell = atoms.calc.cell
else:
fd.close()
return True
for option in castep_cell._options.values():
if option.value is not None:
if option.type == 'Block':
fd.write('%%BLOCK %s\n' % option.keyword.upper())
fd.write(option.value)
fd.write('\n%%ENDBLOCK %s\n' % option.keyword.upper())
else:
fd.write('%s : %s\n' % (option.keyword.upper(), option.value))
fd.close()
return True
def read_cell(filename, _=None):
"""Read a .cell file and return an atoms object.
Any value found that does not fit the atoms API
will be stored in the atoms.calc attribute.
"""
from ase.calculators.castep import Castep
calc = Castep()
fileobj = open(filename)
lines = fileobj.readlines()
fileobj.close()
def get_tokens(lines, l):
"""Tokenizes one line of a *cell file."""
comment_chars = "#!"
while l < len(lines):
line = lines[l].strip()
if len(line) == 0:
l += 1
continue
elif any([line.startswith(comment_char)
for comment_char in comment_chars]):
l += 1
continue
else:
            # position of the first comment character, if any
            icomment = len(line)
            for c in comment_chars:
                if c in line:
                    icomment = min(icomment, line.index(c))
tokens = line[:icomment].split()
return tokens, l + 1
tokens = ""
print("read_cell: Warning - get_tokens has not found any more tokens")
return tokens, l
lat = []
have_lat = False
pos = []
spec = []
constraints = []
raw_constraints = {}
have_pos = False
pos_frac = False
l = 0
while l < len(lines):
tokens, l = get_tokens(lines, l)
if not tokens:
continue
elif tokens[0].upper() == "%BLOCK":
if tokens[1].upper() == "LATTICE_CART" and not have_lat:
tokens, l = get_tokens(lines, l)
if len(tokens) == 1:
print('read_cell: Warning - ignoring unit specifier in')
print('%BLOCK LATTICE_CART (assuming Angstrom instead)')
tokens, l = get_tokens(lines, l)
for _ in range(3):
lat_vec = map(float, tokens[0:3])
lat.append(lat_vec)
tokens, l = get_tokens(lines, l)
if tokens[0].upper() != "%ENDBLOCK":
print('read_cell: Warning - ignoring more than three')
print('lattice vectors in invalid %BLOCK LATTICE_CART')
print('%s ...' % tokens[0].upper())
have_lat = True
elif tokens[1].upper() == "LATTICE_ABC" and not have_lat:
tokens, l = get_tokens(lines, l)
if len(tokens) == 1:
print('read_cell: Warning - ignoring unit specifier in')
print('%BLOCK LATTICE_ABC (assuming Angstrom instead)')
tokens, l = get_tokens(lines, l)
a, b, c = map(float, tokens[0:3])
tokens, l = get_tokens(lines, l)
alpha, beta, gamma = map(lambda phi: radians(float(phi)),
tokens[0:3])
tokens, l = get_tokens(lines, l)
if tokens[0].upper() != "%ENDBLOCK":
print('read_cell: Warning - ignoring additional lines in')
print('invalid %BLOCK LATTICE_ABC')
lat_a = [a, 0, 0]
lat_b = [b * cos(gamma), b * sin(gamma), 0]
lat_c1 = c * cos(beta)
lat_c2 = c * (cos(alpha) - cos(beta) * cos(gamma)) / sin(gamma)
lat_c3 = sqrt(c * c - lat_c1 * lat_c1 - lat_c2 * lat_c2)
lat_c = [lat_c1, lat_c2, lat_c3]
lat = [lat_a, lat_b, lat_c]
have_lat = True
elif tokens[1].upper() == "POSITIONS_ABS" and not have_pos:
tokens, l = get_tokens(lines, l)
if len(tokens) == 1:
print('read_cell: Warning - ignoring unit specifier in')
print('%BLOCK POSITIONS_ABS(assuming Angstrom instead)')
tokens, l = get_tokens(lines, l)
while len(tokens) == 4:
spec.append(tokens[0])
pos.append(map(float, tokens[1:4]))
tokens, l = get_tokens(lines, l)
if tokens[0].upper() != "%ENDBLOCK":
print('read_cell: Warning - ignoring invalid lines in')
print('%%BLOCK POSITIONS_ABS:\n\t %s' % tokens)
have_pos = True
elif tokens[1].upper() == "POSITIONS_FRAC" and not have_pos:
pos_frac = True
tokens, l = get_tokens(lines, l)
while len(tokens) == 4:
spec.append(tokens[0])
pos.append(map(float, tokens[1:4]))
tokens, l = get_tokens(lines, l)
if tokens[0].upper() != "%ENDBLOCK":
print('read_cell: Warning - ignoring invalid lines')
print('%%BLOCK POSITIONS_FRAC:\n\t %s' % tokens)
have_pos = True
elif tokens[1].upper() == 'SPECIES_POT':
tokens, l = get_tokens(lines, l)
while tokens and not tokens[0].upper() == '%ENDBLOCK':
if len(tokens) == 2:
calc.cell.species_pot = tuple(tokens)
tokens, l = get_tokens(lines, l)
elif tokens[1].upper() == 'IONIC_CONSTRAINTS':
while True:
if tokens and tokens[0].upper() == '%ENDBLOCK':
break
tokens, l = get_tokens(lines, l)
if not len(tokens) == 6:
continue
_, species, nic, x, y, z = tokens
nic = int(nic)
if (species, nic) not in raw_constraints:
raw_constraints[(species, nic)] = []
raw_constraints[(species, nic)].append(array(
[x, y, z]))
else:
print('Warning: the keyword %s is not' % tokens[1].upper())
print(' interpreted in cell files')
while not tokens[0].upper() == '%ENDBLOCK':
tokens, l = get_tokens(lines, l)
#raise UserWarning
else:
key = tokens[0]
value = ' '.join(tokens[1:])
try:
calc.__setattr__(key, value)
except:
print("Problem setting calc.cell.%s = %s" % (key, value))
raise
if pos_frac:
atoms = ase.Atoms(
calculator=calc,
cell=lat,
pbc=True,
scaled_positions=pos,
symbols=spec,
)
else:
atoms = ase.Atoms(
calculator=calc,
cell=lat,
pbc=True,
positions=pos,
symbols=spec,
)
fixed_atoms = []
for (species, nic), value in raw_constraints.iteritems():
absolute_nr = atoms.calc._get_absolute_number(species, nic)
if len(value) == 3:
fixed_atoms.append(absolute_nr)
elif len(value) == 2:
constraint = ase.constraints.FixedLine(a=absolute_nr,
direction=cross(value[0], value[1]))
constraints.append(constraint)
elif len(value) == 1:
constraint = ase.constraints.FixedPlane(a=absolute_nr,
direction=array(value[0], dtype=float32))
constraints.append(constraint)
else:
print('Error: Found %s statements attached to atoms %s'
% (len(value), absolute_nr))
constraints.append(ase.constraints.FixAtoms(fixed_atoms))
atoms.set_constraint(constraints)
# needs to go here again to have the constraints in
# atoms.calc.atoms.constraints as well
atoms.calc.atoms = atoms
atoms.calc.push_oldstate()
return atoms
# this actually does not belong here
# think how one could join this with
# the ase.calculators.castep.Castep.read()
# in the future!
def read_castep(filename, _=-1):
"""Reads a .castep file and returns an atoms object.
The calculator information will be stored in the calc attribute.
If more than one SCF step is found, a list of all steps
will be stored in the traj attribute.
Note that the index argument has no effect as of now.
"""
from ase.calculators.singlepoint import SinglePointCalculator
fileobj = open(filename)
lines = fileobj.readlines()
fileobj.close()
traj = []
energy_total = None
energy_0K = None
for i, line in enumerate(lines):
if 'NB est. 0K energy' in line:
energy_0K = float(line.split()[6])
elif 'Final energy, E' in line:
energy_total = float(line.split()[4])
elif 'Unit Cell' in line:
cell = [x.split()[0:3] for x in lines[i + 3:i + 6]]
cell = array([[float(col) for col in row] for row in cell])
elif 'Cell Contents' in line:
geom_starts = i
start_found = False
for j, jline in enumerate(lines[geom_starts:]):
if jline.find('xxxxx') > 0 and start_found:
geom_stop = j + geom_starts
break
if jline.find('xxxx') > 0 and not start_found:
geom_start = j + geom_starts + 4
start_found = True
species = [line.split()[1] for line in lines[geom_start:geom_stop]]
geom = dot(array([[float(col) for col in line.split()[3:6]]
for line in lines[geom_start:geom_stop]]), cell)
elif 'Writing model to' in line:
atoms = ase.Atoms(
cell=cell,
pbc=True,
positions=geom,
symbols=''.join(species),
)
# take 0K energy where available, else total energy
if energy_0K:
energy = energy_0K
else:
energy = energy_total
# generate a minimal single-point calculator
sp_calc = SinglePointCalculator(atoms=atoms,
energy=energy,
forces=None,
magmoms=None,
stress=None,
)
atoms.set_calculator(sp_calc)
traj.append(atoms)
return traj
def read_param(filename, calc=None):
"""Reads a param file. If an Castep object is passed as the
second argument, the parameter setings are merged into
the existing object and returned. Otherwise a new Castep()
calculator instance gets created and returned.
Parameters:
    filename: the .param file. Only opened for reading
calc: [Optional] calculator object to hang
parameters onto
"""
if calc is None:
calc = ase.calculators.castep.Castep(check_castep_version=False)
calc.merge_param(filename)
return calc
def write_param(filename, param, check_checkfile=False,
force_write=False,
interface_options=None):
"""Writes a CastepParam object to a CASTEP .param file
Parameters:
filename: the location of the file to write to. If it
exists it will be overwritten without warning. If it
doesn't it will be created.
param: a CastepParam instance
check_checkfile : if set to True, write_param will
only write continuation or reuse statement
if a restart file exists in the same directory
"""
if os.path.isfile(filename) and not force_write:
print('ase.io.castep.write_param: Set optional argument')
print('force_write=True to overwrite %s.' % filename)
return False
out = paropen(filename, 'w')
out.write('#######################################################\n')
out.write('#CASTEP param file: %s\n' % filename)
out.write('#Created using the Atomic Simulation Environment (ASE)#\n')
if interface_options is not None:
out.write('# Internal settings of the calculator\n')
out.write('# This can be switched off by settings\n')
out.write('# calc._export_settings = False\n')
out.write('# If stated, this will be automatically processed\n')
out.write('# by ase.io.castep.read_seed()\n')
for option, value in sorted(interface_options.iteritems()):
out.write('# ASE_INTERFACE %s : %s\n' % (option, value))
out.write('#######################################################\n\n')
for keyword, opt in sorted(param._options.iteritems()):
if opt.type == 'Defined':
if opt.value is not None:
                out.write('%s\n' % keyword)
elif opt.value is not None:
if keyword in ['continuation', 'reuse'] and check_checkfile:
if opt.value == 'default':
if not os.path.exists('%s.%s'\
% (os.path.splitext(filename)[0], 'check')):
continue
elif not os.path.exists(opt.value):
continue
out.write('%s : %s\n'
% (keyword, opt.value))
out.close()
def read_geom(filename, _=-1):
"""Reads a .geom file produced by the CASTEP GeometryOptimization task and
returns an atoms object.
The information about total free energy and forces of each atom for every
relaxation step will be stored for further analysis especially in a
single-point calculator.
    Note that everything in the .geom file is in atomic units, which have
    been converted to the commonly used units Angstrom (length) and eV (energy).
Note that the index argument has no effect as of now.
Contribution by Wei-Bing Zhang. Thanks!
"""
from ase.calculators.singlepoint import SinglePointCalculator
fileobj = open(filename)
txt = fileobj.readlines()
fileobj.close()
traj = []
# Source: CODATA2002, used by default
# in CASTEP 5.01
# but check with your version in case of error
# ase.units is based on CODATA1986/
# here we hard-code from http://physics.nist.gov/cuu/Document/all_2002.pdf
Hartree = 27.211384565719481
Bohr = 0.5291772108
print('N.B.: Energy in .geom file is not 0K extrapolated.')
for i, line in enumerate(txt):
if line.find("<-- E") > 0:
start_found = True
energy = float(line.split()[0]) * Hartree
cell = [x.split()[0:3] for x in txt[i + 1:i + 4]]
cell = array([[float(col) * Bohr for col in row] for row in
cell])
if line.find('<-- R') > 0 and start_found:
start_found = False
geom_start = i
for i, line in enumerate(txt[geom_start:]):
if line.find('<-- F') > 0:
geom_stop = i + geom_start
break
species = [line.split()[0] for line in
txt[geom_start:geom_stop]]
geom = array([[float(col) * Bohr for col in
line.split()[2:5]] for line in txt[geom_start:geom_stop]])
forces = array([[float(col) * Hartree / Bohr for col in
line.split()[2:5]] for line in
txt[geom_stop:geom_stop + (geom_stop - geom_start)]])
image = ase.Atoms(species, geom, cell=cell, pbc=True)
image.set_calculator(SinglePointCalculator(energy, forces, None,
None, image))
traj.append(image)
return traj
def read_seed(seed, new_seed=None, ignore_internal_keys=False):
"""A wrapper around the CASTEP Calculator in conjunction with
read_cell and read_param. Basically this can be used to reuse
a previous calculation which results in a triple of
cell/param/castep file. The label of the calculation if pre-
fixed with cop_of_ and everything else will be recycled as
much as possible from the addressed calculation.
"""
directory = os.path.abspath(os.path.dirname(seed))
seed = os.path.basename(seed)
paramfile = os.path.join(directory, '%s.param' % seed)
cellfile = os.path.join(directory, '%s.cell' % seed)
castepfile = os.path.join(directory, '%s.castep' % seed)
atoms = read_cell(cellfile)
atoms.calc._directory = directory
atoms.calc._rename_existing_dir = False
atoms.calc._castep_pp_path = directory
atoms.calc.merge_param(paramfile,
ignore_internal_keys=ignore_internal_keys)
if new_seed is None:
atoms.calc._label = 'copy_of_%s' % seed
else:
atoms.calc._label = str(new_seed)
if os.path.isfile(castepfile):
# _set_atoms needs to be True here
# but we set it right back to False
atoms.calc._set_atoms = True
atoms.calc.read(castepfile)
atoms.calc._set_atoms = False
# sync the top-level object with the
# one attached to the calculator
atoms = atoms.calc.atoms
else:
        print('Corresponding .castep file not found.')
atoms.calc.push_oldstate()
return atoms
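# A minimal usage sketch (assumption: a finished CASTEP run left
# mycalc.cell, mycalc.param and mycalc.castep in the working directory):
#
#     atoms = read_seed('mycalc')
#     write_cell('restarted.cell', atoms, force_write=True)
#     write_param('restarted.param', atoms.calc.param, force_write=True)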
| gpl-2.0 |
ijon/elliptics | bindings/python/routes.py | 1 | 2414 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# =============================================================================
# 2013+ Copyright (c) Kirill Smorodinnikov <shaitkir@gmail.com>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# =============================================================================
import sys
sys.path.append('bindings/python/')
import elliptics
import argparse
def percentage(routes):
from operator import itemgetter
percentages = routes.percentages()
for group in percentages:
print 'Group {0}:'.format(group)
for host in percentages[group]:
for backend_id in percentages[group][host]:
print '\thost {0}/{1}\t{2:.2f}'.format(host, backend_id, percentages[group][host][backend_id])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Get remote route table and print its statistics.')
parser.add_argument('remotes', metavar='N', type=str, nargs='+',
help='Remote nodes to connect and grab route tables. Format: addr:port:family, where family = 2 for ipv4 and 10 for ipv6')
parser.add_argument('--percentage', dest='percentage', action='store_true',
help='if present, dump parts of DHT ring each node occupies (in percents)')
parser.add_argument('--log', default='/dev/stdout', help='log file')
parser.add_argument('--log-level', type=int, default=elliptics.log_level.error,
help='log level: %d-%d' % (elliptics.log_level.error, elliptics.log_level.debug))
args = parser.parse_args()
if len(args.remotes) == 0:
args.remotes = "localhost:1025:2"
log = elliptics.Logger(args.log, args.log_level)
n = elliptics.Node(log)
s = elliptics.Session(n)
try:
n.add_remotes(args.remotes)
except Exception as e:
print e
pass
routes = s.get_routes()
if args.percentage:
percentage(routes)
else:
print routes
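# Example invocation (hypothetical address; family 2 selects IPv4):
#     python routes.py --percentage localhost:1025:2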
| lgpl-3.0 |
Unidata/MetPy | v0.11/startingguide-1.py | 4 | 1432 | import matplotlib.pyplot as plt
import numpy as np
import metpy.calc as mpcalc
from metpy.plots import SkewT
from metpy.units import units
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Create arrays of pressure, temperature, dewpoint, and wind components
p = [902, 897, 893, 889, 883, 874, 866, 857, 849, 841, 833, 824, 812, 796, 776, 751,
727, 704, 680, 656, 629, 597, 565, 533, 501, 468, 435, 401, 366, 331, 295, 258,
220, 182, 144, 106] * units.hPa
t = [-3, -3.7, -4.1, -4.5, -5.1, -5.8, -6.5, -7.2, -7.9, -8.6, -8.9, -7.6, -6, -5.1,
-5.2, -5.6, -5.4, -4.9, -5.2, -6.3, -8.4, -11.5, -14.9, -18.4, -21.9, -25.4,
-28, -32, -37, -43, -49, -54, -56, -57, -58, -60] * units.degC
td = [-22, -22.1, -22.2, -22.3, -22.4, -22.5, -22.6, -22.7, -22.8, -22.9, -22.4,
-21.6, -21.6, -21.9, -23.6, -27.1, -31, -38, -44, -46, -43, -37, -34, -36,
-42, -46, -49, -48, -47, -49, -55, -63, -72, -88, -93, -92] * units.degC
# Calculate parcel profile
prof = mpcalc.parcel_profile(p, t[0], td[0]).to('degC')
u = np.linspace(-10, 10, len(p)) * units.knots
v = np.linspace(-20, 20, len(p)) * units.knots
skew.plot(p, t, 'r')
skew.plot(p, td, 'g')
skew.plot(p, prof, 'k') # Plot parcel profile
skew.plot_barbs(p[::5], u[::5], v[::5])
skew.ax.set_xlim(-50, 15)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
plt.show() | bsd-3-clause |
sdiazpier/nest-simulator | pynest/nest/tests/test_rate_instantaneous_and_delayed.py | 20 | 3690 | # -*- coding: utf-8 -*-
#
# test_rate_instantaneous_and_delayed.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
import numpy as np
@nest.ll_api.check_stack
class RateInstantaneousAndDelayedTestCase(unittest.TestCase):
'''
    Test whether delayed rate connections have the same properties as
instantaneous connections but with the correct delay
'''
def test_rate_instantaneous_and_delayed(self):
# neuron parameters
neuron_params = {'tau': 5., 'sigma': 0.}
drive = 1.5
delay = 2.
weight = 0.5
# simulation parameters
simtime = 100.
dt = 0.001
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
nest.SetKernelStatus(
{'resolution': dt, 'use_wfr': True, 'print_time': False})
# set up rate neuron network
rate_neuron_drive = nest.Create(
'lin_rate_ipn', params={'mu': drive, 'sigma': 0.})
rate_neuron_1 = nest.Create(
'lin_rate_ipn', params=neuron_params)
rate_neuron_2 = nest.Create(
'lin_rate_ipn', params=neuron_params)
multimeter = nest.Create(
'multimeter', params={
'record_from': ['rate'],
'interval': dt})
# record rates and connect neurons
neurons = rate_neuron_1 + rate_neuron_2
nest.Connect(
multimeter, neurons, 'all_to_all', {'delay': 10.})
nest.Connect(rate_neuron_drive, rate_neuron_1,
'all_to_all',
{'synapse_model': 'rate_connection_instantaneous',
'weight': weight})
nest.Connect(rate_neuron_drive, rate_neuron_2,
'all_to_all', {'synapse_model': 'rate_connection_delayed',
'delay': delay,
'weight': weight})
# simulate
nest.Simulate(simtime)
# make sure shifted rates are identical
events = nest.GetStatus(multimeter)[0]['events']
senders = events['senders']
rate_1 = np.array(events['rate'][
np.where(senders == rate_neuron_1.get('global_id'))])
times_2 = np.array(events['times'][
np.where(senders == rate_neuron_2.get('global_id'))])
rate_2 = np.array(events['rate'][
np.where(senders == rate_neuron_2.get('global_id'))])
# get shifted rate_2
rate_2 = rate_2[times_2 > delay]
# adjust length of rate_1 to be able to substract
rate_1 = rate_1[:len(rate_2)]
assert(np.sum(np.abs(rate_2 - rate_1)) < 1e-12)
def suite():
# makeSuite is sort of obsolete http://bugs.python.org/issue2721
# using loadTestsFromTestCase instead.
suite1 = unittest.TestLoader().loadTestsFromTestCase(
RateInstantaneousAndDelayedTestCase)
return unittest.TestSuite([suite1])
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == '__main__':
run()
| gpl-2.0 |
ity/pants | src/python/pants/core_tasks/deferred_sources_mapper.py | 3 | 3049 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.source.payload_fields import DeferredSourcesField
from pants.task.task import Task
logger = logging.getLogger(__name__)
class DeferredSourcesMapper(Task):
"""Map DeferredSourcesFields to files that produce the product 'unpacked_archives'.
If you want a task to be able to map sources like this, make it require the 'deferred_sources'
product.
"""
class SourcesTargetLookupError(AddressLookupError):
"""Raised when the referenced target cannot be found in the build graph"""
pass
class NoUnpackedSourcesError(AddressLookupError):
"""Raised when there are no files found unpacked from the archive"""
pass
@classmethod
def product_types(cls):
"""
Declare product produced by this task
    deferred_sources does not have any data associated with it. Downstream tasks can
    depend on it just to make sure that this task completes first.
:return:
"""
return ['deferred_sources']
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data('unpacked_archives')
def execute(self):
deferred_sources_fields = []
def find_deferred_sources_fields(target):
for name, payload_field in target.payload.fields:
if isinstance(payload_field, DeferredSourcesField):
deferred_sources_fields.append((target, name, payload_field))
addresses = [target.address for target in self.context.targets()]
self.context.build_graph.walk_transitive_dependency_graph(addresses,
find_deferred_sources_fields)
unpacked_sources = self.context.products.get_data('unpacked_archives')
for (target, name, payload_field) in deferred_sources_fields:
sources_target = self.context.build_graph.get_target(payload_field.address)
if not sources_target:
raise self.SourcesTargetLookupError(
"Couldn't find {sources_spec} referenced from {target} field {name} in build graph"
.format(sources_spec=payload_field.address.spec, target=target.address.spec, name=name))
if not sources_target in unpacked_sources:
raise self.NoUnpackedSourcesError(
"Target {sources_spec} referenced from {target} field {name} did not unpack any sources"
.format(spec=sources_target.address.spec, target=target.address.spec, name=name))
sources, rel_unpack_dir = unpacked_sources[sources_target]
# We have no idea if rel_unpack_dir matches any of our source root patterns, so
# we explicitly register it here.
self.context.source_roots.add_source_root(rel_unpack_dir)
payload_field.populate(sources, rel_unpack_dir)
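# A minimal sketch (illustrative) of how a downstream task would arrange to
# run after this mapper: requiring the marker product forces the ordering,
# even though 'deferred_sources' itself carries no data.
#
#     class MyDownstreamTask(Task):
#       @classmethod
#       def prepare(cls, options, round_manager):
#         round_manager.require_data('deferred_sources')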
| apache-2.0 |
gf53520/kafka | tests/kafkatest/tests/tools/log4j_appender_test.py | 5 | 3986 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import wait_until
from ducktape.tests.test import Test
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka_log4j_appender import KafkaLog4jAppender
from kafkatest.services.security.security_config import SecurityConfig
TOPIC = "topic-log4j-appender"
MAX_MESSAGES = 100
class Log4jAppenderTest(Test):
"""
Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
"""
def __init__(self, test_context):
super(Log4jAppenderTest, self).__init__(test_context)
self.num_zk = 1
self.num_brokers = 1
self.messages_received_count = 0
self.topics = {
TOPIC: {'partitions': 1, 'replication-factor': 1}
}
self.zk = ZookeeperService(test_context, self.num_zk)
def setUp(self):
self.zk.start()
def start_kafka(self, security_protocol, interbroker_security_protocol):
self.kafka = KafkaService(
self.test_context, self.num_brokers,
self.zk, security_protocol=security_protocol,
interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
self.kafka.start()
def start_appender(self, security_protocol):
self.appender = KafkaLog4jAppender(self.test_context, self.num_brokers, self.kafka, TOPIC, MAX_MESSAGES,
security_protocol=security_protocol)
self.appender.start()
def custom_message_validator(self, msg):
if msg and "INFO : org.apache.kafka.tools.VerifiableLog4jAppender" in msg:
self.logger.debug("Received message: %s" % msg)
self.messages_received_count += 1
def start_consumer(self):
self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka, topic=TOPIC,
consumer_timeout_ms=10000,
message_validator=self.custom_message_validator)
self.consumer.start()
@cluster(num_nodes=4)
@matrix(security_protocol=['PLAINTEXT', 'SSL'])
@cluster(num_nodes=5)
@matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'])
def test_log4j_appender(self, security_protocol='PLAINTEXT'):
"""
Tests if KafkaLog4jAppender is producing to Kafka topic
:return: None
"""
self.start_kafka(security_protocol, security_protocol)
self.start_appender(security_protocol)
self.appender.wait()
self.start_consumer()
node = self.consumer.nodes[0]
wait_until(lambda: self.consumer.alive(node),
timeout_sec=20, backoff_sec=.2, err_msg="Consumer was too slow to start")
# Verify consumed messages count
wait_until(lambda: self.messages_received_count == MAX_MESSAGES, timeout_sec=10,
err_msg="Timed out waiting to consume expected number of messages.")
self.consumer.stop()
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django_1_2/django/contrib/localflavor/pl/pl_administrativeunits.py | 433 | 13194 | # -*- coding: utf-8 -*-
"""
Polish administrative units as in http://pl.wikipedia.org/wiki/Podzia%C5%82_administracyjny_Polski
"""
ADMINISTRATIVE_UNIT_CHOICES = (
('wroclaw', u'Wrocław'),
('jeleniagora', u'Jelenia Góra'),
('legnica', u'Legnica'),
('boleslawiecki', u'bolesławiecki'),
('dzierzoniowski', u'dzierżoniowski'),
('glogowski', u'głogowski'),
('gorowski', u'górowski'),
('jaworski', u'jaworski'),
('jeleniogorski', u'jeleniogórski'),
('kamiennogorski', u'kamiennogórski'),
('klodzki', u'kłodzki'),
('legnicki', u'legnicki'),
('lubanski', u'lubański'),
('lubinski', u'lubiński'),
('lwowecki', u'lwówecki'),
('milicki', u'milicki'),
('olesnicki', u'oleśnicki'),
('olawski', u'oławski'),
('polkowicki', u'polkowicki'),
('strzelinski', u'strzeliński'),
('sredzki', u'średzki'),
('swidnicki', u'świdnicki'),
('trzebnicki', u'trzebnicki'),
('walbrzyski', u'wałbrzyski'),
('wolowski', u'wołowski'),
('wroclawski', u'wrocławski'),
('zabkowicki', u'ząbkowicki'),
('zgorzelecki', u'zgorzelecki'),
('zlotoryjski', u'złotoryjski'),
('bydgoszcz', u'Bydgoszcz'),
('torun', u'Toruń'),
('wloclawek', u'Włocławek'),
('grudziadz', u'Grudziądz'),
('aleksandrowski', u'aleksandrowski'),
('brodnicki', u'brodnicki'),
('bydgoski', u'bydgoski'),
('chelminski', u'chełmiński'),
('golubsko-dobrzynski', u'golubsko-dobrzyński'),
('grudziadzki', u'grudziądzki'),
('inowroclawski', u'inowrocławski'),
('lipnowski', u'lipnowski'),
('mogilenski', u'mogileński'),
('nakielski', u'nakielski'),
('radziejowski', u'radziejowski'),
('rypinski', u'rypiński'),
('sepolenski', u'sępoleński'),
('swiecki', u'świecki'),
('torunski', u'toruński'),
('tucholski', u'tucholski'),
('wabrzeski', u'wąbrzeski'),
('wloclawski', u'włocławski'),
('zninski', u'żniński'),
('lublin', u'Lublin'),
('biala-podlaska', u'Biała Podlaska'),
('chelm', u'Chełm'),
('zamosc', u'Zamość'),
('bialski', u'bialski'),
('bilgorajski', u'biłgorajski'),
('chelmski', u'chełmski'),
('hrubieszowski', u'hrubieszowski'),
('janowski', u'janowski'),
('krasnostawski', u'krasnostawski'),
('krasnicki', u'kraśnicki'),
('lubartowski', u'lubartowski'),
('lubelski', u'lubelski'),
('leczynski', u'łęczyński'),
('lukowski', u'łukowski'),
('opolski', u'opolski'),
('parczewski', u'parczewski'),
('pulawski', u'puławski'),
('radzynski', u'radzyński'),
('rycki', u'rycki'),
('swidnicki', u'świdnicki'),
('tomaszowski', u'tomaszowski'),
('wlodawski', u'włodawski'),
('zamojski', u'zamojski'),
('gorzow-wielkopolski', u'Gorzów Wielkopolski'),
('zielona-gora', u'Zielona Góra'),
('gorzowski', u'gorzowski'),
('krosnienski', u'krośnieński'),
('miedzyrzecki', u'międzyrzecki'),
('nowosolski', u'nowosolski'),
('slubicki', u'słubicki'),
('strzelecko-drezdenecki', u'strzelecko-drezdenecki'),
('sulecinski', u'sulęciński'),
('swiebodzinski', u'świebodziński'),
('wschowski', u'wschowski'),
('zielonogorski', u'zielonogórski'),
('zaganski', u'żagański'),
('zarski', u'żarski'),
('lodz', u'Łódź'),
('piotrkow-trybunalski', u'Piotrków Trybunalski'),
('skierniewice', u'Skierniewice'),
('belchatowski', u'bełchatowski'),
('brzezinski', u'brzeziński'),
('kutnowski', u'kutnowski'),
('laski', u'łaski'),
('leczycki', u'łęczycki'),
('lowicki', u'łowicki'),
('lodzki wschodni', u'łódzki wschodni'),
('opoczynski', u'opoczyński'),
('pabianicki', u'pabianicki'),
('pajeczanski', u'pajęczański'),
('piotrkowski', u'piotrkowski'),
('poddebicki', u'poddębicki'),
('radomszczanski', u'radomszczański'),
('rawski', u'rawski'),
('sieradzki', u'sieradzki'),
('skierniewicki', u'skierniewicki'),
('tomaszowski', u'tomaszowski'),
('wielunski', u'wieluński'),
('wieruszowski', u'wieruszowski'),
('zdunskowolski', u'zduńskowolski'),
('zgierski', u'zgierski'),
('krakow', u'Kraków'),
('tarnow', u'Tarnów'),
('nowy-sacz', u'Nowy Sącz'),
('bochenski', u'bocheński'),
('brzeski', u'brzeski'),
('chrzanowski', u'chrzanowski'),
('dabrowski', u'dąbrowski'),
('gorlicki', u'gorlicki'),
('krakowski', u'krakowski'),
('limanowski', u'limanowski'),
('miechowski', u'miechowski'),
('myslenicki', u'myślenicki'),
('nowosadecki', u'nowosądecki'),
('nowotarski', u'nowotarski'),
('olkuski', u'olkuski'),
('oswiecimski', u'oświęcimski'),
('proszowicki', u'proszowicki'),
('suski', u'suski'),
('tarnowski', u'tarnowski'),
('tatrzanski', u'tatrzański'),
('wadowicki', u'wadowicki'),
('wielicki', u'wielicki'),
('warszawa', u'Warszawa'),
('ostroleka', u'Ostrołęka'),
('plock', u'Płock'),
('radom', u'Radom'),
('siedlce', u'Siedlce'),
('bialobrzeski', u'białobrzeski'),
('ciechanowski', u'ciechanowski'),
('garwolinski', u'garwoliński'),
('gostyninski', u'gostyniński'),
('grodziski', u'grodziski'),
('grojecki', u'grójecki'),
('kozienicki', u'kozienicki'),
('legionowski', u'legionowski'),
('lipski', u'lipski'),
('losicki', u'łosicki'),
('makowski', u'makowski'),
('minski', u'miński'),
('mlawski', u'mławski'),
('nowodworski', u'nowodworski'),
('ostrolecki', u'ostrołęcki'),
('ostrowski', u'ostrowski'),
('otwocki', u'otwocki'),
('piaseczynski', u'piaseczyński'),
('plocki', u'płocki'),
('plonski', u'płoński'),
('pruszkowski', u'pruszkowski'),
('przasnyski', u'przasnyski'),
('przysuski', u'przysuski'),
('pultuski', u'pułtuski'),
('radomski', u'radomski'),
('siedlecki', u'siedlecki'),
('sierpecki', u'sierpecki'),
('sochaczewski', u'sochaczewski'),
('sokolowski', u'sokołowski'),
('szydlowiecki', u'szydłowiecki'),
('warszawski-zachodni', u'warszawski zachodni'),
('wegrowski', u'węgrowski'),
('wolominski', u'wołomiński'),
('wyszkowski', u'wyszkowski'),
('zwolenski', u'zwoleński'),
('zurominski', u'żuromiński'),
('zyrardowski', u'żyrardowski'),
('opole', u'Opole'),
('brzeski', u'brzeski'),
('glubczycki', u'głubczycki'),
('kedzierzynsko-kozielski', u'kędzierzyńsko-kozielski'),
('kluczborski', u'kluczborski'),
('krapkowicki', u'krapkowicki'),
('namyslowski', u'namysłowski'),
('nyski', u'nyski'),
('oleski', u'oleski'),
('opolski', u'opolski'),
('prudnicki', u'prudnicki'),
('strzelecki', u'strzelecki'),
('rzeszow', u'Rzeszów'),
('krosno', u'Krosno'),
('przemysl', u'Przemyśl'),
('tarnobrzeg', u'Tarnobrzeg'),
('bieszczadzki', u'bieszczadzki'),
('brzozowski', u'brzozowski'),
('debicki', u'dębicki'),
('jaroslawski', u'jarosławski'),
('jasielski', u'jasielski'),
('kolbuszowski', u'kolbuszowski'),
('krosnienski', u'krośnieński'),
('leski', u'leski'),
('lezajski', u'leżajski'),
('lubaczowski', u'lubaczowski'),
('lancucki', u'łańcucki'),
('mielecki', u'mielecki'),
('nizanski', u'niżański'),
('przemyski', u'przemyski'),
('przeworski', u'przeworski'),
('ropczycko-sedziszowski', u'ropczycko-sędziszowski'),
('rzeszowski', u'rzeszowski'),
('sanocki', u'sanocki'),
('stalowowolski', u'stalowowolski'),
('strzyzowski', u'strzyżowski'),
('tarnobrzeski', u'tarnobrzeski'),
('bialystok', u'Białystok'),
('lomza', u'Łomża'),
('suwalki', u'Suwałki'),
('augustowski', u'augustowski'),
('bialostocki', u'białostocki'),
('bielski', u'bielski'),
('grajewski', u'grajewski'),
('hajnowski', u'hajnowski'),
('kolnenski', u'kolneński'),
('lomzynski', u'łomżyński'),
('moniecki', u'moniecki'),
('sejnenski', u'sejneński'),
('siemiatycki', u'siemiatycki'),
('sokolski', u'sokólski'),
('suwalski', u'suwalski'),
('wysokomazowiecki', u'wysokomazowiecki'),
('zambrowski', u'zambrowski'),
('gdansk', u'Gdańsk'),
('gdynia', u'Gdynia'),
('slupsk', u'Słupsk'),
('sopot', u'Sopot'),
('bytowski', u'bytowski'),
('chojnicki', u'chojnicki'),
('czluchowski', u'człuchowski'),
('kartuski', u'kartuski'),
('koscierski', u'kościerski'),
('kwidzynski', u'kwidzyński'),
('leborski', u'lęborski'),
('malborski', u'malborski'),
('nowodworski', u'nowodworski'),
('gdanski', u'gdański'),
('pucki', u'pucki'),
('slupski', u'słupski'),
('starogardzki', u'starogardzki'),
('sztumski', u'sztumski'),
('tczewski', u'tczewski'),
('wejherowski', u'wejherowski'),
('katowice', u'Katowice'),
('bielsko-biala', u'Bielsko-Biała'),
('bytom', u'Bytom'),
('chorzow', u'Chorzów'),
('czestochowa', u'Częstochowa'),
('dabrowa-gornicza', u'Dąbrowa Górnicza'),
('gliwice', u'Gliwice'),
('jastrzebie-zdroj', u'Jastrzębie Zdrój'),
('jaworzno', u'Jaworzno'),
('myslowice', u'Mysłowice'),
('piekary-slaskie', u'Piekary Śląskie'),
('ruda-slaska', u'Ruda Śląska'),
('rybnik', u'Rybnik'),
('siemianowice-slaskie', u'Siemianowice Śląskie'),
('sosnowiec', u'Sosnowiec'),
('swietochlowice', u'Świętochłowice'),
('tychy', u'Tychy'),
('zabrze', u'Zabrze'),
('zory', u'Żory'),
('bedzinski', u'będziński'),
('bielski', u'bielski'),
('bierunsko-ledzinski', u'bieruńsko-lędziński'),
('cieszynski', u'cieszyński'),
('czestochowski', u'częstochowski'),
('gliwicki', u'gliwicki'),
('klobucki', u'kłobucki'),
('lubliniecki', u'lubliniecki'),
('mikolowski', u'mikołowski'),
('myszkowski', u'myszkowski'),
('pszczynski', u'pszczyński'),
('raciborski', u'raciborski'),
('rybnicki', u'rybnicki'),
('tarnogorski', u'tarnogórski'),
('wodzislawski', u'wodzisławski'),
('zawiercianski', u'zawierciański'),
('zywiecki', u'żywiecki'),
('kielce', u'Kielce'),
('buski', u'buski'),
('jedrzejowski', u'jędrzejowski'),
('kazimierski', u'kazimierski'),
('kielecki', u'kielecki'),
('konecki', u'konecki'),
('opatowski', u'opatowski'),
('ostrowiecki', u'ostrowiecki'),
('pinczowski', u'pińczowski'),
('sandomierski', u'sandomierski'),
('skarzyski', u'skarżyski'),
('starachowicki', u'starachowicki'),
('staszowski', u'staszowski'),
('wloszczowski', u'włoszczowski'),
('olsztyn', u'Olsztyn'),
('elblag', u'Elbląg'),
('bartoszycki', u'bartoszycki'),
('braniewski', u'braniewski'),
('dzialdowski', u'działdowski'),
('elblaski', u'elbląski'),
('elcki', u'ełcki'),
('gizycki', u'giżycki'),
('goldapski', u'gołdapski'),
('ilawski', u'iławski'),
('ketrzynski', u'kętrzyński'),
('lidzbarski', u'lidzbarski'),
('mragowski', u'mrągowski'),
('nidzicki', u'nidzicki'),
('nowomiejski', u'nowomiejski'),
('olecki', u'olecki'),
('olsztynski', u'olsztyński'),
('ostrodzki', u'ostródzki'),
('piski', u'piski'),
('szczycienski', u'szczycieński'),
('wegorzewski', u'węgorzewski'),
('poznan', u'Poznań'),
('kalisz', u'Kalisz'),
('konin', u'Konin'),
('leszno', u'Leszno'),
('chodzieski', u'chodzieski'),
('czarnkowsko-trzcianecki', u'czarnkowsko-trzcianecki'),
('gnieznienski', u'gnieźnieński'),
('gostynski', u'gostyński'),
('grodziski', u'grodziski'),
('jarocinski', u'jarociński'),
('kaliski', u'kaliski'),
('kepinski', u'kępiński'),
('kolski', u'kolski'),
('koninski', u'koniński'),
('koscianski', u'kościański'),
('krotoszynski', u'krotoszyński'),
('leszczynski', u'leszczyński'),
('miedzychodzki', u'międzychodzki'),
('nowotomyski', u'nowotomyski'),
('obornicki', u'obornicki'),
('ostrowski', u'ostrowski'),
('ostrzeszowski', u'ostrzeszowski'),
('pilski', u'pilski'),
('pleszewski', u'pleszewski'),
('poznanski', u'poznański'),
('rawicki', u'rawicki'),
('slupecki', u'słupecki'),
('szamotulski', u'szamotulski'),
('sredzki', u'średzki'),
('sremski', u'śremski'),
('turecki', u'turecki'),
('wagrowiecki', u'wągrowiecki'),
('wolsztynski', u'wolsztyński'),
('wrzesinski', u'wrzesiński'),
('zlotowski', u'złotowski'),
('bialogardzki', u'białogardzki'),
('choszczenski', u'choszczeński'),
('drawski', u'drawski'),
('goleniowski', u'goleniowski'),
('gryficki', u'gryficki'),
('gryfinski', u'gryfiński'),
('kamienski', u'kamieński'),
('kolobrzeski', u'kołobrzeski'),
('koszalinski', u'koszaliński'),
('lobeski', u'łobeski'),
('mysliborski', u'myśliborski'),
('policki', u'policki'),
('pyrzycki', u'pyrzycki'),
('slawienski', u'sławieński'),
('stargardzki', u'stargardzki'),
('szczecinecki', u'szczecinecki'),
('swidwinski', u'świdwiński'),
('walecki', u'wałecki'),
)
| bsd-3-clause |
zstackorg/zstack-woodpecker | integrationtest/vm/virtualrouter/snapshot/test_create_snapshot.py | 4 | 3055 | '''
Test create/restore snapshot functions. In this test, the snapshot is created
on an unattached data volume.
@author: Youyk
'''
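# The test below builds this snapshot tree on the detached volume
# (illustrative sketch of the calls, not output from the framework):
#
#   snapshot1 -- snapshot2 -- snapshot3 -- snapshot4
#       |-- snapshot1.1.1 -- snapshot1.1.2
#       `-- snapshot1.2.1 -- snapshot1.2.2
#
# snapshot3 and snapshot1.2.1 are then deleted to exercise tree pruning.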
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_sp_header
import os
import time
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Create test vm as utility vm')
vm = test_stub.create_vlan_vm()
test_obj_dict.add_vm(vm)
#vm1 = test_stub.create_vlan_vm()
#test_obj_dict.add_vm(vm1)
test_util.test_dsc('Create volume for snapshot testing')
disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
volume_creation_option = test_util.VolumeOption()
volume_creation_option.set_name('volume for snapshot testing')
volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
volume = test_stub.create_volume(volume_creation_option)
test_obj_dict.add_volume(volume)
#make sure utility vm is starting and running
vm.check()
volume.attach(vm)
volume.detach()
#vm1.stop()
test_util.test_dsc('create snapshot and check')
#snapshots = zstack_sp_header.ZstackVolumeSnapshot()
#snapshots.set_target_volume(volume)
#test_obj_dict.add_volume_snapshot(snapshots)
snapshots = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
snapshots.set_utility_vm(vm)
snapshots.create_snapshot('create_snapshot1')
snapshots.check()
snapshot1 = snapshots.get_current_snapshot()
snapshots.create_snapshot('create_snapshot2')
snapshots.check()
snapshots.create_snapshot('create_snapshot3')
snapshots.check()
snapshot3 = snapshots.get_current_snapshot()
snapshots.use_snapshot(snapshot1)
snapshots.create_snapshot('create_snapshot1.1.1')
snapshots.check()
snapshots.create_snapshot('create_snapshot1.1.2')
snapshots.check()
snapshots.use_snapshot(snapshot1)
snapshots.create_snapshot('create_snapshot1.2.1')
snapshots.check()
snapshot1_2_1 = snapshots.get_current_snapshot()
snapshots.create_snapshot('create_snapshot1.2.2')
snapshots.check()
snapshots.use_snapshot(snapshot3)
snapshots.check()
snapshots.create_snapshot('create_snapshot4')
snapshots.check()
test_util.test_dsc('Delete snapshot, volume and check')
snapshots.delete_snapshot(snapshot3)
snapshots.check()
snapshots.delete_snapshot(snapshot1_2_1)
snapshots.check()
snapshots.delete()
test_obj_dict.rm_volume_snapshot(snapshots)
volume.check()
volume.delete()
test_obj_dict.rm_volume(volume)
vm.destroy()
test_util.test_pass('Create Snapshot test Success')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 |
imsparsh/python-for-android | python3-alpha/python3-src/Lib/copy.py | 51 | 10331 | """Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copyreg import dispatch_table
import builtins
class Error(Exception):
pass
error = Error # backward compatibility
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
"""Shallow copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
cls = type(x)
copier = _copy_dispatch.get(cls)
if copier:
return copier(x)
copier = getattr(cls, "__copy__", None)
if copier:
return copier(x)
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error("un(shallow)copyable object of type %s" % cls)
return _reconstruct(x, rv, 0)
_copy_dispatch = d = {}
def _copy_immutable(x):
return x
for t in (type(None), int, float, bool, str, tuple,
frozenset, type, range,
types.BuiltinFunctionType, type(Ellipsis),
types.FunctionType, weakref.ref):
d[t] = _copy_immutable
t = getattr(types, "CodeType", None)
if t is not None:
d[t] = _copy_immutable
for name in ("complex", "unicode"):
t = getattr(builtins, name, None)
if t is not None:
d[t] = _copy_immutable
def _copy_with_constructor(x):
return type(x)(x)
for t in (list, dict, set):
d[t] = _copy_with_constructor
def _copy_with_copy_method(x):
return x.copy()
if PyStringMap is not None:
d[PyStringMap] = _copy_with_copy_method
del d
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
y = _reconstruct(x, rv, 1, memo)
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
d[complex] = _deepcopy_atomic
except NameError:
pass
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
d[types.CodeType] = _deepcopy_atomic
except AttributeError:
pass
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
y = []
memo[id(x)] = y
for a in x:
y.append(deepcopy(a, memo))
return y
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
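# Copy the items first; if the tuple then appears in memo, it was reached
# through a recursive reference during those copies and that result is
# reused. If no item changed identity, the original tuple is returned.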
y = []
for a in x:
y.append(deepcopy(a, memo))
d = id(x)
try:
return memo[d]
except KeyError:
pass
for i in range(len(x)):
if x[i] is not y[i]:
y = tuple(y)
break
else:
y = x
memo[d] = y
return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
y = {}
memo[id(x)] = y
for key, value in x.items():
y[deepcopy(key, memo)] = deepcopy(value, memo)
return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo): # Copy instance methods
return type(x)(x.__func__, deepcopy(x.__self__, memo))
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _reconstruct(x, info, deep, memo=None):
if isinstance(info, str):
return x
assert isinstance(info, tuple)
if memo is None:
memo = {}
n = len(info)
assert n in (2, 3, 4, 5)
callable, args = info[:2]
if n > 2:
state = info[2]
else:
state = {}
if n > 3:
listiter = info[3]
else:
listiter = None
if n > 4:
dictiter = info[4]
else:
dictiter = None
if deep:
args = deepcopy(args, memo)
y = callable(*args)
memo[id(x)] = y
if state:
if deep:
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
else:
slotstate = None
if state is not None:
y.__dict__.update(state)
if slotstate is not None:
for key, value in slotstate.items():
setattr(y, key, value)
if listiter is not None:
for item in listiter:
if deep:
item = deepcopy(item, memo)
y.append(item)
if dictiter is not None:
for key, value in dictiter:
if deep:
key = deepcopy(key, memo)
value = deepcopy(value, memo)
y[key] = value
return y
del d
del types
# Helper for instance creation without calling __init__
class _EmptyClass:
pass
def _test():
l = [None, 1, 2, 3.14, 'xyzzy', (1, 2), [3.14, 'abc'],
{'abc': 'ABC'}, (), [], {}]
l1 = copy(l)
print(l1==l)
l1 = map(copy, l)
print(l1==l)
l1 = deepcopy(l)
print(l1==l)
class C:
def __init__(self, arg=None):
self.a = 1
self.arg = arg
if __name__ == '__main__':
import sys
file = sys.argv[0]
else:
file = __file__
self.fp = open(file)
self.fp.close()
def __getstate__(self):
return {'a': self.a, 'arg': self.arg}
def __setstate__(self, state):
for key, value in state.items():
setattr(self, key, value)
def __deepcopy__(self, memo=None):
new = self.__class__(deepcopy(self.arg, memo))
new.a = self.a
return new
c = C('argument sketch')
l.append(c)
l2 = copy(l)
print(l == l2)
print(l)
print(l2)
l2 = deepcopy(l)
print(l == l2)
print(l)
print(l2)
l.append({l[1]: l, 'xyz': l[2]})
l3 = copy(l)
import reprlib
print(map(reprlib.repr, l))
print(map(reprlib.repr, l1))
print(map(reprlib.repr, l2))
print(map(reprlib.repr, l3))
l3 = deepcopy(l)
print(map(reprlib.repr, l))
print(map(reprlib.repr, l1))
print(map(reprlib.repr, l2))
print(map(reprlib.repr, l3))
class odict(dict):
def __init__(self, d = {}):
self.a = 99
dict.__init__(self, d)
def __setitem__(self, k, i):
dict.__setitem__(self, k, i)
self.a
o = odict({"A" : "B"})
x = deepcopy(o)
print(o, x)
if __name__ == '__main__':
_test()
| apache-2.0 |
hglkrijger/azure-linux-automation | remote-scripts/start-server-without-stopping.py | 8 | 1488 | #!/usr/bin/python
##########################################
#THIS SCRIPT ACCEPTS SOME SERVER PARAMETERS.
#PLEASE RUN THE SCRIPT WITH -h OR --help FOR MORE DETAILS.
##########################################
from azuremodules import *
import argparse
import sys
#for error checking
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--udp', help='switch: starts the server in UDP packet listening mode.', choices=['yes', 'no'])
parser.add_argument('-p', '--port', help='specifies which port should be used', required=True, type= int)
parser.add_argument('-m', '--mss_print', help='display the Maximum Segment Size', choices=['yes', 'no'])
parser.add_argument('-M', '--mss', help='Maximum Segment Size Settings', type = int)
parser.add_argument('-log', '--log', help='Log File')
parser.add_argument('-i', '--interval', help='specifies the frequency of the output displayed on screen', type=int)
args = parser.parse_args()
#build the iperf server command from the parsed options
command = 'iperf -s' + ' -p' + str(args.port) + ' -f K'
if args.interval != None :
command = command + ' -i' + str(args.interval)
if args.udp == 'yes':
command = command + ' -u'
if args.mss != None:
command = command + ' -M' + str(args.mss)
if args.mss_print == 'yes':
command = command + ' -m'
#finalCommand = 'nohup ' + command + ' >> ' + str(args.log) + ' &'
finalCommand = command + ' >> ' + str(args.log)
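# Example (illustrative): "-p 5001 -u yes -i 10 -log /tmp/iperf.log" yields
#   iperf -s -p5001 -f K -i10 -u >> /tmp/iperf.log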
server = finalCommand
temp = Run(server)
tmp = Run("sleep 1")
| apache-2.0 |
cvegaj/ElectriCERT | venv3/lib/python3.6/site-packages/py/_error.py | 5 | 2928 | """
create errno-specific classes for IO or os calls.
"""
import sys, os, errno
class Error(EnvironmentError):
def __repr__(self):
return "%s.%s %r: %s " %(self.__class__.__module__,
self.__class__.__name__,
self.__class__.__doc__,
" ".join(map(str, self.args)),
#repr(self.args)
)
def __str__(self):
s = "[%s]: %s" %(self.__class__.__doc__,
" ".join(map(str, self.args)),
)
return s
_winerrnomap = {
2: errno.ENOENT,
3: errno.ENOENT,
17: errno.EEXIST,
18: errno.EXDEV,
13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable
22: errno.ENOTDIR,
20: errno.ENOTDIR,
267: errno.ENOTDIR,
5: errno.EACCES, # anything better?
}
class ErrorMaker(object):
""" lazily provides Exception classes for each possible POSIX errno
(as defined per the 'errno' module). All such instances
subclass EnvironmentError.
"""
Error = Error
_errno2class = {}
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError(name)
eno = getattr(errno, name)
cls = self._geterrnoclass(eno)
setattr(self, name, cls)
return cls
def _geterrnoclass(self, eno):
try:
return self._errno2class[eno]
except KeyError:
clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
errorcls = type(Error)(clsname, (Error,),
{'__module__':'py.error',
'__doc__': os.strerror(eno)})
self._errno2class[eno] = errorcls
return errorcls
def checked_call(self, func, *args, **kwargs):
""" call a function and raise an errno-exception if applicable. """
__tracebackhide__ = True
try:
return func(*args, **kwargs)
except self.Error:
raise
except (OSError, EnvironmentError):
cls, value, tb = sys.exc_info()
if not hasattr(value, 'errno'):
raise
__tracebackhide__ = False
errno = value.errno
try:
if not isinstance(value, WindowsError):
raise NameError
except NameError:
# we are not on Windows, or we got a proper OSError
cls = self._geterrnoclass(errno)
else:
try:
cls = self._geterrnoclass(_winerrnomap[errno])
except KeyError:
raise value
raise cls("%s%r" % (func.__name__, args))
__tracebackhide__ = True
error = ErrorMaker()
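# Typical usage (illustrative): errno-specific classes are created lazily,
# so callers can catch e.g. py.error.ENOENT instead of a bare OSError:
#
#   error.checked_call(os.remove, "/no/such/file") # raises error.ENOENT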
| gpl-3.0 |
yhoogstrate/tools-iuc | tools/progressivemauve/xmfa2gff3.py | 4 | 5559 | #!/usr/bin/env python
import argparse
import logging
import sys
from BCBio import GFF
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqFeature import (
FeatureLocation,
SeqFeature
)
from Bio.SeqRecord import SeqRecord
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def parse_xmfa(xmfa):
"""Simple XMFA parser until https://github.com/biopython/biopython/pull/544
"""
current_lcb = []
current_seq = {}
for line in xmfa.readlines():
if line.startswith('#'):
continue
if line.strip() == '=':
if 'id' in current_seq:
current_lcb.append(current_seq)
current_seq = {}
yield current_lcb
current_lcb = []
else:
line = line.strip()
if line.startswith('>'):
if 'id' in current_seq:
current_lcb.append(current_seq)
current_seq = {}
data = line.strip().split()
id, loc = data[1].split(':')
start, end = loc.split('-')
current_seq = {
'rid': '_'.join(data[1:]),
'id': id,
'start': int(start),
'end': int(end),
'strand': 1 if data[2] == '+' else -1,
'seq': ''
}
else:
current_seq['seq'] += line.strip()
def _percent_identity(a, b):
"""Calculate % identity, ignoring gaps in the host sequence
"""
match = 0
mismatch = 0
for char_a, char_b in zip(list(a), list(b)):
if char_a == '-':
continue
if char_a == char_b:
match += 1
else:
mismatch += 1
if match + mismatch == 0:
return 0
return 100 * float(match) / (match + mismatch)
def _id_tn_dict(sequences):
"""Figure out sequence IDs
"""
label_convert = {}
if sequences is not None:
if len(sequences) == 1:
for i, record in enumerate(SeqIO.parse(sequences[0], 'fasta')):
label_convert[str(i + 1)] = record.id
else:
for i, sequence in enumerate(sequences):
for record in SeqIO.parse(sequence, 'fasta'):
label_convert[str(i + 1)] = record.id
continue
return label_convert
def convert_xmfa_to_gff3(xmfa_file, relative_to='1', sequences=None, window_size=1000):
label_convert = _id_tn_dict(sequences)
lcbs = parse_xmfa(xmfa_file)
records = [SeqRecord(Seq("A"), id=label_convert.get(relative_to, relative_to))]
for lcb in lcbs:
ids = [seq['id'] for seq in lcb]
# Doesn't match part of our sequence
if relative_to not in ids:
continue
# Skip sequences that are JUST our "relative_to" genome
if len(ids) == 1:
continue
parent = [seq for seq in lcb if seq['id'] == relative_to][0]
others = [seq for seq in lcb if seq['id'] != relative_to]
for other in others:
other['feature'] = SeqFeature(
FeatureLocation(parent['start'], parent['end'] + 1),
type="match", strand=parent['strand'],
qualifiers={
"source": "progressiveMauve",
"target": label_convert.get(other['id'], other['id']),
"ID": label_convert.get(other['id'], 'xmfa_' + other['rid'])
}
)
for i in range(0, len(lcb[0]['seq']), window_size):
block_seq = parent['seq'][i:i + window_size]
real_window_size = len(block_seq)
real_start = abs(parent['start']) - parent['seq'][0:i].count('-') + i
real_end = real_start + real_window_size - block_seq.count('-')
if (real_end - real_start) < 10:
continue
if parent['start'] < 0:
strand = -1
else:
strand = 1
for other in others:
pid = _percent_identity(block_seq, other['seq'][i:i + real_window_size])
# Ignore 0% identity sequences
if pid == 0:
continue
other['feature'].sub_features.append(
SeqFeature(
FeatureLocation(real_start, real_end),
type="match_part", strand=strand,
qualifiers={
"source": "progressiveMauve",
'score': pid
}
)
)
for other in others:
records[0].features.append(other['feature'])
return records
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert XMFA alignments to gff3', prog='xmfa2gff3')
parser.add_argument('xmfa_file', type=file, help='XMFA File')
parser.add_argument('--window_size', type=int, help='Window size for analysis', default=1000)
parser.add_argument('--relative_to', type=str, help='Index of the parent sequence in the MSA', default='1')
parser.add_argument('--sequences', type=file, nargs='+',
help='Fasta files (in same order) passed to parent for reconstructing proper IDs')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
result = convert_xmfa_to_gff3(**vars(args))
GFF.write(result, sys.stdout)
| mit |
susansalkeld/discsongs | discsongs/lib/python2.7/site-packages/pip/_vendor/requests/utils.py | 303 | 19973 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
return len(o.getvalue())
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if name and name[0] != '<' and name[-1] != '>':
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. every encodings from ``<meta ... charset=XXX>``
3. fall back and replace all unicode characters
"""
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved, unreserved,
# or '%')
return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
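# e.g. (illustrative): requote_uri('http://host/a%2Fb%41') keeps the
# reserved %2F escaped but unquotes %41 to the unreserved character 'A'.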
def address_in_network(ip, net):
"""
This function allows you to check if on IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*'
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in value.split(","):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
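# e.g. (illustrative): guess_json_utf(b'{"a": 1}') returns 'utf-8', while
# the same payload encoded as UTF-16-LE is detected as 'utf-16-le'.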
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
| mit |
pratikmallya/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Util/asn1.py | 122 | 12727 | # -*- coding: ascii -*-
#
# Util/asn1.py : Minimal support for ASN.1 DER binary encoding.
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from Crypto.Util.number import long_to_bytes, bytes_to_long
import sys
from Crypto.Util.py3compat import *
__all__ = [ 'DerObject', 'DerInteger', 'DerOctetString', 'DerNull', 'DerSequence', 'DerObjectId' ]
class DerObject:
"""Base class for defining a single DER object.
Instantiate this class ONLY when you have to decode a DER element.
"""
# Known TAG types
typeTags = { 'SEQUENCE': 0x30, 'BIT STRING': 0x03, 'INTEGER': 0x02,
'OCTET STRING': 0x04, 'NULL': 0x05, 'OBJECT IDENTIFIER': 0x06 }
def __init__(self, ASN1Type=None, payload=b('')):
"""Initialize the DER object according to a specific type.
The ASN.1 type is either specified as the ASN.1 string (e.g.
'SEQUENCE'), directly with its numerical tag or with no tag
at all (None)."""
if isInt(ASN1Type) or ASN1Type is None:
self.typeTag = ASN1Type
else:
if len(ASN1Type)==1:
self.typeTag = ord(ASN1Type)
else:
self.typeTag = self.typeTags.get(ASN1Type)
self.payload = payload
def isType(self, ASN1Type):
return self.typeTags[ASN1Type]==self.typeTag
def _lengthOctets(self, payloadLen):
"""Return a byte string that encodes the given payload length (in
bytes) in a format suitable for a DER length tag (L).
"""
if payloadLen>127:
encoding = long_to_bytes(payloadLen)
return bchr(len(encoding)+128) + encoding
return bchr(payloadLen)
def encode(self):
"""Return a complete DER element, fully encoded as a TLV."""
return bchr(self.typeTag) + self._lengthOctets(len(self.payload)) + self.payload
def _decodeLen(self, idx, der):
"""Given a (part of a) DER element, and an index to the first byte of
a DER length tag (L), return a tuple with the payload size,
and the index of the first byte of the such payload (V).
Raises a ValueError exception if the DER length is invalid.
Raises an IndexError exception if the DER element is too short.
"""
length = bord(der[idx])
if length<=127:
return (length,idx+1)
payloadLength = bytes_to_long(der[idx+1:idx+1+(length & 0x7F)])
if payloadLength<=127:
raise ValueError("Not a DER length tag.")
return (payloadLength, idx+1+(length & 0x7F))
def decode(self, derEle, noLeftOvers=0):
"""Decode a complete DER element, and re-initializes this
object with it.
@param derEle A complete DER element. It must start with a DER T
tag.
@param noLeftOvers Indicate whether it is acceptable to complete the
parsing of the DER element and find that not all
bytes in derEle have been used.
@return Index of the first unused byte in the given DER element.
Raises a ValueError exception in case of parsing errors.
Raises an IndexError exception if the DER element is too short.
"""
try:
self.typeTag = bord(derEle[0])
if (self.typeTag & 0x1F)==0x1F:
raise ValueError("Unsupported DER tag")
(length,idx) = self._decodeLen(1, derEle)
if noLeftOvers and len(derEle) != (idx+length):
raise ValueError("Not a DER structure")
self.payload = derEle[idx:idx+length]
except IndexError:
raise ValueError("Not a valid DER SEQUENCE.")
return idx+length
class DerInteger(DerObject):
def __init__(self, value = 0):
"""Class to model an INTEGER DER element.
Limitation: only non-negative values are supported.
"""
DerObject.__init__(self, 'INTEGER')
self.value = value
def encode(self):
"""Return a complete INTEGER DER element, fully encoded as a TLV."""
self.payload = long_to_bytes(self.value)
if bord(self.payload[0])>127:
self.payload = bchr(0x00) + self.payload
return DerObject.encode(self)
def decode(self, derEle, noLeftOvers=0):
"""Decode a complete INTEGER DER element, and re-initializes this
object with it.
@param derEle A complete INTEGER DER element. It must start with a DER
INTEGER tag.
@param noLeftOvers Indicate whether it is acceptable to complete the
parsing of the DER element and find that not all
bytes in derEle have been used.
@return Index of the first unused byte in the given DER element.
Raises a ValueError exception if the DER element is not a
valid non-negative INTEGER.
Raises an IndexError exception if the DER element is too short.
"""
tlvLength = DerObject.decode(self, derEle, noLeftOvers)
if self.typeTag!=self.typeTags['INTEGER']:
raise ValueError ("Not a DER INTEGER.")
if bord(self.payload[0])>127:
raise ValueError ("Negative INTEGER.")
self.value = bytes_to_long(self.payload)
return tlvLength
class DerSequence(DerObject):
"""Class to model a SEQUENCE DER element.
This object behaves like a dynamic Python sequence.
Sub-elements that are INTEGERs look like Python integers.
Any other sub-element is a binary string encoded as the complete DER
sub-element (TLV).
"""
def __init__(self, startSeq=None):
"""Initialize the SEQUENCE DER object. Always empty
initially."""
DerObject.__init__(self, 'SEQUENCE')
if startSeq==None:
self._seq = []
else:
self._seq = startSeq
## A few methods to make it behave like a python sequence
def __delitem__(self, n):
del self._seq[n]
def __getitem__(self, n):
return self._seq[n]
def __setitem__(self, key, value):
self._seq[key] = value
def __setslice__(self,i,j,sequence):
self._seq[i:j] = sequence
def __delslice__(self,i,j):
del self._seq[i:j]
def __getslice__(self, i, j):
return self._seq[max(0, i):max(0, j)]
def __len__(self):
return len(self._seq)
def append(self, item):
return self._seq.append(item)
def hasInts(self):
"""Return the number of items in this sequence that are numbers."""
return len(filter(isInt, self._seq))
def hasOnlyInts(self):
"""Return True if all items in this sequence are numbers."""
return self._seq and self.hasInts()==len(self._seq)
def encode(self):
"""Return the DER encoding for the ASN.1 SEQUENCE, containing
the non-negative integers and longs added to this object.
Limitation: Raises a ValueError exception if it some elements
in the sequence are neither Python integers nor complete DER INTEGERs.
"""
self.payload = b('')
for item in self._seq:
try:
self.payload += item
except:
try:
self.payload += DerInteger(item).encode()
except:
raise ValueError("Trying to DER encode an unknown object")
return DerObject.encode(self)
def decode(self, derEle, noLeftOvers=0):
"""Decode a complete SEQUENCE DER element, and re-initializes this
object with it.
@param derEle A complete SEQUENCE DER element. It must start with a DER
SEQUENCE tag.
@param noLeftOvers Indicate whether it is acceptable to complete the
parsing of the DER element and find that not all
bytes in derEle have been used.
@return Index of the first unused byte in the given DER element.
DER INTEGERs are decoded into Python integers. Any other DER
element is not decoded. Its validity is not checked.
Raises a ValueError exception if the DER element is not a
valid DER SEQUENCE.
Raises an IndexError exception if the DER element is too short.
"""
self._seq = []
try:
tlvLength = DerObject.decode(self, derEle, noLeftOvers)
if self.typeTag!=self.typeTags['SEQUENCE']:
raise ValueError("Not a DER SEQUENCE.")
# Scan one TLV at once
idx = 0
while idx<len(self.payload):
typeTag = bord(self.payload[idx])
if typeTag==self.typeTags['INTEGER']:
newInteger = DerInteger()
idx += newInteger.decode(self.payload[idx:])
self._seq.append(newInteger.value)
else:
itemLen,itemIdx = self._decodeLen(idx+1,self.payload)
self._seq.append(self.payload[idx:itemIdx+itemLen])
idx = itemIdx + itemLen
except IndexError:
raise ValueError("Not a valid DER SEQUENCE.")
return tlvLength
class DerOctetString(DerObject):
def __init__(self, value = b('')):
DerObject.__init__(self, 'OCTET STRING')
self.payload = value
def decode(self, derEle, noLeftOvers=0):
p = DerObject.decode(self, derEle, noLeftOvers)
if not self.isType("OCTET STRING"):
raise ValueError("Not a valid OCTET STRING.")
return p
class DerNull(DerObject):
def __init__(self):
DerObject.__init__(self, 'NULL')
class DerObjectId(DerObject):
def __init__(self):
DerObject.__init__(self, 'OBJECT IDENTIFIER')
def decode(self, derEle, noLeftOvers=0):
p = DerObject.decode(self, derEle, noLeftOvers)
if not self.isType("OBJECT IDENTIFIER"):
raise ValueError("Not a valid OBJECT IDENTIFIER.")
return p
def isInt(x):
test = 0
try:
test += x
except TypeError:
return 0
return 1
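# Illustrative usage sketch for the classes above (values are arbitrary;
# DerSequence/DerInteger are the classes defined in this module):
#   seq = DerSequence()
#   seq.append(7)
#   seq.append(65537)
#   der = seq.encode()
#   decoded = DerSequence()
#   decoded.decode(der)
#   assert decoded.hasOnlyInts() and decoded[1] == 65537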
| apache-2.0 |
mnlipp/mbed | workspace_tools/make.py | 35 | 11383 | #! /usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
TEST BUILD & RUN
"""
import sys
from time import sleep
from shutil import copy
from os.path import join, abspath, dirname
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from workspace_tools.utils import args_error
from workspace_tools.paths import BUILD_DIR
from workspace_tools.paths import RTOS_LIBRARIES
from workspace_tools.paths import ETH_LIBRARY
from workspace_tools.paths import USB_HOST_LIBRARIES, USB_LIBRARIES
from workspace_tools.paths import DSP_LIBRARIES
from workspace_tools.paths import FS_LIBRARY
from workspace_tools.paths import UBLOX_LIBRARY
from workspace_tools.tests import TESTS, Test, TEST_MAP
from workspace_tools.tests import TEST_MBED_LIB
from workspace_tools.targets import TARGET_MAP
from workspace_tools.options import get_default_options_parser
from workspace_tools.build_api import build_project
try:
import workspace_tools.private_settings as ps
except:
ps = object()
if __name__ == '__main__':
# Parse Options
parser = get_default_options_parser()
parser.add_option("-p",
type="int",
dest="program",
help="The index of the desired test program: [0-%d]" % (len(TESTS)-1))
parser.add_option("-n",
dest="program_name",
help="The name of the desired test program")
parser.add_option("-j", "--jobs",
type="int",
dest="jobs",
default=1,
help="Number of concurrent jobs (default 1). Use 0 for auto based on host machine's number of CPUs")
parser.add_option("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose diagnostic output")
parser.add_option("--silent",
action="store_true",
dest="silent",
default=False,
help="Silent diagnostic output (no copy, compile notification)")
parser.add_option("-D", "",
action="append",
dest="macros",
help="Add a macro definition")
# Local run
parser.add_option("--automated", action="store_true", dest="automated",
default=False, help="Automated test")
parser.add_option("--host", dest="host_test",
default=None, help="Host test")
parser.add_option("--extra", dest="extra",
default=None, help="Extra files")
parser.add_option("--peripherals", dest="peripherals",
default=None, help="Required peripherals")
parser.add_option("--dep", dest="dependencies",
default=None, help="Dependencies")
parser.add_option("--source", dest="source_dir",
default=None, help="The source (input) directory")
parser.add_option("--duration", type="int", dest="duration",
default=None, help="Duration of the test")
parser.add_option("--build", dest="build_dir",
default=None, help="The build (output) directory")
parser.add_option("-d", "--disk", dest="disk",
default=None, help="The mbed disk")
parser.add_option("-s", "--serial", dest="serial",
default=None, help="The mbed serial port")
parser.add_option("-b", "--baud", type="int", dest="baud",
default=None, help="The mbed serial baud rate")
parser.add_option("-L", "--list-tests", action="store_true", dest="list_tests",
default=False, help="List available tests in order and exit")
# Ideally, all the tests with a single "main" thread can be run with, or
# without the rtos, eth, usb_host, usb, dsp, fat, ublox
parser.add_option("--rtos",
action="store_true", dest="rtos",
default=False, help="Link with RTOS library")
parser.add_option("--eth",
action="store_true", dest="eth",
default=False,
help="Link with Ethernet library")
parser.add_option("--usb_host",
action="store_true",
dest="usb_host",
default=False,
help="Link with USB Host library")
parser.add_option("--usb",
action="store_true",
dest="usb",
default=False,
help="Link with USB Device library")
parser.add_option("--dsp",
action="store_true",
dest="dsp",
default=False,
help="Link with DSP library")
parser.add_option("--fat",
action="store_true",
dest="fat",
default=False,
help="Link with FS ad SD card file system library")
parser.add_option("--ublox",
action="store_true",
dest="ublox",
default=False,
help="Link with U-Blox library")
parser.add_option("--testlib",
action="store_true",
dest="testlib",
default=False,
help="Link with mbed test library")
# Specify a different linker script
parser.add_option("-l", "--linker", dest="linker_script",
default=None, help="use the specified linker script")
(options, args) = parser.parse_args()
# Print available tests in order and exit
if options.list_tests is True:
print '\n'.join(map(str, sorted(TEST_MAP.values())))
sys.exit()
# force program to "0" if a source dir is specified
if options.source_dir is not None:
p = 0
n = None
else:
# Program Number or name
p, n = options.program, options.program_name
if n is not None and p is not None:
args_error(parser, "[ERROR] specify either '-n' or '-p', not both")
if n:
# We will transform 'n' to list of 'p' (integers which are test numbers)
nlist = n.split(',')
for test_id in nlist:
if test_id not in TEST_MAP:
args_error(parser, "[ERROR] Program with name '%s' not found" % test_id)
p = [TEST_MAP[test_id].n for test_id in nlist]
elif p is None or (p < 0) or (p > (len(TESTS)-1)):
message = "[ERROR] You have to specify one of the following tests:\n"
message += '\n'.join(map(str, sorted(TEST_MAP.values())))
args_error(parser, message)
# If 'p' was set via -n to list of numbers make this a single element integer list
if type(p) != type([]):
p = [p]
# Target
if options.mcu is None:
args_error(parser, "[ERROR] You should specify an MCU")
mcu = options.mcu
# Toolchain
if options.tool is None:
args_error(parser, "[ERROR] You should specify a TOOLCHAIN")
toolchain = options.tool
# Test
for test_no in p:
test = Test(test_no)
if options.automated is not None: test.automated = options.automated
if options.dependencies is not None: test.dependencies = options.dependencies
if options.host_test is not None: test.host_test = options.host_test
if options.peripherals is not None: test.peripherals = options.peripherals
if options.duration is not None: test.duration = options.duration
if options.extra is not None: test.extra_files = options.extra
if not test.is_supported(mcu, toolchain):
print 'The selected test is not supported on target %s with toolchain %s' % (mcu, toolchain)
sys.exit()
# Linking with extra libraries
if options.rtos: test.dependencies.append(RTOS_LIBRARIES)
if options.eth: test.dependencies.append(ETH_LIBRARY)
if options.usb_host: test.dependencies.append(USB_HOST_LIBRARIES)
if options.usb: test.dependencies.append(USB_LIBRARIES)
if options.dsp: test.dependencies.append(DSP_LIBRARIES)
if options.fat: test.dependencies.append(FS_LIBRARY)
if options.ublox: test.dependencies.append(UBLOX_LIBRARY)
if options.testlib: test.dependencies.append(TEST_MBED_LIB)
build_dir = join(BUILD_DIR, "test", mcu, toolchain, test.id)
if options.source_dir is not None:
test.source_dir = options.source_dir
build_dir = options.source_dir
if options.build_dir is not None:
build_dir = options.build_dir
target = TARGET_MAP[mcu]
try:
bin_file = build_project(test.source_dir, build_dir, target, toolchain, test.dependencies, options.options,
linker_script=options.linker_script,
clean=options.clean,
verbose=options.verbose,
silent=options.silent,
macros=options.macros,
jobs=options.jobs)
print 'Image: %s' % bin_file
if options.disk:
# Simple copy to the mbed disk
copy(bin_file, options.disk)
if options.serial:
# Import pyserial: https://pypi.python.org/pypi/pyserial
from serial import Serial
sleep(target.program_cycle_s())
serial = Serial(options.serial, timeout = 1)
if options.baud:
serial.setBaudrate(options.baud)
serial.flushInput()
serial.flushOutput()
try:
serial.sendBreak()
except:
# In linux a termios.error is raised in sendBreak and in setBreak.
# The following setBreak() is needed to release the reset signal on the target mcu.
try:
serial.setBreak(False)
except:
pass
while True:
c = serial.read(512)
sys.stdout.write(c)
sys.stdout.flush()
except KeyboardInterrupt, e:
print "\n[CTRL+c] exit"
except Exception,e:
if options.verbose:
import traceback
traceback.print_exc(file=sys.stdout)
else:
print "[ERROR] %s" % str(e)
| apache-2.0 |
DazWorrall/ansible | lib/ansible/modules/storage/netapp/netapp_e_amg_sync.py | 56 | 10121 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_amg_sync
short_description: Conduct synchronization actions on asynchronous mirror groups.
description:
- Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- The ID of the storage array containing the AMG you wish to target
name:
description:
- The name of the async mirror group you wish to target
required: yes
state:
description:
- The synchronization action you'd like to take.
- If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in
progress, it will return with an OK status.
- If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended
choices:
- running
- suspended
required: yes
delete_recovery_point:
description:
- Indicates whether the failures point can be deleted on the secondary if necessary to achieve the synchronization.
- If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last
failures point will be deleted and synchronization will continue.
- If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW Repository capacity on the secondary
and the failures point will be preserved.
- "NOTE: This only has impact for newly launched syncs."
choices:
- yes
- no
default: no
"""
EXAMPLES = """
- name: start AMG async
netapp_e_amg_sync:
name: "{{ amg_sync_name }}"
state: running
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
"""
RETURN = """
json:
description: The object attributes of the AMG.
returned: success
type: string
example:
{
"changed": false,
"connectionType": "fc",
"groupRef": "3700000060080E5000299C24000006EF57ACAC70",
"groupState": "optimal",
"id": "3700000060080E5000299C24000006EF57ACAC70",
"label": "made_with_ansible",
"localRole": "primary",
"mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC",
"orphanGroup": false,
"recoveryPointAgeAlertThresholdMinutes": 20,
"remoteRole": "secondary",
"remoteTarget": {
"nodeName": {
"ioInterfaceType": "fc",
"iscsiNodeName": null,
"remoteNodeWWN": "20040080E5299F1C"
},
"remoteRef": "9000000060080E5000299C24005B06E557AC7EEC",
"scsiinitiatorTargetBaseProperties": {
"ioInterfaceType": "fc",
"iscsiinitiatorTargetBaseParameters": null
}
},
"remoteTargetId": "ansible2",
"remoteTargetName": "Ansible2",
"remoteTargetWwn": "60080E5000299F880000000056A25D56",
"repositoryUtilizationWarnThreshold": 80,
"roleChangeProgress": "none",
"syncActivity": "idle",
"syncCompletionTimeAlertThresholdMinutes": 10,
"syncIntervalMinutes": 10,
"worldWideName": "60080E5000299C24000006EF57ACAC70"
}
"""
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
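# Illustrative use of the request() helper (URL and credentials are
# placeholders, not a real proxy):
#   rc, data = request('https://proxy.example:8443/devmgr/v2/storage-systems',
#                      url_username='rw', url_password='rw', validate_certs=False,
#                      headers={"Accept": "application/json"})
# rc is the HTTP status code and data the decoded JSON body; passing
# ignore_errors=True suppresses the exception otherwise raised for rc >= 400.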
class AMGsync(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
name=dict(required=True, type='str'),
ssid=dict(required=True, type='str'),
state=dict(required=True, type='str', choices=['running', 'suspended']),
delete_recovery_point=dict(required=False, type='bool', default=False)
))
self.module = AnsibleModule(argument_spec=argument_spec)
args = self.module.params
self.name = args['name']
self.ssid = args['ssid']
self.state = args['state']
self.delete_recovery_point = args['delete_recovery_point']
try:
self.user = args['api_username']
self.pwd = args['api_password']
self.url = args['api_url']
except KeyError:
self.module.fail_json(msg="You must pass in api_username"
"and api_password and api_url to the module.")
self.certs = args['validate_certs']
self.post_headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
self.amg_id, self.amg_obj = self.get_amg()
def get_amg(self):
endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid
(rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
headers=self.post_headers)
try:
amg_matches = filter(lambda d: d['label'] == self.name, amg_objs)
amg_id = amg_matches[0]['id']
amg_obj = amg_matches[0]
except IndexError:
self.module.fail_json(
msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid))
return amg_id, amg_obj
@property
def current_state(self):
amg_id, amg_obj = self.get_amg()
return amg_obj['syncActivity']
def run_sync_action(self):
# If we get to this point we know that the states differ, and there is no 'err' state,
# so no need to revalidate
post_body = dict()
if self.state == 'running':
if self.current_state == 'idle':
if self.delete_recovery_point:
post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point))
suffix = 'sync'
else:
# In a suspended state
suffix = 'resume'
else:
suffix = 'suspend'
endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix)
(rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd,
validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers,
ignore_errors=True)
if not str(rc).startswith('2'):
self.module.fail_json(msg=str(resp['errorMessage']))
return resp
def apply(self):
state_map = dict(
running=['active'],
suspended=['userSuspended', 'internallySuspended', 'paused'],
err=['unkown', '_UNDEFINED'])
if self.current_state not in state_map[self.state]:
if self.current_state in state_map['err']:
self.module.fail_json(
msg="The sync is a state of '%s', this requires manual intervention. " +
"Please investigate and try again" % self.current_state)
else:
self.amg_obj = self.run_sync_action()
(ret, amg) = self.get_amg()
self.module.exit_json(changed=False, **amg)
def main():
sync = AMGsync()
sync.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
invisiblek/python-for-android | python3-alpha/extra_modules/gdata/analytics/service.py | 47 | 13331 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AccountsService extends the GDataService to streamline Google Analytics
account information operations.
AnalyticsDataService: Provides methods to query google analytics data feeds.
Extends GDataService.
DataQuery: Queries a Google Analytics Data list feed.
AccountQuery: Queries a Google Analytics Account list feed.
"""
__author__ = 'api.suryasev (Sal Uryasev)'
import urllib.request, urllib.parse, urllib.error
import atom
import gdata.service
import gdata.analytics
class AccountsService(gdata.service.GDataService):
"""Client extension for the Google Analytics Account List feed."""
def __init__(self, email="", password=None, source=None,
server='www.google.com/analytics', additional_headers=None,
**kwargs):
"""Creates a client for the Google Analytics service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='analytics',
source=source, server=server, additional_headers=additional_headers,
**kwargs)
def QueryAccountListFeed(self, uri):
"""Retrieves an AccountListFeed by retrieving a URI based off the Document
List feed, including any query parameters. An AccountListFeed object
can be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
An AccountListFeed object representing the feed returned by the server.
"""
return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)
def GetAccountListEntry(self, uri):
"""Retrieves a particular AccountListEntry by its unique URI.
Args:
uri: string The unique URI of an entry in an Account List feed.
Returns:
An AccountListEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)
def GetAccountList(self, max_results=1000, text_query=None,
params=None, categories=None):
"""Retrieves a feed containing all of a user's accounts and profiles."""
q = gdata.analytics.service.AccountQuery(max_results=max_results,
text_query=text_query,
params=params,
categories=categories)
return self.QueryAccountListFeed(q.ToUri())
class AnalyticsDataService(gdata.service.GDataService):
"""Client extension for the Google Analytics service Data List feed."""
def __init__(self, email=None, password=None, source=None,
server='www.google.com/analytics', additional_headers=None,
**kwargs):
"""Creates a client for the Google Analytics service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'www.google.com/analytics'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(self,
email=email, password=password, service='analytics', source=source,
server=server, additional_headers=additional_headers, **kwargs)
def GetData(self, ids='', dimensions='', metrics='',
sort='', filters='', start_date='',
end_date='', start_index='',
max_results=''):
"""Retrieves a feed containing a user's data
ids: comma-separated string of analytics accounts.
dimensions: comma-separated string of dimensions.
metrics: comma-separated string of metrics.
sort: comma-separated string of dimensions and metrics for sorting.
This may be prefixed with a minus to sort in reverse order.
(e.g. '-ga:keyword')
If omitted, the first dimension passed in will be used.
filters: comma-separated string of filter parameters.
(e.g. 'ga:keyword==google')
start_date: start date for data pull.
end_date: end date for data pull.
start_index: used in combination with max_results to pull more than 1000
entries. This defaults to 1.
max_results: maximum results that the pull will return. This defaults
to, and maxes out at 1000.
"""
q = gdata.analytics.service.DataQuery(ids=ids,
dimensions=dimensions,
metrics=metrics,
filters=filters,
sort=sort,
start_date=start_date,
end_date=end_date,
start_index=start_index,
max_results=max_results)
return self.AnalyticsDataFeed(q.ToUri())
def AnalyticsDataFeed(self, uri):
"""Retrieves an AnalyticsListFeed by retrieving a URI based off the
Document List feed, including any query parameters. An
AnalyticsListFeed object can be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
An AnalyticsListFeed object representing the feed returned by the
server.
"""
return self.Get(uri,
converter=gdata.analytics.AnalyticsDataFeedFromString)
"""
Account Fetching
"""
def QueryAccountListFeed(self, uri):
"""Retrieves an Account ListFeed by retrieving a URI based off the Account
List feed, including any query parameters. A AccountQuery object can
be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
An AccountListFeed object representing the feed returned by the server.
"""
return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)
def GetAccountListEntry(self, uri):
"""Retrieves a particular AccountListEntry by its unique URI.
Args:
uri: string The unique URI of an entry in an Account List feed.
Returns:
An AccountListEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)
def GetAccountList(self, username="default", max_results=1000,
start_index=1):
"""Retrieves a feed containing all of a user's accounts and profiles.
The username parameter is soon to be deprecated, with 'default'
becoming the only allowed parameter.
"""
if not username:
raise Exception("username is a required parameter")
q = gdata.analytics.service.AccountQuery(username=username,
max_results=max_results,
start_index=start_index)
return self.QueryAccountListFeed(q.ToUri())
class DataQuery(gdata.service.Query):
"""Object used to construct a URI to a data feed"""
def __init__(self, feed='/feeds/data', text_query=None,
params=None, categories=None, ids="",
dimensions="", metrics="", sort="", filters="",
start_date="", end_date="", start_index="",
max_results=""):
"""Constructor for Analytics List Query
Args:
feed: string (optional) The path for the feed. (e.g. '/feeds/data')
text_query: string (optional) The contents of the q query parameter.
This string is URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to
the query's items.
categories: list (optional) List of category strings which should be
included as query categories. See gdata.service.Query for
additional documentation.
ids: comma-separated string of analytics accounts.
dimensions: comma-separated string of dimensions.
metrics: comma-separated string of metrics.
sort: comma-separated string of dimensions and metrics.
This may be previxed with a minus to sort in reverse order
(e.g. '-ga:keyword').
If omitted, the first dimension passed in will be used.
filters: comma-separated string of filter parameters
(e.g. 'ga:keyword==google').
start_date: start date for data pull.
end_date: end date for data pull.
start_index: used in combination with max_results to pull more than 1000
entries. This defaults to 1.
max_results: maximum results that the pull will return. This defaults
to, and maxes out at 1000.
Yields:
A DocumentQuery object used to construct a URI based on the Document
List feed.
"""
self.elements = {'ids': ids,
'dimensions': dimensions,
'metrics': metrics,
'sort': sort,
'filters': filters,
'start-date': start_date,
'end-date': end_date,
'start-index': start_index,
'max-results': max_results}
gdata.service.Query.__init__(self, feed, text_query, params, categories)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Analytics
List feed.
"""
old_feed = self.feed
self.feed = old_feed + '?' + \
urllib.parse.urlencode(dict([(key, value) for key, value in \
self.elements.items() if value]))
new_feed = gdata.service.Query.ToUri(self)
self.feed = old_feed
return new_feed
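# Illustrative construction (the profile ID and date range are placeholders):
#   q = DataQuery(ids='ga:12345', dimensions='ga:source', metrics='ga:visits',
#                 sort='-ga:visits', start_date='2009-04-01', end_date='2009-04-30')
#   uri = q.ToUri()   # e.g. '/feeds/data?ids=ga%3A12345&...'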
class AccountQuery(gdata.service.Query):
"""Object used to construct a URI to query the Google Account List feed"""
def __init__(self, feed='/feeds/accounts', start_index=1,
max_results=1000, username='default', text_query=None,
params=None, categories=None):
"""Constructor for Account List Query
Args:
feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
visibility: string (optional) The visibility chosen for the current
feed.
projection: string (optional) The projection chosen for the current
feed.
text_query: string (optional) The contents of the q query parameter.
This string is URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to
the query's items.
categories: list (optional) List of category strings which should be
included as query categories. See gdata.service.Query for
additional documentation.
username: string (deprecated) This value should now always be passed as
'default'.
Yields:
A DocumentQuery object used to construct a URI based on the Document
List feed.
"""
self.max_results = max_results
self.start_index = start_index
self.username = username
gdata.service.Query.__init__(self, feed, text_query, params, categories)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Account
List feed.
"""
old_feed = self.feed
self.feed = '/'.join([old_feed, self.username]) + '?' + \
'&'.join(['max-results=' + str(self.max_results),
'start-index=' + str(self.start_index)])
new_feed = self.feed
self.feed = old_feed
return new_feed
| apache-2.0 |
aonotas/chainer | chainer/functions/util/forget.py | 1 | 4646 | from chainer.backends import cuda
from chainer import function
from chainer import variable
class _DummyFunction(function.Function):
def __init__(self, grads):
self.grads = grads
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
return xp.array(0),
def backward(self, inputs, outputs):
return self.grads
class Forget(function.Function):
def __init__(self, func):
if not callable(func):
raise TypeError('func must be callable')
self.func = func
def _call_func(self, xs):
outs = self.func(*xs)
if isinstance(outs, tuple):
for i, out in enumerate(outs):
if isinstance(out, variable.Variable):
continue
n = i + 1
suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(
n if n < 20 else n % 10, 'th')
msg = ('{}{} element of a returned tuple is not Variable, '
'but is {}').format(n, suffix, type(out))
raise RuntimeError(msg)
elif isinstance(outs, variable.Variable):
outs = (outs,)
else:
msg = ('A tuple of Variables or a Variable are expected, but {} '
'is returned.'.format(type(outs)))
raise RuntimeError(msg)
return outs
def forward(self, inputs):
with function.no_backprop_mode():
xs = [variable.Variable(x) for x in inputs]
outs = self._call_func(xs)
return tuple(out.data for out in outs)
def backward(self, inputs, grads):
with function.force_backprop_mode():
xs = [variable.Variable(x) for x in inputs]
outs = self._call_func(xs)
_DummyFunction(grads)(*outs).backward()
return tuple(x.grad for x in xs)
def forget(func, *xs):
"""Calls a function without storing intermediate results.
On a forward propagation, Chainer normally stores all intermediate results
of :class:`~chainer.variable.VariableNode`\\ s on a computational graph as
they are required on backward propagation.
Sometimes these results consume too much memory.
``F.forget`` *forgets* such intermediate results on forward propagation,
and still supports backpropagation with recalculation.
On a forward propagation, ``F.forget`` calls a given function with given
variables without creating a computational graph. That means, no
intermediate results are stored.
On a backward propagation, ``F.forget`` calls the given function again to
create a computational graph for backpropagation.
``F.forget`` reduces internal memory usage, whereas it requires more
calculation time as it calls the function twice.
.. admonition:: Example
Let ``f`` be a function defined as:
>>> def f(a, b):
... return a + b + a
and, ``x`` and ``y`` be :class:`~chainer.Variable`\\ s:
>>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
>>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
When ``z`` is calculated as ``z = f(x, y)``, its intermediate result
``x + y`` is stored in memory. Instead, if you call ``f`` with
``F.forget``:
>>> z = F.forget(f, x, y)
intermediate ``x + y`` is forgotten.
.. note::
``F.forget`` does not support functions which behave differently in
multiple calls with the same inputs, such as
:meth:`F.dropout() <chainer.functions.dropout>` and
:meth:`F.negative_sampling() <chainer.functions.negative_sampling>`.
.. note::
In case input argument variables are of class :class:`numpy.ndarray` or
:class:`cupy.ndarray` objects, arguments will automatically be
converted to :class:`~chainer.Variable`\\ s.
This conversion takes place to ensure that this function is included
in the computational graph to enable backward computations.
Args:
func (callable): A function to call. It needs to be called with
:class:`~chainer.Variable` object(s) and to return a
:class:`~chainer.Variable` object or a tuple of
:class:`~chainer.Variable` objects.
xs (~chainer.Variable): Argument variables of the function.
Returns:
~chainer.Variable: A variable ``func`` returns. If it returns a tuple,
the method returns a tuple too.
"""
xs = tuple(x if isinstance(x, variable.Variable) else
variable.Variable(x, requires_grad=True) for x in xs)
return Forget(func)(*xs)
| mit |
wolfier/incubator-airflow | airflow/contrib/hooks/gcp_dataflow_hook.py | 2 | 10241 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import select
import subprocess
import time
import uuid
from apiclient.discovery import build
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
# This is the default location
# https://cloud.google.com/dataflow/pipelines/specifying-exec-params
DEFAULT_DATAFLOW_LOCATION = 'us-central1'
class _DataflowJob(LoggingMixin):
def __init__(self, dataflow, project_number, name, location, poll_sleep=10):
self._dataflow = dataflow
self._project_number = project_number
self._job_name = name
self._job_location = location
self._job_id = None
self._job = self._get_job()
self._poll_sleep = poll_sleep
def _get_job_id_from_name(self):
jobs = self._dataflow.projects().locations().jobs().list(
projectId=self._project_number,
location=self._job_location
).execute()
for job in jobs['jobs']:
if job['name'] == self._job_name:
self._job_id = job['id']
return job
return None
def _get_job(self):
if self._job_name:
job = self._get_job_id_from_name()
else:
job = self._dataflow.projects().jobs().get(
projectId=self._project_number,
jobId=self._job_id
).execute()
if job and 'currentState' in job:
self.log.info(
'Google Cloud DataFlow job %s is %s',
job['name'], job['currentState']
)
elif job:
self.log.info(
'Google Cloud DataFlow with job_id %s has name %s',
self._job_id, job['name']
)
else:
self.log.info(
'Google Cloud DataFlow job not available yet..'
)
return job
def wait_for_done(self):
while True:
if self._job and 'currentState' in self._job:
if 'JOB_STATE_DONE' == self._job['currentState']:
return True
elif 'JOB_STATE_RUNNING' == self._job['currentState'] and \
'JOB_TYPE_STREAMING' == self._job['type']:
return True
elif 'JOB_STATE_FAILED' == self._job['currentState']:
raise Exception("Google Cloud Dataflow job {} has failed.".format(
self._job['name']))
elif 'JOB_STATE_CANCELLED' == self._job['currentState']:
raise Exception("Google Cloud Dataflow job {} was cancelled.".format(
self._job['name']))
elif 'JOB_STATE_RUNNING' == self._job['currentState']:
time.sleep(self._poll_sleep)
elif 'JOB_STATE_PENDING' == self._job['currentState']:
time.sleep(15)
else:
self.log.debug(str(self._job))
raise Exception(
"Google Cloud Dataflow job {} was unknown state: {}".format(
self._job['name'], self._job['currentState']))
else:
time.sleep(15)
self._job = self._get_job()
def get(self):
return self._job
class _Dataflow(LoggingMixin):
def __init__(self, cmd):
self.log.info("Running command: %s", ' '.join(cmd))
self._proc = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
def _line(self, fd):
if fd == self._proc.stderr.fileno():
lines = self._proc.stderr.readlines()
for line in lines:
self.log.warning(line[:-1])
if lines:
return lines[-1]
if fd == self._proc.stdout.fileno():
line = self._proc.stdout.readline()
return line
@staticmethod
def _extract_job(line):
if line is not None:
if line.startswith("Submitted job: "):
return line[15:-1]
def wait_for_done(self):
reads = [self._proc.stderr.fileno(), self._proc.stdout.fileno()]
self.log.info("Start waiting for DataFlow process to complete.")
while self._proc.poll() is None:
ret = select.select(reads, [], [], 5)
if ret is not None:
for fd in ret[0]:
line = self._line(fd)
if line:
self.log.debug(line[:-1])
else:
self.log.info("Waiting for DataFlow process to complete.")
if self._proc.returncode != 0:
raise Exception("DataFlow failed with return code {}".format(
self._proc.returncode))
class DataFlowHook(GoogleCloudBaseHook):
def __init__(self,
gcp_conn_id='google_cloud_default',
delegate_to=None,
poll_sleep=10):
self.poll_sleep = poll_sleep
super(DataFlowHook, self).__init__(gcp_conn_id, delegate_to)
def get_conn(self):
"""
Returns a Google Cloud Storage service object.
"""
http_authorized = self._authorize()
return build('dataflow', 'v1b3', http=http_authorized)
def _start_dataflow(self, task_id, variables, name,
command_prefix, label_formatter):
variables = self._set_variables(variables)
cmd = command_prefix + self._build_cmd(task_id, variables,
label_formatter)
_Dataflow(cmd).wait_for_done()
_DataflowJob(self.get_conn(), variables['project'], name,
variables['region'], self.poll_sleep).wait_for_done()
@staticmethod
def _set_variables(variables):
if variables['project'] is None:
raise Exception('Project not specified')
if 'region' not in variables:
variables['region'] = DEFAULT_DATAFLOW_LOCATION
return variables
def start_java_dataflow(self, task_id, variables, dataflow, job_class=None,
append_job_name=True):
if append_job_name:
name = task_id + "-" + str(uuid.uuid1())[:8]
else:
name = task_id
variables['jobName'] = name
def label_formatter(labels_dict):
return ['--labels={}'.format(
json.dumps(labels_dict).replace(' ', ''))]
command_prefix = (["java", "-cp", dataflow, job_class] if job_class
else ["java", "-jar", dataflow])
self._start_dataflow(task_id, variables, name,
command_prefix, label_formatter)
def start_template_dataflow(self, task_id, variables, parameters, dataflow_template,
append_job_name=True):
if append_job_name:
name = task_id + "-" + str(uuid.uuid1())[:8]
else:
name = task_id
self._start_template_dataflow(
name, variables, parameters, dataflow_template)
def start_python_dataflow(self, task_id, variables, dataflow, py_options,
append_job_name=True):
if append_job_name:
name = task_id + "-" + str(uuid.uuid1())[:8]
else:
name = task_id
variables['job_name'] = name
def label_formatter(labels_dict):
return ['--labels={}={}'.format(key, value)
for key, value in labels_dict.items()]
self._start_dataflow(task_id, variables, name,
["python"] + py_options + [dataflow],
label_formatter)
def _build_cmd(self, task_id, variables, label_formatter):
command = ["--runner=DataflowRunner"]
if variables is not None:
for attr, value in variables.items():
if attr == 'labels':
command += label_formatter(value)
elif value is None or value.__len__() < 1:
command.append("--" + attr)
else:
command.append("--" + attr + "=" + value)
return command
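# For example (illustrative values), variables of
#   {'project': 'my-project', 'labels': {'team': 'data'}}
# run through the Java label_formatter yield roughly:
#   ['--runner=DataflowRunner', '--labels={"team":"data"}', '--project=my-project']
# (dict iteration order is arbitrary here, so the flag order may differ).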
def _start_template_dataflow(self, name, variables, parameters, dataflow_template):
# Builds RuntimeEnvironment from variables dictionary
# https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
environment = {}
for key in ['maxWorkers', 'zone', 'serviceAccountEmail', 'tempLocation',
'bypassTempDirValidation', 'machineType']:
if key in variables:
environment.update({key: variables[key]})
body = {"jobName": name,
"parameters": parameters,
"environment": environment}
service = self.get_conn()
request = service.projects().templates().launch(projectId=variables['project'],
gcsPath=dataflow_template,
body=body)
response = request.execute()
variables = self._set_variables(variables)
_DataflowJob(self.get_conn(), variables['project'], name, variables['region'],
self.poll_sleep).wait_for_done()
return response
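# The templates().launch() body assembled above is shaped like this
# (all values illustrative):
#   {"jobName": "mytask-1a2b3c4d",
#    "parameters": {"input": "gs://bucket/in.txt"},
#    "environment": {"tempLocation": "gs://bucket/tmp", "zone": "us-central1-f"}}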
| apache-2.0 |
hiteshwadekar/ns-3-dev-ndnSIM | src/uan/bindings/callbacks_list.py | 150 | 1150 | callback_classes = [
['void', 'int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'double', 'ns3::UanTxMode', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::UanAddress const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| gpl-2.0 |
shiora/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/whoosh/automata/nfa.py | 95 | 10498 | # Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.automata.fst import Arc
class Instruction(object):
def __repr__(self):
return "%s()" % (self.__class__.__name__, )
class Char(Instruction):
"""
Matches a literal character.
"""
def __init__(self, c):
self.c = c
def __repr__(self):
return "Char(%r)" % self.c
class Lit(Instruction):
"""
Matches a literal string.
"""
def __init__(self, c):
self.c = c
def __repr__(self):
return "Lit(%r)" % self.c
class Any(Instruction):
"""
Matches any character.
"""
class Match(Instruction):
"""
Stop this thread: the string matched.
"""
def __repr__(self):
return "Match()"
class Jmp(Instruction):
"""
Jump to a specified instruction.
"""
def __init__(self, x):
self.x = x
def __repr__(self):
return "Jmp(%s)" % self.x
class Split(Instruction):
"""
Split execution: continue at two separate specified instructions.
"""
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Split(%s, %s)" % (self.x, self.y)
class Label(Instruction):
"""
Placeholder to act as a target for JMP instructions
"""
def __hash__(self):
return id(self)
def __repr__(self):
return "L(%s)" % hex(id(self))
def concat(e1, e2):
return e1 + e2
def alt(e1, e2):
L1, L2, L3 = Label(), Label(), Label()
return [L1] + e1 + [Jmp(L3), L2] + e2 + [L3]
def zero_or_one(e):
L1, L2 = Label(), Label()
return [Split(L1, L2), L1] + e + [L2]
def zero_or_more(e):
L1, L2, L3 = Label(), Label(), Label()
return [L1, Split(L2, L3), L2] + e + [Jmp(L1), L3]
def one_or_more(e):
L1, L2 = Label(), Label()
return [L1] + e + [Split(L1, L2), L2]
def fixup(program):
refs = {}
i = 0
while i < len(program):
op = program[i]
if isinstance(op, Label):
refs[op] = i
program.pop(i)
else:
i += 1
if refs:
for op in program:
if isinstance(op, (Jmp, Split)):
op.x = refs[op.x]
if isinstance(op, Split):
op.y = refs[op.y]
return program + [Match]
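# Illustrative compilation of the regex a(b|c)* into an instruction list:
#   program = fixup(concat([Char("a")],
#                          zero_or_more(alt([Char("b")], [Char("c")]))))
# fixup() resolves the Label placeholders into absolute instruction indexes
# (so Jmp/Split targets become plain ints) and appends the terminating Match.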
class ThreadList(object):
def __init__(self, program, max=1000):
self.program = program
self.max = max
self.threads = []
def __nonzero__(self):
return bool(self.threads)
def current(self):
return self.threads.pop()
def add(self, thread):
op = self.program[thread.pc]
optype = type(op)
if optype is Jmp:
self.add(thread.at(op.x))
elif optype is Split:
self.add(thread.copy_at(op.x))
self.add(thread.at(op.y))
else:
self.threads.append(thread)
class Thread(object):
def __init__(self, pc, address, sofar='', accept=False):
self.pc = pc
self.address = address
self.sofar = sofar
self.accept = accept
def at(self, pc):
self.pc = pc
return self
def copy_at(self, pc):
return Thread(pc, self.address, self.sofar, self.accept)
def __repr__(self):
d = self.__dict__
return "Thread(%s)" % ",".join("%s=%r" % (k, v) for k, v in d.items())
def advance(thread, arc, c):
thread.pc += 1
thread.address = arc.target
thread.sofar += c
thread.accept = arc.accept
def run(graph, program, address):
threads = ThreadList(program)
threads.add(Thread(0, address))
arc = Arc()
while threads:
thread = threads.current()
address = thread.address
op = program[thread.pc]
optype = type(op)
if optype is Char:
if address:
arc = graph.find_arc(address, op.c, arc)
if arc:
advance(thread, arc, op.c)
threads.add(thread)
elif optype is Lit:
if address:
c = op.c
arc = graph.find_path(c, arc, address)
if arc:
advance(thread, arc, c)
threads.add(thread)
elif optype is Any:
if address:
sofar = thread.sofar
pc = thread.pc + 1
for arc in graph.iter_arcs(address, arc):
t = Thread(pc, arc.target, sofar + arc.label, arc.accept)
threads.add(t)
elif op is Match:
if thread.accept:
yield thread.sofar
else:
raise Exception("Don't know what to do with %r" % op)
LO = 0
HI = 1
def regex_limit(graph, mode, program, address):
low = mode == LO
output = []
threads = ThreadList(program)
threads.add(Thread(0, address))
arc = Arc()
while threads:
thread = threads.current()
address = thread.address
op = program[thread.pc]
optype = type(op)
if optype is Char:
if address:
arc = graph.find_arc(address, op.c, arc)
if arc:
if low and arc.accept:
return thread.sofar + op.c
advance(thread, arc, op.c)
threads.add(thread)
elif optype is Lit:
if address:
labels = op.c
for label in labels:
arc = graph.find_arc(address, label)
if arc is None:
return thread.sofar
elif thread.accept:
return thread.sofar
elif optype is Any:
if address:
if low:
arc = graph.arc_at(address, arc)
else:
for arc in graph.iter_arcs(address):
pass
advance(thread, arc, arc.label)
threads.add(thread)
elif thread.accept:
return thread.sofar
elif op is Match:
return thread.sofar
else:
raise Exception("Don't know what to do with %r" % op)
# if __name__ == "__main__":
# from whoosh import index, query
# from whoosh.filedb.filestore import RamStorage
# from whoosh.automata import fst
# from whoosh.util.testing import timing
#
# st = RamStorage()
# gw = fst.GraphWriter(st.create_file("test"))
# gw.start_field("test")
# for key in ["aaaa", "aaab", "aabb", "abbb", "babb", "bbab", "bbba"]:
# gw.insert(key)
# gw.close()
# gr = fst.GraphReader(st.open_file("test"))
#
# program = one_or_more([Lit("a")])
# print program
# program = fixup(program)
# print program
# print list(run(gr, program, gr.root("test")))
#
# ix = index.open_dir("e:/dev/src/houdini/help/index")
# r = ix.reader()
# gr = r._get_graph()
#
# # program = fixup([Any(), Any(), Any(), Any(), Any()])
# # program = fixup(concat(zero_or_more([Any()]), [Char("/")]))
# # with timing():
# # x = list(run(gr, program, gr.root("path")))
# # print len(x)
#
# q = query.Regex("path", "^.[abc].*/$")
# with timing():
# y = list(q._btexts(r))
# print len(y)
# print y[0], y[-1]
#
# pr = [Any()] + alt([Lit("c")], alt([Lit("b")], [Lit("a")])) + zero_or_more([Any()]) + [Lit("/")]
# program = fixup(pr)
# # with timing():
# # x = list(run(gr, program, gr.root("path")))
# # print len(x), x
#
# with timing():
# print "lo=", regex_limit(gr, LO, program, gr.root("path"))
# print "hi=", regex_limit(gr, HI, program, gr.root("path"))
#
#
#
# #int
# #backtrackingvm(Inst *prog, char *input)
# #{
# # enum { MAXTHREAD = 1000 };
# # Thread ready[MAXTHREAD];
# # int nready;
# # Inst *pc;
# # char *sp;
# #
# # /* queue initial thread */
# # ready[0] = thread(prog, input);
# # nready = 1;
# #
# # /* run threads in stack order */
# # while(nready > 0){
# # --nready; /* pop state for next thread to run */
# # pc = ready[nready].pc;
# # sp = ready[nready].sp;
# # for(;;){
# # switch(pc->opcode){
# # case Char:
# # if(*sp != pc->c)
# # goto Dead;
# # pc++;
# # sp++;
# # continue;
# # case Match:
# # return 1;
# # case Jmp:
# # pc = pc->x;
# # continue;
# # case Split:
# # if(nready >= MAXTHREAD){
# # fprintf(stderr, "regexp overflow");
# # return -1;
# # }
# # /* queue new thread */
# # ready[nready++] = thread(pc->y, sp);
# # pc = pc->x; /* continue current thread */
# # continue;
# # }
# # }
# # Dead:;
# # }
# # return 0;
# #}
#
#
| gpl-2.0 |
aglne/lenskit | lenskit-integration-tests/src/it/eval/external-algorithms/item-mean.py | 9 | 2714 | import sys
class ItemMeanData(object):
def __init__(self):
self.global_sum = 0
self.global_count = 0
self.item_sums = {}
self.item_counts = {}
def train(self, trainfile):
with open(trainfile) as f:
for line in f:
user, item, rating = line.strip().split(',')[:3]
item = int(item)
rating = float(rating)
self.global_sum += rating
self.global_count += 1
if item not in self.item_sums:
self.item_sums[item] = rating
self.item_counts[item] = 1
else:
self.item_sums[item] += rating
self.item_counts[item] += 1
def global_mean(self):
return self.global_sum / self.global_count
def item_set(self):
return set(self.item_counts.iterkeys())
def item_mean_offsets(self):
means = {}
gmean = self.global_mean()
for item, n in self.item_counts.iteritems():
means[item] = self.item_sums[item] / n - gmean
return gmean, means
def score_items(self, to_score, output):
global_mean, item_means = self.item_mean_offsets()
for user, items in to_score.iteritems():
for item in items:
pred = global_mean
if item in item_means:
pred += item_means[item]
print >> output, "%s,%s,%.3f" % (user, item, pred)
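# Worked example (illustrative numbers): given ratings 4 and 5 on item 10 and
# a single rating 3 on item 20, the global mean is (4+5+3)/3 = 4.0, item 10's
# offset is 4.5 - 4.0 = +0.5 and item 20's is 3.0 - 4.0 = -1.0, so every user
# gets a prediction of 4.5 for item 10 and 3.0 for item 20.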
def load_test_pairs(testfile):
to_score = {}
with open(testfile) as testf:
for line in testf:
user, item = line.strip().split(',')[:2]
user = int(user)
item = int(item)
if user in to_score:
to_score[user].add(item)
else:
to_score[user] = set([item])
return to_score
def load_query_users(userfile, items):
to_score = {}
with open(userfile) as userf:
for line in userf:
user = int(line.strip())
to_score[user] = items
return to_score
if sys.argv[1] == '--for-users':
userfile, trainfile = sys.argv[2:4]
testfile = None
outfile = None
else:
trainfile, testfile, outfile = sys.argv[1:4]
userfile = None
model = ItemMeanData()
model.train(trainfile)
if testfile is not None:
to_score = load_test_pairs(testfile)
elif userfile is not None:
to_score = load_query_users(userfile, model.item_set())
else:
print >> sys.stderr, "no user file specified"
sys.exit(1)
if outfile is None:
model.score_items(to_score, sys.stdout)
else:
with open(outfile, 'w') as outf:
model.score_items(to_score, outf)
| lgpl-2.1 |
gerv/bedrock | tests/functional/contribute/test_tasks.py | 11 | 2578 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.contribute.tasks import ContributeSignUpPage
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_contribute_task_twitter(base_url, selenium):
page = ContributeSignUpPage(selenium, base_url).open()
twitter_task_page = page.open_twitter_task()
assert twitter_task_page.seed_url in selenium.current_url
assert twitter_task_page.is_share_button_displayed
assert twitter_task_page.is_follow_button_displayed
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_contribute_task_firefox_mobile(base_url, selenium):
page = ContributeSignUpPage(selenium, base_url).open()
mobile_task_page = page.open_mobile_task()
assert mobile_task_page.seed_url in selenium.current_url
assert mobile_task_page.is_android_download_button_displayed
assert mobile_task_page.is_ios_download_button_displayed
@pytest.mark.nondestructive
def test_contribute_task_encryption(base_url, selenium):
page = ContributeSignUpPage(selenium, base_url).open()
encryption_task_page = page.open_encryption_task()
assert encryption_task_page.seed_url in selenium.current_url
assert encryption_task_page.is_take_the_pledge_button_displayed
@pytest.mark.nondestructive
def test_contribute_task_joy_of_coding(base_url, selenium):
page = ContributeSignUpPage(selenium, base_url).open()
joy_of_coding_task_page = page.open_joy_of_coding_task()
assert joy_of_coding_task_page.seed_url in selenium.current_url
assert joy_of_coding_task_page.is_video_displayed
assert joy_of_coding_task_page.is_watch_the_video_button_displayed
@pytest.mark.nondestructive
def test_contribute_task_dev_tools_challenger(base_url, selenium):
page = ContributeSignUpPage(selenium, base_url).open()
dev_tools_challenger_task_page = page.open_dev_tools_challenger_task()
assert dev_tools_challenger_task_page.seed_url in selenium.current_url
assert dev_tools_challenger_task_page.download_button.is_displayed
assert dev_tools_challenger_task_page.is_visit_dev_tools_challenger_button_displayed
@pytest.mark.nondestructive
def test_contribute_task_stumbler(base_url, selenium):
page = ContributeSignUpPage(selenium, base_url).open()
stumbler_task_page = page.open_stumbler_task()
assert stumbler_task_page.seed_url in selenium.current_url
assert stumbler_task_page.is_stumbler_button_displayed
| mpl-2.0 |
tkstman/lab5 | main/lib/werkzeug/debug/tbtools.py | 311 | 16785 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
import os
import sys
import json
import inspect
import traceback
import codecs
from tokenize import TokenError
from werkzeug.utils import cached_property, escape
from werkzeug.debug.console import Console
from werkzeug._compat import range_type, PY2, text_type, string_types
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(r'^(.*?)$(?m)')
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = '\xef\xbb\xbf'
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css" type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does not by
accident trigger a request to /favicon.ico which might change the application
state. -->
<link rel="shortcut icon" href="?__debugger__=yes&cmd=resource&f=console.png">
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
SECRET = "%(secret)s";
</script>
</head>
<body>
<div class="debugger">
'''
FOOTER = u'''\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
</body>
</html>
'''
PAGE_HTML = HEADER + u'''\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
''' + FOOTER + '''
<!--
%(plaintext_cs)s
-->
'''
CONSOLE_HTML = HEADER + u'''\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
''' + FOOTER
SUMMARY_HTML = u'''\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
'''
FRAME_HTML = u'''\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<pre>%(current_line)s</pre>
</div>
'''
SOURCE_TABLE_HTML = u'<table class=source>%s</table>'
SOURCE_LINE_HTML = u'''\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
'''
def render_console_html(secret):
return CONSOLE_HTML % {
'evalex': 'true',
'console': 'true',
'title': 'Console',
'secret': secret,
'traceback_id': -1
}
def get_current_traceback(ignore_system_exceptions=False,
show_hidden_frames=False, skip=0):
"""Get the current exception info as `Traceback` object. Per default
calling this method will reraise system exceptions such as generator exit,
system exit or others. This behavior can be disabled by passing `False`
to the function as first parameter.
"""
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
raise
for x in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
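# A minimal usage sketch (names as defined above; the secret value is
# illustrative):
#
#     try:
#         1 / 0
#     except Exception:
#         tb = get_current_traceback()
#         tb.log()                                       # plaintext to stderr
#         html = tb.render_full(evalex=False, secret='illustrative-secret')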
class Line(object):
"""Helper for the source renderer."""
__slots__ = ('lineno', 'code', 'in_frame', 'current')
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
def classes(self):
rv = ['line']
if self.in_frame:
rv.append('in-frame')
if self.current:
rv.append('current')
return rv
classes = property(classes)
def render(self):
return SOURCE_LINE_HTML % {
'classes': u' '.join(self.classes),
'lineno': self.lineno,
'code': escape(self.code)
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
if not isinstance(exc_type, str):
exception_type = exc_type.__name__
if exc_type.__module__ not in ('__builtin__', 'exceptions'):
exception_type = exc_type.__module__ + '.' + exception_type
else:
exception_type = exc_type
self.exception_type = exception_type
        # we only add frames to the list that are not hidden. This follows
        # the magic variables as defined by paste.exceptions.collector
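        # e.g. a frame whose locals contain  __traceback_hide__ = 'before'
        # causes all frames collected before it to be dropped later by
        # filter_hidden_frames() below.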
self.frames = []
while tb:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
if not self.frames:
return
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ('before', 'before_and_this'):
new_frames = []
hidden = False
if hide == 'before_and_this':
continue
elif hide in ('reset', 'reset_and_this'):
hidden = False
if hide == 'reset_and_this':
continue
elif hide in ('after', 'after_and_this'):
hidden = True
if hide == 'after_and_this':
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == 'codeop':
del self.frames[:]
        # if the last frame is missing, something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
is_syntax_error = property(is_syntax_error)
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_value)
rv = ''.join(buf).strip()
return rv.decode('utf-8', 'replace') if PY2 else rv
exception = property(exception)
def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.rstrip() + u'\n'
if PY2:
tb.encode('utf-8', 'replace')
logfile.write(tb)
def paste(self):
"""Create a paste and return the paste id."""
data = json.dumps({
'description': 'Werkzeug Internal Server Error',
'public': False,
'files': {
'traceback.txt': {
'content': self.plaintext
}
}
}).encode('utf-8')
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
rv = urlopen('https://api.github.com/gists', data=data)
resp = json.loads(rv.read().decode('utf-8'))
rv.close()
return {
'url': resp['html_url'],
'id': resp['id']
}
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ''
frames = []
classes = ['traceback']
if not self.frames:
classes.append('noframe-traceback')
if include_title:
if self.is_syntax_error:
title = u'Syntax Error'
else:
title = u'Traceback <em>(most recent call last)</em>:'
for frame in self.frames:
frames.append(u'<li%s>%s' % (
frame.info and u' title="%s"' % escape(frame.info) or u'',
frame.render()
))
if self.is_syntax_error:
description_wrapper = u'<pre class=syntaxerror>%s</pre>'
else:
description_wrapper = u'<blockquote>%s</blockquote>'
return SUMMARY_HTML % {
'classes': u' '.join(classes),
'title': title and u'<h3>%s</h3>' % title or u'',
'frames': u'\n'.join(frames),
'description': description_wrapper % escape(self.exception)
}
def render_full(self, evalex=False, secret=None):
"""Render the Full HTML page with the traceback info."""
exc = escape(self.exception)
return PAGE_HTML % {
'evalex': evalex and 'true' or 'false',
'console': 'false',
'title': exc,
'exception': exc,
'exception_type': escape(self.exception_type),
'summary': self.render_summary(include_title=False),
'plaintext': self.plaintext,
'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
'traceback_id': self.id,
'secret': secret
}
def generate_plaintext_traceback(self):
"""Like the plaintext attribute but returns a generator"""
yield u'Traceback (most recent call last):'
for frame in self.frames:
yield u' File "%s", line %s, in %s' % (
frame.filename,
frame.lineno,
frame.function_name
)
yield u' ' + frame.current_line.strip()
yield self.exception
def plaintext(self):
return u'\n'.join(self.generate_plaintext_traceback())
plaintext = cached_property(plaintext)
id = property(lambda x: id(x))
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in ('.pyo', '.pyc'):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = fn
self.module = self.globals.get('__name__')
self.loader = self.globals.get('__loader__')
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get('__traceback_hide__', False)
info = self.locals.get('__traceback_info__')
if info is not None:
try:
info = text_type(info)
except UnicodeError:
info = str(info).decode('utf-8', 'replace')
self.info = info
def render(self):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
'id': self.id,
'filename': escape(self.filename),
'lineno': self.lineno,
'function_name': escape(self.function_name),
'current_line': escape(self.current_line.strip())
}
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, 'co_firstlineno'):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + '\n' for x
in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno:lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def render_source(self):
"""Render the sourcecode."""
return SOURCE_TABLE_HTML % u'\n'.join(line.render() for line in
self.get_annotated_lines())
def eval(self, code, mode='single'):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, unicode):
code = UTF8_COOKIE + code.encode('utf-8')
code = compile(code, '<interactive>', mode)
return eval(code, self.globals, self.locals)
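    # Illustrative only: given a Frame instance ``frame``, a call such as
    #     frame.eval('len(locals())')
    # is evaluated in the frame's own globals/locals, which is what the
    # interactive debugger console relies on.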
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, 'get_source'):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, 'get_source_by_code'):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we munch the exception so that we don't cause troubles
# if the loader is broken.
pass
if source is None:
try:
f = open(self.filename)
except IOError:
return []
try:
source = f.read()
finally:
f.close()
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
        # not unicode yet. it should be ascii, but we don't want to reject
        # too many characters in the debugger if something breaks
charset = 'utf-8'
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
for idx, match in enumerate(_line_re.finditer(source)):
                match = _coding_re.search(match.group())  # look for a PEP 263 coding cookie on this line
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
try:
codecs.lookup(charset)
except LookupError:
charset = 'utf-8'
return source.decode(charset, 'replace').splitlines()
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u''
@cached_property
def console(self):
return Console(self.globals, self.locals)
id = property(lambda x: id(x))
| mit |
pienkowb/omelette | omelette/compiler/test/test_validator.py | 1 | 1773 | import unittest
from omelette.compiler.uml import UMLObject
from omelette.compiler.validator import Validator
from omelette.compiler import logging
class ValidatorTest(unittest.TestCase):
def setUp(self):
self.uml_object = UMLObject(name="association")
self.uml_object.required = {"source-object": "OBJECT"}
self.uml_object.allowed = {
"arrow": "STRING",
"direction": ["none", "source", "target", "both"],
"source-role": "STRING",
"source-count": "MULTIPLICITY"}
self.uml_objects = {
"association": self.uml_object,
"Student": UMLObject(name="Student")}
self.logger = logging.getLogger("compiler")
self.logger.flush()
def test_validate_all_allowed(self):
self.uml_object.properties = {
"arrow": ("association", "STRING"),
"direction": ("target", "OBJECT"),
"source-object": ("Student", "OBJECT"),
"source-role": ("learns", "STRING"),
"source-count": ("1", "MULTIPLICITY")}
Validator(self.uml_objects).validate()
self.assertTrue(self.logger.is_empty())
def test_validate_not_allowed(self):
self.uml_object.properties = {
"stereotype": ("not_allowed", "STRING"),
"source-object": ("Student", "OBJECT")}
Validator(self.uml_objects).validate()
self.assertFalse(self.logger.is_empty())
def test_validate_no_required(self):
self.uml_object.properties = {
"arrow": ("association", "STRING"),
"direction": ("none", "OBJECT")}
Validator(self.uml_objects).validate()
self.assertFalse(self.logger.is_empty())
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
blab/stability | augur/mutator/mutate_everything_2YP7/foldx_essentials/mutation_stability.py | 2 | 2540 |
class mutation_stability(object):
    '''
    Check a list of mutations and format them so that they are compatible
    with the foldx structures 1HA0 and 2YP7.
    '''
def __init__(self, mut, structure):
self.mut = mut # list of mutations
self.mut_set = set(mut) # set of mutations
self.mut_chain_info_set = set()
self.structure = structure # either 1HA0 or 2YP7
if self.structure not in ["1HA0", "2YP7"]:
raise ValueError("This program only works for pdb structures 1HA0 or 2YP7")
def __str__(self):
return ", ".join(self.mut_chain_info_set)
def site_range_valid(self, mutation):
        '''
        The protein structures (1HA0, 2YP7) are missing certain amino acid
        sites; this checks that the mutation site is present in the structure.
        :param mutation: mutation in standard format
        :return: True if the site is in the structure, False otherwise
        '''
lowerRange = 9
upperRange = 502
missing_lower = 328
missing_upper = 333
site = int(mutation[1:len(mutation) - 1])
if missing_lower <= site <= missing_upper: # in missing middle section
return False
elif lowerRange <= site <= upperRange: # in range of protein structure besides middle section
return True
else:
return False
def include_chain_info(self, mutation):
        '''
        Include chain information for the given mutation. HA is a trimer, so
        the chain must be specified for foldx.
        :param mutation: mutation in standard format
        '''
if self.structure == "1HA0":
chains = ["A", "M", "Y"]
elif self.structure == "2YP7":
chains = ["A", "P", "E"]
site = mutation[1:len(mutation) - 1]
aa1 = mutation[0]
aa2 = mutation[len(mutation)-1]
for chain in chains:
self.mut_chain_info_set.add(aa1+chain+site+aa2)
def check_valid_mutation(self):
'''
checks each mutation in mut_set that it is a valid mutation for the structures 1HA0, 2YP7. Calls
include_chain_info, which adds each mutation with chain info to self.mut_chain_info_set.
'''
for mutation in self.mut_set:
site_valid = self.site_range_valid(mutation)
if site_valid:
self.include_chain_info(mutation)
def get_formatted_mutations(self):
self.check_valid_mutation()
return ';'.join(self.mut_chain_info_set)
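# A small usage sketch (the mutation "K160T" is illustrative; set iteration
# order means the joined output may vary):
#     ms = mutation_stability(["K160T"], "2YP7")
#     ms.get_formatted_mutations()   # e.g. "KA160T;KP160T;KE160T"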
| agpl-3.0 |
elkingtonmcb/sympy | sympy/stats/rv_interface.py | 88 | 5205 | from __future__ import print_function, division
from .rv import (probability, expectation, density, where, given, pspace, cdf,
sample, sample_iter, random_symbols, independent, dependent,
sampling_density)
from sympy import sqrt
__all__ = ['P', 'E', 'density', 'where', 'given', 'sample', 'cdf', 'pspace',
'sample_iter', 'variance', 'std', 'skewness', 'covariance',
'dependent', 'independent', 'random_symbols', 'correlation',
'moment', 'cmoment', 'sampling_density']
def moment(X, n, c=0, condition=None, **kwargs):
"""
Return the nth moment of a random expression about c i.e. E((X-c)**n)
Default value of c is 0.
Examples
========
>>> from sympy.stats import Die, moment, E
>>> X = Die('X', 6)
>>> moment(X, 1, 6)
-5/2
>>> moment(X, 2)
91/6
>>> moment(X, 1) == E(X)
True
"""
return expectation((X - c)**n, condition, **kwargs)
def variance(X, condition=None, **kwargs):
"""
Variance of a random expression
Expectation of (X-E(X))**2
Examples
========
>>> from sympy.stats import Die, E, Bernoulli, variance
>>> from sympy import simplify, Symbol
>>> X = Die('X', 6)
>>> p = Symbol('p')
>>> B = Bernoulli('B', p, 1, 0)
>>> variance(2*X)
35/3
>>> simplify(variance(B))
p*(-p + 1)
"""
return cmoment(X, 2, condition, **kwargs)
def standard_deviation(X, condition=None, **kwargs):
"""
Standard Deviation of a random expression
Square root of the Expectation of (X-E(X))**2
Examples
========
>>> from sympy.stats import Bernoulli, std
>>> from sympy import Symbol, simplify
>>> p = Symbol('p')
>>> B = Bernoulli('B', p, 1, 0)
>>> simplify(std(B))
sqrt(p*(-p + 1))
"""
return sqrt(variance(X, condition, **kwargs))
std = standard_deviation
def covariance(X, Y, condition=None, **kwargs):
"""
Covariance of two random expressions
The expectation that the two variables will rise and fall together
Covariance(X,Y) = E( (X-E(X)) * (Y-E(Y)) )
Examples
========
>>> from sympy.stats import Exponential, covariance
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> X = Exponential('X', rate)
>>> Y = Exponential('Y', rate)
>>> covariance(X, X)
lambda**(-2)
>>> covariance(X, Y)
0
>>> covariance(X, Y + rate*X)
1/lambda
"""
return expectation(
(X - expectation(X, condition, **kwargs)) *
(Y - expectation(Y, condition, **kwargs)),
condition, **kwargs)
def correlation(X, Y, condition=None, **kwargs):
"""
Correlation of two random expressions, also known as correlation
coefficient or Pearson's correlation
The normalized expectation that the two variables will rise
and fall together
Correlation(X,Y) = E( (X-E(X)) * (Y-E(Y)) / (sigma(X) * sigma(Y)) )
Examples
========
>>> from sympy.stats import Exponential, correlation
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> X = Exponential('X', rate)
>>> Y = Exponential('Y', rate)
>>> correlation(X, X)
1
>>> correlation(X, Y)
0
>>> correlation(X, Y + rate*X)
1/sqrt(1 + lambda**(-2))
"""
return covariance(X, Y, condition, **kwargs)/(std(X, condition, **kwargs)
* std(Y, condition, **kwargs))
def cmoment(X, n, condition=None, **kwargs):
"""
Return the nth central moment of a random expression about its mean
i.e. E((X - E(X))**n)
Examples
========
>>> from sympy.stats import Die, cmoment, variance
>>> X = Die('X', 6)
>>> cmoment(X, 3)
0
>>> cmoment(X, 2)
35/12
>>> cmoment(X, 2) == variance(X)
True
"""
mu = expectation(X, condition, **kwargs)
return moment(X, n, mu, condition, **kwargs)
def smoment(X, n, condition=None, **kwargs):
"""
Return the nth Standardized moment of a random expression i.e.
E( ((X - mu)/sigma(X))**n )
Examples
========
>>> from sympy.stats import skewness, Exponential, smoment
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> Y = Exponential('Y', rate)
>>> smoment(Y, 4)
9
>>> smoment(Y, 4) == smoment(3*Y, 4)
True
>>> smoment(Y, 3) == skewness(Y)
True
"""
sigma = std(X, condition, **kwargs)
return (1/sigma)**n*cmoment(X, n, condition, **kwargs)
def skewness(X, condition=None, **kwargs):
"""
Measure of the asymmetry of the probability distribution
Positive skew indicates that most of the values lie to the right of
the mean
skewness(X) = E( ((X - E(X))/sigma)**3 )
Examples
========
>>> from sympy.stats import skewness, Exponential, Normal
>>> from sympy import Symbol
>>> X = Normal('X', 0, 1)
>>> skewness(X)
0
>>> rate = Symbol('lambda', positive=True, real=True, finite=True)
>>> Y = Exponential('Y', rate)
>>> skewness(Y)
2
"""
return smoment(X, 3, condition, **kwargs)
P = probability
E = expectation
| bsd-3-clause |
chitr/neutron | neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.py | 34 | 2980 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.linuxbridge.mech_driver \
import mech_linuxbridge
from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
class LinuxbridgeMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
VIF_TYPE = portbindings.VIF_TYPE_BRIDGE
CAP_PORT_FILTER = True
AGENT_TYPE = constants.AGENT_TYPE_LINUXBRIDGE
GOOD_MAPPINGS = {'fake_physical_network': 'fake_interface'}
GOOD_TUNNEL_TYPES = ['gre', 'vxlan']
GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS,
'tunnel_types': GOOD_TUNNEL_TYPES}
BAD_MAPPINGS = {'wrong_physical_network': 'wrong_interface'}
BAD_TUNNEL_TYPES = ['bad_tunnel_type']
BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS,
'tunnel_types': BAD_TUNNEL_TYPES}
AGENTS = [{'alive': True,
'configurations': GOOD_CONFIGS,
'host': 'host'}]
AGENTS_DEAD = [{'alive': False,
'configurations': GOOD_CONFIGS,
'host': 'dead_host'}]
AGENTS_BAD = [{'alive': False,
'configurations': GOOD_CONFIGS,
'host': 'bad_host_1'},
{'alive': True,
'configurations': BAD_CONFIGS,
'host': 'bad_host_2'}]
def setUp(self):
super(LinuxbridgeMechanismBaseTestCase, self).setUp()
self.driver = mech_linuxbridge.LinuxbridgeMechanismDriver()
self.driver.initialize()
class LinuxbridgeMechanismGenericTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismGenericTestCase):
pass
class LinuxbridgeMechanismLocalTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismLocalTestCase):
pass
class LinuxbridgeMechanismFlatTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismFlatTestCase):
pass
class LinuxbridgeMechanismVlanTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismVlanTestCase):
pass
class LinuxbridgeMechanismGreTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismGreTestCase):
pass
| apache-2.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Coupled_Contact/Steady_State_Single_Foundation_Sysytem_Under_Tension/CoupledSoftContact/n_0.3/compare_txt.py | 637 | 2094 | #!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# analytic_solution = sys.argv[1]
# numeric_result = sys.argv[2]
analytic_solution = 'analytic_solution.txt'
numeric_result = 'numeric_result.txt'
analytic_sol = np.loadtxt(analytic_solution)
numeric_res = np.loadtxt(numeric_result)
abs_error = abs(analytic_sol - numeric_res)
rel_error = abs_error / abs(analytic_sol) * 100  # relative error in percent, matching the error[%] header below
analytic_sol = float(analytic_sol)
numeric_res = float(numeric_res)
rel_error = float(rel_error)
# print the results
case_flag=1
print headrun() , "-----------Testing results-----------------"
print headstep() ,'{0} {1} {2} '.format('analytic_solution ','numeric_result ','error[%]')
print headOK() ,'{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error )
if(case_flag==1):
print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \;
# find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \;
# find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \;
# find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \;
# find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \;
# sed -i "s/node\.fei/add_node.include/" main.fei
# sed -i "s/add_node\.fei/add_node.include/" main.fei
# sed -i "s/element\.fei/add_element.include/" main.fei
# sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei
# sed -i "s/constraint\.fei/add_constraint.include/" main.fei
# find . -name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
| cc0-1.0 |
MrReN/django-oscar | oscar/apps/dashboard/vouchers/forms.py | 1 | 2964 | from django import forms
from oscar.core.loading import get_model
from django.utils.translation import ugettext_lazy as _
Voucher = get_model('voucher', 'Voucher')
Benefit = get_model('offer', 'Benefit')
Range = get_model('offer', 'Range')
class VoucherForm(forms.Form):
"""
A specialised form for creating a voucher and offer
model.
"""
name = forms.CharField(label=_("Name"))
code = forms.CharField(label=_("Code"))
start_date = forms.DateField(label=_("Start date"))
end_date = forms.DateField(label=_("End date"))
usage = forms.ChoiceField(choices=Voucher.USAGE_CHOICES, label=_("Usage"))
benefit_range = forms.ModelChoiceField(
label=_('Which products get a discount?'),
queryset=Range.objects.all(),
)
type_choices = (
(Benefit.PERCENTAGE, _('Percentage off of products in range')),
(Benefit.FIXED, _('Fixed amount off of products in range')),
)
benefit_type = forms.ChoiceField(
choices=type_choices,
label=_('Discount type'),
)
benefit_value = forms.DecimalField(
label=_('Discount value'))
def __init__(self, voucher=None, *args, **kwargs):
self.voucher = voucher
super(VoucherForm, self).__init__(*args, **kwargs)
def clean_name(self):
name = self.cleaned_data['name']
try:
voucher = Voucher.objects.get(name=name)
except Voucher.DoesNotExist:
pass
else:
if (not self.voucher) or (voucher.id != self.voucher.id):
raise forms.ValidationError(_("The name '%s' is already in"
" use") % name)
return name
def clean_code(self):
code = self.cleaned_data['code'].strip().upper()
if not code:
raise forms.ValidationError(_("Please enter a voucher code"))
try:
voucher = Voucher.objects.get(code=code)
except Voucher.DoesNotExist:
pass
else:
if (not self.voucher) or (voucher.id != self.voucher.id):
raise forms.ValidationError(_("The code '%s' is already in"
" use") % code)
return code
def clean(self):
cleaned_data = super(VoucherForm, self).clean()
start_date = cleaned_data.get('start_date', None)
end_date = cleaned_data.get('end_date', None)
if start_date and end_date and end_date < start_date:
raise forms.ValidationError(_("The start date must be before the"
" end date"))
return cleaned_data
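# A minimal sketch of driving this form programmatically (all field values,
# and the Range instance providing the pk, are illustrative):
#     form = VoucherForm(data={
#         'name': 'Spring promo', 'code': 'spring', 'usage': Voucher.MULTI_USE,
#         'start_date': '2014-01-01', 'end_date': '2014-12-31',
#         'benefit_range': some_range.pk, 'benefit_type': Benefit.PERCENTAGE,
#         'benefit_value': '10.00'})
#     form.is_valid()   # runs clean_code(), upper-casing 'spring' -> 'SPRING'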
class VoucherSearchForm(forms.Form):
name = forms.CharField(required=False, label=_("Name"))
code = forms.CharField(required=False, label=_("Code"))
is_active = forms.BooleanField(required=False, label=_("Is Active?"))
def clean_code(self):
return self.cleaned_data['code'].upper()
| bsd-3-clause |
elancom/kafka | system_test/offset_management_testsuite/offset_management_test.py | 70 | 16827 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# offset_management_test.py
# ===================================
import os
import signal
import sys
import time
import traceback
from system_test_env import SystemTestEnv
sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR)
from setup_utils import SetupUtils
from replication_utils import ReplicationUtils
import system_test_utils
from testcase_env import TestcaseEnv
# product specific: Kafka
import kafka_system_test_utils
import metrics
class OffsetManagementTest(ReplicationUtils, SetupUtils):
testModuleAbsPathName = os.path.realpath(__file__)
testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName))
def __init__(self, systemTestEnv):
# SystemTestEnv - provides cluster level environment settings
# such as entity_id, hostname, kafka_home, java_home which
# are available in a list of dictionary named
# "clusterEntityConfigDictList"
self.systemTestEnv = systemTestEnv
super(OffsetManagementTest, self).__init__(self)
# dict to pass user-defined attributes to logger argument: "extra"
d = {'name_of_class': self.__class__.__name__}
def signal_handler(self, signal, frame):
self.log_message("Interrupt detected - User pressed Ctrl+c")
# perform the necessary cleanup here when user presses Ctrl+c and it may be product specific
self.log_message("stopping all entities - please wait ...")
kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
sys.exit(1)
def runTest(self):
# ======================================================================
# get all testcase directories under this testsuite
# ======================================================================
testCasePathNameList = system_test_utils.get_dir_paths_with_prefix(
self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX)
testCasePathNameList.sort()
replicationUtils = ReplicationUtils(self)
# =============================================================
# launch each testcase one by one: testcase_1, testcase_2, ...
# =============================================================
for testCasePathName in testCasePathNameList:
skipThisTestCase = False
try:
# ======================================================================
# A new instance of TestcaseEnv to keep track of this testcase's env vars
# and initialize some env vars as testCasePathName is available now
# ======================================================================
self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self)
self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName
self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName)
self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"]
# ======================================================================
# SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json
# ======================================================================
testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"]
if self.systemTestEnv.printTestDescriptionsOnly:
self.testcaseEnv.printTestCaseDescription(testcaseDirName)
continue
elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName):
self.log_message("Skipping : " + testcaseDirName)
skipThisTestCase = True
continue
else:
self.testcaseEnv.printTestCaseDescription(testcaseDirName)
system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName)
# ============================================================================== #
# ============================================================================== #
# Product Specific Testing Code Starts Here: #
# ============================================================================== #
# ============================================================================== #
# initialize self.testcaseEnv with user-defined environment variables (product specific)
self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False
self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False
# initialize signal handler
signal.signal(signal.SIGINT, self.signal_handler)
# TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file:
# system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data(
self.testcaseEnv.testcasePropJsonPathName)
# clean up data directories specified in zookeeper.properties and kafka_server_<n>.properties
kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv)
# create "LOCAL" log directories for metrics, dashboards for each entity under this testcase
# for collecting logs from remote machines
kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv)
# TestcaseEnv - initialize producer & consumer config / log file pathnames
kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv)
# generate remote hosts log/config dirs if not exist
kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv)
# generate properties files for zookeeper, kafka, producer, and consumer:
# 1. copy system_test/<suite_name>_testsuite/config/*.properties to
# system_test/<suite_name>_testsuite/testcase_<n>/config/
# 2. update all properties files in system_test/<suite_name>_testsuite/testcase_<n>/config
# by overriding the settings specified in:
# system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName,
self.testcaseEnv, self.systemTestEnv)
# =============================================
# preparing all entities to start the test
# =============================================
self.log_message("starting zookeepers")
kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 2s")
time.sleep(2)
self.log_message("starting brokers")
kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 5s")
time.sleep(5)
self.log_message("creating offset topic")
kafka_system_test_utils.create_topic(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 3, 2)
self.anonLogger.info("sleeping for 5s")
time.sleep(5)
# =============================================
# starting producer
# =============================================
self.log_message("starting producer in the background")
kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False)
msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"]
self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages")
time.sleep(int(msgProducingFreeTimeSec))
kafka_system_test_utils.start_console_consumers(self.systemTestEnv, self.testcaseEnv)
kafka_system_test_utils.get_leader_for(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 0)
# =============================================
# A while-loop to bounce consumers as specified
# by "num_iterations" in testcase_n_properties.json
# =============================================
i = 1
numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
bouncedEntityDownTimeSec = 10
try:
bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
except:
pass
# group1 -> offsets partition 0 // has one consumer; eid: 6
# group2 -> offsets partition 1 // has four consumers; eid: 7, 8, 9, 10
offsets_0_leader_entity = kafka_system_test_utils.get_leader_for(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 0)
offsets_1_leader_entity = kafka_system_test_utils.get_leader_for(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 1)
while i <= numIterations:
self.log_message("Iteration " + str(i) + " of " + str(numIterations))
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, offsets_0_leader_entity, self.testcaseEnv.entityBrokerParentPidDict[offsets_0_leader_entity])
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, offsets_1_leader_entity, self.testcaseEnv.entityBrokerParentPidDict[offsets_1_leader_entity])
# =============================================
# Bounce consumers if specified in testcase config
# =============================================
bounceConsumers = self.testcaseEnv.testcaseArgumentsDict["bounce_consumers"]
self.log_message("bounce_consumers flag : " + bounceConsumers)
if (bounceConsumers.lower() == "true"):
clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList
consumerEntityIdList = system_test_utils.get_data_from_list_of_dicts( clusterConfigList, "role", "console_consumer", "entity_id")
for stoppedConsumerEntityId in consumerEntityIdList:
consumerPPID = self.testcaseEnv.entityConsoleConsumerParentPidDict[stoppedConsumerEntityId]
self.log_message("stopping consumer: " + consumerPPID)
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedConsumerEntityId, consumerPPID)
self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
time.sleep(bouncedEntityDownTimeSec)
# leaders would have changed during the above bounce.
self.log_message("starting the previously terminated consumers.")
for stoppedConsumerEntityId in consumerEntityIdList:
# starting previously terminated consumer
kafka_system_test_utils.start_console_consumers(self.systemTestEnv, self.testcaseEnv, stoppedConsumerEntityId)
self.log_message("starting the previously terminated brokers")
kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, offsets_0_leader_entity)
kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, offsets_1_leader_entity)
self.anonLogger.info("sleeping for 15s")
time.sleep(15)
i += 1
# while loop
# =============================================
# tell producer to stop
# =============================================
self.testcaseEnv.lock.acquire()
self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True
time.sleep(1)
self.testcaseEnv.lock.release()
time.sleep(1)
# =============================================
# wait for producer thread's update of
# "backgroundProducerStopped" to be "True"
# =============================================
while 1:
self.testcaseEnv.lock.acquire()
self.logger.info("status of backgroundProducerStopped : [" + \
str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d)
if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]:
time.sleep(1)
self.logger.info("all producer threads completed", extra=self.d)
break
time.sleep(1)
self.testcaseEnv.lock.release()
time.sleep(2)
self.anonLogger.info("sleeping for 15s")
time.sleep(15)
# =============================================
# this testcase is completed - stop all entities
# =============================================
self.log_message("stopping all entities")
for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items():
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items():
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
# make sure all entities are stopped
kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv)
# =============================================
# collect logs from remote hosts
# =============================================
kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)
# =============================================
# validate the data matched and checksum
# =============================================
self.log_message("validating data matched")
kafka_system_test_utils.validate_data_matched_in_multi_topics_from_single_consumer_producer(self.systemTestEnv, self.testcaseEnv, replicationUtils)
except Exception as e:
self.log_message("Exception while running test {0}".format(e))
traceback.print_exc()
self.testcaseEnv.validationStatusDict["Test completed"] = "FAILED"
finally:
if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly:
self.log_message("stopping all entities - please wait ...")
kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
| apache-2.0 |
beni55/sympy | sympy/core/logic.py | 7 | 9362 | """Logic expressions handling
NOTE
----
at present this is mainly needed for facts.py , feel free however to improve
this stuff for general purpose.
"""
from __future__ import print_function, division
from sympy.core.compatibility import iterable
def _fuzzy_group(args, quick_exit=False):
"""Return True if all args are True, None if there is any None else False
unless ``quick_exit`` is True (then return None as soon as a second False
is seen.
``_fuzzy_group`` is like ``fuzzy_and`` except that it is more
conservative in returning a False, waiting to make sure that all
arguments are True or False and returning None if any arguments are
    None. It also has the capability of permitting only a single False and
returning None if more than one is seen. For example, the presence of a
single transcendental amongst rationals would indicate that the group is
no longer rational; but a second transcendental in the group would make the
determination impossible.
Examples
========
>>> from sympy.core.logic import _fuzzy_group
By default, multiple Falses mean the group is broken:
>>> _fuzzy_group([False, False, True])
False
If multiple Falses mean the group status is unknown then set
`quick_exit` to True so None can be returned when the 2nd False is seen:
>>> _fuzzy_group([False, False, True], quick_exit=True)
But if only a single False is seen then the group is known to
be broken:
>>> _fuzzy_group([False, True, True], quick_exit=True)
False
"""
saw_other = False
for a in args:
if a is True:
continue
if a is None:
return
if quick_exit and saw_other:
return
saw_other = True
return not saw_other
def fuzzy_bool(x):
"""Return True, False or None according to x.
Whereas bool(x) returns True or False, fuzzy_bool allows
for the None value.
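    Examples
    ========
    >>> from sympy.core.logic import fuzzy_bool
    >>> fuzzy_bool(None)
    >>> fuzzy_bool(0)
    False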
"""
if x is None:
return None
return bool(x)
def fuzzy_and(args):
"""Return True (all True), False (any False) or None.
Examples
========
>>> from sympy.core.logic import fuzzy_and
>>> from sympy import Dummy
    If you had a list of objects to test the commutativity of
and you want the fuzzy_and logic applied, passing an
iterator will allow the commutativity to only be computed
as many times as necessary. With this list, False can be
returned after analyzing the first symbol:
>>> syms = [Dummy(commutative=False), Dummy()]
>>> fuzzy_and(s.is_commutative for s in syms)
False
That False would require less work than if a list of pre-computed
items was sent:
>>> fuzzy_and([s.is_commutative for s in syms])
False
"""
rv = True
for ai in args:
ai = fuzzy_bool(ai)
if ai is False:
return False
if rv: # this will stop updating if a None is ever trapped
rv = ai
return rv
def fuzzy_not(v):
"""
Not in fuzzy logic
Return None if `v` is None else `not v`.
Examples
========
>>> from sympy.core.logic import fuzzy_not
>>> fuzzy_not(True)
False
>>> fuzzy_not(None)
>>> fuzzy_not(False)
True
"""
if v is None:
return v
else:
return not v
def fuzzy_or(args):
"""
Or in fuzzy logic. Returns True (any True), False (all False), or None
See the docstrings of fuzzy_and and fuzzy_not for more info. fuzzy_or is
related to the two by the standard De Morgan's law.
>>> from sympy.core.logic import fuzzy_or
>>> fuzzy_or([True, False])
True
>>> fuzzy_or([True, None])
True
>>> fuzzy_or([False, False])
False
>>> print(fuzzy_or([False, None]))
None
"""
return fuzzy_not(fuzzy_and(fuzzy_not(i) for i in args))
class Logic(object):
"""Logical expression"""
    # mapping: operator symbol -> Logic subclass
op_2class = {}
def __new__(cls, *args):
obj = object.__new__(cls)
obj.args = args
return obj
def __getnewargs__(self):
return self.args
def __hash__(self):
return hash( (type(self).__name__,) + tuple(self.args) )
def __eq__(a, b):
if not isinstance(b, type(a)):
return False
else:
return a.args == b.args
def __ne__(a, b):
if not isinstance(b, type(a)):
return True
else:
return a.args != b.args
def __lt__(self, other):
if self.__cmp__(other) == -1:
return True
return False
def __cmp__(self, other):
if type(self) is not type(other):
a = str(type(self))
b = str(type(other))
else:
a = self.args
b = other.args
return (a > b) - (a < b)
def __str__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join(str(a) for a in self.args))
__repr__ = __str__
@staticmethod
def fromstring(text):
"""Logic from string with space around & and | but none after !.
e.g.
!a & b | c
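        Two unambiguous cases (multi-operand results are not shown because
        their argument order depends on hashing):
        >>> from sympy.core.logic import Logic
        >>> Logic.fromstring('!a')
        Not(a)
        >>> Logic.fromstring('a & a')
        'a'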
"""
lexpr = None # current logical expression
schedop = None # scheduled operation
for term in text.split():
# operation symbol
if term in '&|':
if schedop is not None:
raise ValueError(
'double op forbidden: "%s %s"' % (term, schedop))
if lexpr is None:
raise ValueError(
'%s cannot be in the beginning of expression' % term)
schedop = term
continue
if '&' in term or '|' in term:
raise ValueError('& and | must have space around them')
if term[0] == '!':
if len(term) == 1:
raise ValueError('do not include space after "!"')
term = Not(term[1:])
# already scheduled operation, e.g. '&'
if schedop:
lexpr = Logic.op_2class[schedop](lexpr, term)
schedop = None
continue
# this should be atom
if lexpr is not None:
raise ValueError(
'missing op between "%s" and "%s"' % (lexpr, term))
lexpr = term
# let's check that we ended up in correct state
if schedop is not None:
raise ValueError('premature end-of-expression in "%s"' % text)
if lexpr is None:
raise ValueError('"%s" is empty' % text)
# everything looks good now
return lexpr
class AndOr_Base(Logic):
def __new__(cls, *args):
bargs = []
for a in args:
if a == cls.op_x_notx:
return a
elif a == (not cls.op_x_notx):
continue # skip this argument
bargs.append(a)
args = sorted(set(cls.flatten(bargs)), key=hash)
for a in args:
if Not(a) in args:
return cls.op_x_notx
if len(args) == 1:
return args.pop()
elif len(args) == 0:
return not cls.op_x_notx
return Logic.__new__(cls, *args)
@classmethod
def flatten(cls, args):
# quick-n-dirty flattening for And and Or
args_queue = list(args)
res = []
while True:
try:
arg = args_queue.pop(0)
except IndexError:
break
if isinstance(arg, Logic):
if isinstance(arg, cls):
args_queue.extend(arg.args)
continue
res.append(arg)
args = tuple(res)
return args
class And(AndOr_Base):
op_x_notx = False
def _eval_propagate_not(self):
# !(a&b&c ...) == !a | !b | !c ...
return Or( *[Not(a) for a in self.args] )
# (a|b|...) & c == (a&c) | (b&c) | ...
def expand(self):
# first locate Or
for i in range(len(self.args)):
arg = self.args[i]
if isinstance(arg, Or):
arest = self.args[:i] + self.args[i + 1:]
orterms = [And( *(arest + (a,)) ) for a in arg.args]
for j in range(len(orterms)):
if isinstance(orterms[j], Logic):
orterms[j] = orterms[j].expand()
res = Or(*orterms)
return res
else:
return self
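    # e.g. And('c', Or('a', 'b')).expand() yields Or(And('a', 'c'),
    # And('b', 'c')); the argument order may differ because args are
    # sorted by hash.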
class Or(AndOr_Base):
op_x_notx = True
def _eval_propagate_not(self):
# !(a|b|c ...) == !a & !b & !c ...
return And( *[Not(a) for a in self.args] )
class Not(Logic):
def __new__(cls, arg):
if isinstance(arg, str):
return Logic.__new__(cls, arg)
elif isinstance(arg, bool):
return not arg
elif isinstance(arg, Not):
return arg.args[0]
elif isinstance(arg, Logic):
# XXX this is a hack to expand right from the beginning
arg = arg._eval_propagate_not()
return arg
else:
raise ValueError('Not: unknown argument %r' % (arg,))
@property
def arg(self):
return self.args[0]
Logic.op_2class['&'] = And
Logic.op_2class['|'] = Or
Logic.op_2class['!'] = Not
| bsd-3-clause |
zhaochao/fuel-web | nailgun/nailgun/test/integration/test_public_api.py | 1 | 3000 | # -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from oslo.serialization import jsonutils
from nailgun.test.base import BaseAuthenticationIntegrationTest
from nailgun.test.base import reverse
class TestPublicHandlers(BaseAuthenticationIntegrationTest):
def test_node_agent_api(self):
self.env.create_node(
api=False,
status='provisioning',
meta=self.env.default_metadata()
)
node_db = self.env.nodes[0]
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(
{'mac': node_db.mac,
'status': 'discover', 'manufacturer': 'new'}
),
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
resp = self.app.post(
reverse('NodeCollectionHandler'),
jsonutils.dumps({'mac': self.env.generate_random_mac(),
'status': 'discover'}),
headers=self.default_headers)
self.assertEqual(201, resp.status_code)
def test_version_api(self):
resp = self.app.get(
reverse('VersionHandler'),
headers=self.default_headers
)
self.assertEqual(200, resp.status_code)
@patch('nailgun.api.v1.handlers.version.utils.get_fuel_release_versions')
def test_500_no_html_dev(self, handler_get):
exc_text = "Here goes an exception"
handler_get.side_effect = Exception(exc_text)
resp = self.app.get(
reverse('VersionHandler'),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(500, resp.status_code)
self.assertIn(exc_text, resp.body)
self.assertIn("Traceback", resp.body)
self.assertNotIn("html", resp.body)
@patch('nailgun.api.v1.handlers.version.utils.get_fuel_release_versions')
def test_500_no_html_production(self, handler_get):
exc_text = "Here goes an exception"
handler_get.side_effect = Exception(exc_text)
with patch('nailgun.settings.settings.DEVELOPMENT', 0):
resp = self.app.get(
reverse('VersionHandler'),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(500, resp.status_code)
self.assertEqual(exc_text, resp.body)
| apache-2.0 |
louietsai/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_idioms.py | 203 | 4876 | """Adjust some old Python 2 idioms to their modern counterparts.
* Change some type comparisons to isinstance() calls:
type(x) == T -> isinstance(x, T)
type(x) is T -> isinstance(x, T)
type(x) != T -> not isinstance(x, T)
type(x) is not T -> not isinstance(x, T)
* Change "while 1:" into "while True:".
* Change both
v = list(EXPR)
v.sort()
foo(v)
and the more general
v = EXPR
v.sort()
foo(v)
into
v = sorted(EXPR)
foo(v)
"""
# Author: Jacques Frechet, Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms
CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
TYPE = "power< 'type' trailer< '(' x=any ')' > >"
class FixIdioms(fixer_base.BaseFix):
explicit = True # The user must ask for this fixer
PATTERN = r"""
isinstance=comparison< %s %s T=any >
|
isinstance=comparison< T=any %s %s >
|
while_stmt< 'while' while='1' ':' any+ >
|
sorted=any<
any*
simple_stmt<
expr_stmt< id1=any '='
power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
>
'\n'
>
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
|
sorted=any<
any*
simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
sort=
simple_stmt<
power< id2=any
trailer< '.' 'sort' > trailer< '(' ')' >
>
'\n'
>
next=any*
>
""" % (TYPE, CMP, CMP, TYPE)
def match(self, node):
r = super(FixIdioms, self).match(node)
# If we've matched one of the sort/sorted subpatterns above, we
# want to reject matches where the initial assignment and the
# subsequent .sort() call involve different identifiers.
if r and "sorted" in r:
if r["id1"] == r["id2"]:
return r
return None
return r
def transform(self, node, results):
if "isinstance" in results:
return self.transform_isinstance(node, results)
elif "while" in results:
return self.transform_while(node, results)
elif "sorted" in results:
return self.transform_sort(node, results)
else:
raise RuntimeError("Invalid match")
def transform_isinstance(self, node, results):
x = results["x"].clone() # The thing inside of type()
T = results["T"].clone() # The type being compared against
x.prefix = ""
T.prefix = " "
test = Call(Name("isinstance"), [x, Comma(), T])
if "n" in results:
test.prefix = " "
test = Node(syms.not_test, [Name("not"), test])
test.prefix = node.prefix
return test
def transform_while(self, node, results):
one = results["while"]
one.replace(Name("True", prefix=one.prefix))
def transform_sort(self, node, results):
sort_stmt = results["sort"]
next_stmt = results["next"]
list_call = results.get("list")
simple_expr = results.get("expr")
if list_call:
list_call.replace(Name("sorted", prefix=list_call.prefix))
elif simple_expr:
new = simple_expr.clone()
new.prefix = ""
simple_expr.replace(Call(Name("sorted"), [new],
prefix=simple_expr.prefix))
else:
raise RuntimeError("should not have reached here")
sort_stmt.remove()
btwn = sort_stmt.prefix
# Keep any prefix lines between the sort_stmt and the list_call and
# shove them right after the sorted() call.
if "\n" in btwn:
if next_stmt:
# The new prefix should be everything from the sort_stmt's
# prefix up to the last newline, then the old prefix after a new
# line.
prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix)
next_stmt[0].prefix = "\n".join(prefix_lines)
else:
assert list_call.parent
assert list_call.next_sibling is None
# Put a blank line after list_call and set its prefix.
end_line = BlankLine()
list_call.parent.append_child(end_line)
assert list_call.next_sibling is end_line
# The new prefix should be everything up to the first new line
# of sort_stmt's prefix.
end_line.prefix = btwn.rpartition("\n")[0]
| apache-2.0 |
seem-sky/kbengine | kbe/src/lib/python/Lib/posixpath.py | 92 | 13448 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount", "expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath"]
# Strings representing various path-related bits and pieces.
# These are primarily for export; internally, they are hardcoded.
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None
devnull = '/dev/null'
def _get_sep(path):
if isinstance(path, bytes):
return b'/'
else:
return '/'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
"""Normalize case of pathname. Has no effect under Posix"""
if not isinstance(s, (bytes, str)):
raise TypeError("normcase() argument must be str or bytes, "
"not '{}'".format(s.__class__.__name__))
return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
"""Test whether a path is absolute"""
sep = _get_sep(s)
return s.startswith(sep)
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
"""Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
will be discarded. An empty last part will result in a path that
ends with a separator."""
sep = _get_sep(a)
path = a
try:
for b in p:
if b.startswith(sep):
path = b
elif not path or path.endswith(sep):
path += b
else:
path += sep + b
except TypeError:
if all(isinstance(s, (str, bytes)) for s in (a,) + p):
# Must have a mixture of text and binary data
raise TypeError("Can't mix strings and bytes in path "
"components") from None
raise
return path
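# For instance (each result follows from the rules above):
#     join('/usr', 'lib')        -> '/usr/lib'
#     join('/usr', '/opt', 'x')  -> '/opt/x'   (absolute part discards the rest)
#     join('a', '')              -> 'a/'       (empty last part adds a separator)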
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
"""Split a pathname. Returns tuple "(head, tail)" where "tail" is
everything after the final slash. Either part may be empty."""
sep = _get_sep(p)
i = p.rfind(sep) + 1
head, tail = p[:i], p[i:]
if head and head != sep*len(head):
head = head.rstrip(sep)
return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
if isinstance(p, bytes):
sep = b'/'
extsep = b'.'
else:
sep = '/'
extsep = '.'
return genericpath._splitext(p, sep, None, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
"""Split a pathname into drive and path. On Posix, drive is always
empty."""
return p[:0], p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
"""Returns the final component of a pathname"""
sep = _get_sep(p)
i = p.rfind(sep) + 1
return p[i:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
"""Returns the directory component of a pathname"""
sep = _get_sep(p)
i = p.rfind(sep) + 1
head = p[:i]
if head and head != sep*len(head):
head = head.rstrip(sep)
return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
"""Test whether a path is a symbolic link"""
try:
st = os.lstat(path)
except (OSError, AttributeError):
return False
return stat.S_ISLNK(st.st_mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
os.lstat(path)
except OSError:
return False
return True
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
"""Test whether a path is a mount point"""
try:
s1 = os.lstat(path)
except OSError:
# It doesn't exist -- so not a mount point. :-)
return False
else:
# A symlink can never be a mount point
if stat.S_ISLNK(s1.st_mode):
return False
if isinstance(path, bytes):
parent = join(path, b'..')
else:
parent = join(path, '..')
try:
s2 = os.lstat(parent)
except OSError:
return False
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
            return True     # path/.. on a different device than path
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
return True # path/.. is the same i-node as path
return False
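# Behavior sketch (an added note; the results depend on the live filesystem,
# so these are typical values rather than guarantees):
#
#   >>> ismount('/')              # '/..' is the same i-node as '/'
#   True
#   >>> ismount('/no/such/path')  # nonexistent paths are never mount points
#   False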
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if isinstance(path, bytes):
tilde = b'~'
else:
tilde = '~'
if not path.startswith(tilde):
return path
sep = _get_sep(path)
i = path.find(sep, 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = os.environ['HOME']
else:
import pwd
name = path[1:i]
if isinstance(name, bytes):
name = str(name, 'ASCII')
try:
pwent = pwd.getpwnam(name)
except KeyError:
return path
userhome = pwent.pw_dir
if isinstance(path, bytes):
userhome = os.fsencode(userhome)
root = b'/'
else:
root = '/'
userhome = userhome.rstrip(root)
return (userhome + path[i:]) or root
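# Illustrative sketch (added; actual results depend on $HOME and the local
# password database, so the expanded value below is hypothetical):
#
#   >>> expanduser('~/notes')        # becomes os.environ['HOME'] + '/notes'
#   '/home/alice/notes'
#   >>> expanduser('~nosuchuser/x')  # unknown user: returned unchanged
#   '~nosuchuser/x'
#   >>> expanduser('plain/path')     # no leading '~': returned unchanged
#   'plain/path'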
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
_varprog = None
_varprogb = None
def expandvars(path):
"""Expand shell variables of form $var and ${var}. Unknown variables
are left unchanged."""
global _varprog, _varprogb
if isinstance(path, bytes):
if b'$' not in path:
return path
if not _varprogb:
import re
_varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
search = _varprogb.search
start = b'{'
end = b'}'
environ = getattr(os, 'environb', None)
else:
if '$' not in path:
return path
if not _varprog:
import re
_varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
search = _varprog.search
start = '{'
end = '}'
environ = os.environ
i = 0
while True:
m = search(path, i)
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name.startswith(start) and name.endswith(end):
name = name[1:-1]
try:
if environ is None:
value = os.fsencode(os.environ[os.fsdecode(name)])
else:
value = environ[name]
except KeyError:
i = j
else:
tail = path[j:]
path = path[:i] + value
i = len(path)
path += tail
return path
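# Illustrative sketch (added; assumes os.environ contains NAME=world):
#
#   >>> expandvars('hello $NAME')
#   'hello world'
#   >>> expandvars('${NAME}!')      # the braced form is also expanded
#   'world!'
#   >>> expandvars('$NO_SUCH_VAR')  # unknown variables are left unchanged
#   '$NO_SUCH_VAR'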
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
if isinstance(path, bytes):
sep = b'/'
empty = b''
dot = b'.'
dotdot = b'..'
else:
sep = '/'
empty = ''
dot = '.'
dotdot = '..'
if path == empty:
return dot
initial_slashes = path.startswith(sep)
# POSIX allows one or two initial slashes, but treats three or more
# as single slash.
if (initial_slashes and
path.startswith(sep*2) and not path.startswith(sep*3)):
initial_slashes = 2
comps = path.split(sep)
new_comps = []
for comp in comps:
if comp in (empty, dot):
continue
if (comp != dotdot or (not initial_slashes and not new_comps) or
(new_comps and new_comps[-1] == dotdot)):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
comps = new_comps
path = sep.join(comps)
if initial_slashes:
path = sep*initial_slashes + path
return path or dot
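# Illustrative doctest-style sketch (added; the function is purely lexical,
# so symlinks can make the normalized path point somewhere else):
#
#   >>> normpath('A//B/./C/../D')
#   'A/B/D'
#   >>> normpath('')         # the empty path normalizes to the current dir
#   '.'
#   >>> normpath('//foo')    # POSIX: exactly two leading slashes are kept
#   '//foo'
#   >>> normpath('../..')    # leading '..' components cannot be collapsed
#   '../..'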
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
if isinstance(path, bytes):
cwd = os.getcwdb()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
"""Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path."""
path, ok = _joinrealpath(filename[:0], filename, {})
return abspath(path)
# Join two paths, normalizing and eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
if isinstance(path, bytes):
sep = b'/'
curdir = b'.'
pardir = b'..'
else:
sep = '/'
curdir = '.'
pardir = '..'
if isabs(rest):
rest = rest[1:]
path = sep
while rest:
name, _, rest = rest.partition(sep)
if not name or name == curdir:
# current dir
continue
if name == pardir:
# parent dir
if path:
path, name = split(path)
if name == pardir:
path = join(path, pardir, pardir)
else:
path = pardir
continue
newpath = join(path, name)
if not islink(newpath):
path = newpath
continue
# Resolve the symbolic link
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
# Return already resolved part + rest of the path unchanged.
return join(newpath, rest), False
seen[newpath] = None # not resolved symlink
path, ok = _joinrealpath(path, os.readlink(newpath), seen)
if not ok:
return join(path, rest), False
seen[newpath] = path # resolved symlink
return path, True
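# Sketch of the contract above (an added note, not original documentation):
# with no symlinks involved, _joinrealpath('/usr', 'bin/../lib', {}) walks
# each component and returns ('/usr/lib', True).  When a symlink loop is
# detected, the partially resolved path comes back with ok == False, and
# realpath() simply runs abspath() on that best-effort result.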
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=None):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
if isinstance(path, bytes):
curdir = b'.'
sep = b'/'
pardir = b'..'
else:
curdir = '.'
sep = '/'
pardir = '..'
if start is None:
start = curdir
start_list = [x for x in abspath(start).split(sep) if x]
path_list = [x for x in abspath(path).split(sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
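# Illustrative sketch (added; both arguments are made absolute first, so the
# examples assume the paths shown are already absolute):
#
#   >>> relpath('/a/b/c', '/a')
#   'b/c'
#   >>> relpath('/a/b', '/a/c')   # climb out of 'c', then descend into 'b'
#   '../b'
#   >>> relpath('/a', '/a')       # identical paths yield the current dir
#   '.'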
| lgpl-3.0 |
tizianasellitto/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_tmpdir.py | 173 | 6017 | import sys
import py
import pytest
from _pytest.tmpdir import tmpdir
def test_funcarg(testdir):
testdir.makepyfile("""
def pytest_generate_tests(metafunc):
metafunc.addcall(id='a')
metafunc.addcall(id='b')
def test_func(tmpdir): pass
""")
from _pytest.tmpdir import TempdirFactory
reprec = testdir.inline_run()
calls = reprec.getcalls("pytest_runtest_setup")
item = calls[0].item
config = item.config
tmpdirhandler = TempdirFactory(config)
item._initrequest()
p = tmpdir(item._request, tmpdirhandler)
assert p.check()
bn = p.basename.strip("0123456789")
assert bn.endswith("test_func_a_")
item.name = "qwe/\\abc"
p = tmpdir(item._request, tmpdirhandler)
assert p.check()
bn = p.basename.strip("0123456789")
assert bn == "qwe__abc"
def test_ensuretemp(recwarn):
#pytest.deprecated_call(pytest.ensuretemp, 'hello')
d1 = pytest.ensuretemp('hello')
d2 = pytest.ensuretemp('hello')
assert d1 == d2
assert d1.check(dir=1)
class TestTempdirHandler:
def test_mktemp(self, testdir):
from _pytest.tmpdir import TempdirFactory
config = testdir.parseconfig()
config.option.basetemp = testdir.mkdir("hello")
t = TempdirFactory(config)
tmp = t.mktemp("world")
assert tmp.relto(t.getbasetemp()) == "world0"
tmp = t.mktemp("this")
assert tmp.relto(t.getbasetemp()).startswith("this")
tmp2 = t.mktemp("this")
assert tmp2.relto(t.getbasetemp()).startswith("this")
assert tmp2 != tmp
class TestConfigTmpdir:
def test_getbasetemp_custom_removes_old(self, testdir):
mytemp = testdir.tmpdir.join("xyz")
p = testdir.makepyfile("""
def test_1(tmpdir):
pass
""")
testdir.runpytest(p, '--basetemp=%s' % mytemp)
mytemp.check()
mytemp.ensure("hello")
testdir.runpytest(p, '--basetemp=%s' % mytemp)
mytemp.check()
assert not mytemp.join("hello").check()
def test_basetemp(testdir):
mytemp = testdir.tmpdir.mkdir("mytemp")
p = testdir.makepyfile("""
import pytest
def test_1():
pytest.ensuretemp("hello")
""")
result = testdir.runpytest(p, '--basetemp=%s' % mytemp)
assert result.ret == 0
assert mytemp.join('hello').check()
@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
reason="symlink not available on this platform")
def test_tmpdir_always_is_realpath(testdir):
# the reason why tmpdir should be a realpath is that
# when you cd to it and do "os.getcwd()" you will anyway
# get the realpath. Using the symlinked path can thus
# easily result in path-inequality
# XXX if that proves to be a problem, consider using
# os.environ["PWD"]
realtemp = testdir.tmpdir.mkdir("myrealtemp")
linktemp = testdir.tmpdir.join("symlinktemp")
linktemp.mksymlinkto(realtemp)
p = testdir.makepyfile("""
def test_1(tmpdir):
import os
assert os.path.realpath(str(tmpdir)) == str(tmpdir)
""")
result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp)
assert not result.ret
def test_tmpdir_too_long_on_parametrization(testdir):
testdir.makepyfile("""
import pytest
@pytest.mark.parametrize("arg", ["1"*1000])
def test_some(arg, tmpdir):
tmpdir.ensure("hello")
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_tmpdir_factory(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(scope='session')
def session_dir(tmpdir_factory):
return tmpdir_factory.mktemp('data', numbered=False)
def test_some(session_dir):
session_dir.isdir()
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_tmpdir_fallback_tox_env(testdir, monkeypatch):
"""Test that tmpdir works even if environment variables required by getpass
module are missing (#1010).
"""
monkeypatch.delenv('USER', raising=False)
monkeypatch.delenv('USERNAME', raising=False)
testdir.makepyfile("""
import pytest
def test_some(tmpdir):
assert tmpdir.isdir()
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.fixture
def break_getuser(monkeypatch):
monkeypatch.setattr('os.getuid', lambda: -1)
# taken from python 2.7/3.4
for envvar in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
monkeypatch.delenv(envvar, raising=False)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows')
def test_tmpdir_fallback_uid_not_found(testdir):
"""Test that tmpdir works even if the current process's user id does not
correspond to a valid user.
"""
testdir.makepyfile("""
import pytest
def test_some(tmpdir):
assert tmpdir.isdir()
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows')
def test_get_user_uid_not_found():
"""Test that get_user() function works even if the current process's
user id does not correspond to a valid user (e.g. running pytest in a
    Docker container with 'docker run -u').
"""
from _pytest.tmpdir import get_user
assert get_user() is None
@pytest.mark.skipif(not sys.platform.startswith('win'), reason='win only')
def test_get_user(monkeypatch):
"""Test that get_user() function works even if environment variables
required by getpass module are missing from the environment on Windows
(#1010).
"""
from _pytest.tmpdir import get_user
monkeypatch.delenv('USER', raising=False)
monkeypatch.delenv('USERNAME', raising=False)
assert get_user() is None
| mpl-2.0 |
PowerShellEmpire/Empire | lib/modules/powershell/situational_awareness/host/get_proxy.py | 10 | 3067 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-Proxy',
'Author': ['@harmj0y'],
'Description': ("Enumerates the proxy server and WPAD conents for the current user. Part of PowerView."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ComputerName' : {
'Description' : 'The computername to enumerate proxy settings on.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
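    # Added illustration (hedged; the exact text follows the string
    # concatenation above): with ComputerName left empty the generated
    # PowerShell ends in something like
    #   Get-Proxy  | Out-String | %{$_ + "`n"};"`nGet-Proxy completed!"
    # while a hypothetical ComputerName of WKSTN01 would instead produce
    #   Get-Proxy  -ComputerName WKSTN01 | Out-String | ...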
| bsd-3-clause |
Qalthos/ansible | lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule_facts.py | 12 | 5867 | #!/usr/bin/python
#
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqlfirewallrule_facts
version_added: "2.8"
short_description: Get Azure MySQL Firewall Rule facts.
description:
- Get facts of Azure MySQL Firewall Rule.
options:
resource_group:
description:
- The name of the resource group.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the server firewall rule.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get instance of MySQL Firewall Rule
azure_rm_mysqlfirewallrule_facts:
resource_group: myResourceGroup
server_name: server_name
name: firewall_rule_name
- name: List instances of MySQL Firewall Rule
azure_rm_mysqlfirewallrule_facts:
resource_group: myResourceGroup
server_name: server_name
'''
RETURN = '''
rules:
description: A list of dictionaries containing facts for MySQL Firewall Rule.
returned: always
type: complex
contains:
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMySQL/servers/testserver/fire
wallRules/rule1"
server_name:
description:
- The name of the server.
returned: always
type: str
sample: testserver
name:
description:
- Resource name.
returned: always
type: str
sample: rule1
start_ip_address:
description:
- The start IP address of the MySQL firewall rule.
returned: always
type: str
sample: 10.0.0.16
end_ip_address:
description:
- The end IP address of the MySQL firewall rule.
returned: always
type: str
sample: 10.0.0.18
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMMySqlFirewallRuleFacts(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.mgmt_client = None
self.resource_group = None
self.server_name = None
self.name = None
super(AzureRMMySqlFirewallRuleFacts, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(MySQLManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if (self.name is not None):
self.results['rules'] = self.get()
else:
self.results['rules'] = self.list_by_server()
return self.results
def get(self):
response = None
results = []
try:
response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for FirewallRules.')
if response is not None:
results.append(self.format_item(response))
return results
def list_by_server(self):
response = None
results = []
try:
response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
server_name=self.server_name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for FirewallRules.')
if response is not None:
for item in response:
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
d = {
'resource_group': self.resource_group,
'id': d['id'],
'server_name': self.server_name,
'name': d['name'],
'start_ip_address': d['start_ip_address'],
'end_ip_address': d['end_ip_address']
}
return d
def main():
AzureRMMySqlFirewallRuleFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
dsfsdgsbngfggb/odoo | addons/hr_payroll/__openerp__.py | 260 | 2421 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Payroll',
'version': '1.0',
'category': 'Human Resources',
'sequence': 38,
'description': """
Generic Payroll system.
=======================
* Employee Details
* Employee Contracts
* Passport based Contract
* Allowances/Deductions
* Allow to configure Basic/Gross/Net Salary
* Employee Payslip
* Monthly Payroll Register
* Integrated with Holiday Management
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': [
'hr',
'hr_contract',
'hr_holidays',
'decimal_precision',
'report',
],
'data': [
'security/hr_security.xml',
'wizard/hr_payroll_payslips_by_employees.xml',
'hr_payroll_view.xml',
'hr_payroll_workflow.xml',
'hr_payroll_sequence.xml',
'hr_payroll_report.xml',
'hr_payroll_data.xml',
'security/ir.model.access.csv',
'wizard/hr_payroll_contribution_register_report.xml',
'res_config_view.xml',
'views/report_contributionregister.xml',
'views/report_payslip.xml',
'views/report_payslipdetails.xml',
],
'test': [
'test/payslip.yml',
],
'demo': ['hr_payroll_demo.xml'],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/alpha/trecento/findTrecentoFragments.py | 1 | 11333 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''Runs a series of tests against the database to see if any of the following unidentified fragments are in there...'''
from music21 import metadata
from music21 import interval
from music21 import note
from music21 import stream
from music21.alpha.trecento import cadencebook
class IntervalSearcher(object):
    def __init__(self, intervalList=None):
        # use None as the default to avoid sharing one mutable list across calls
        if intervalList is None:
            intervalList = []
        self.intervalList = intervalList
        self.intervalLength = len(intervalList)
def compareToStream(self, cmpStream):
streamLength = len(cmpStream.flat.notesAndRests)
if self.intervalLength > streamLength:
return False
stIntervalList = cmpStream.melodicIntervals(skipRests = True)
if stIntervalList is None:
return False
stIntervalListLength = len(stIntervalList)
if self.intervalLength > stIntervalListLength:
return False
#print "Length of Stream: " + str(streamLength)
for i in range(0, stIntervalListLength+1 - self.intervalLength):
for j in range(0, self.intervalLength):
streamInterval = stIntervalList[i+j]
genI1 = self.intervalList[j].diatonic.generic.simpleDirected
genI2 = streamInterval.diatonic.generic.simpleDirected
if genI1 != genI2:
break
else:
for colorNote in range(i, i + self.intervalLength):
## does not exactly work because of rests, oh well
cmpStream.notesAndRests[colorNote].editorial.color = "blue"
return True
return False
class NoteSearcher(object):
'''Needs an exact list -- make sure no rests!'''
    def __init__(self, noteList=None):
        # use None as the default to avoid sharing one mutable list across calls
        if noteList is None:
            noteList = []
        self.noteList = noteList
        self.noteLength = len(noteList)
def compareToStream(self, cmpStream):
sN = cmpStream.notesAndRests
streamLength = len(sN)
if streamLength < self.noteLength: return False
for i in range(streamLength + 1 - self.noteLength):
for j in range(self.noteLength):
streamNote = sN[i+j]
if self.noteList[j].isRest != streamNote.isRest:
break
if streamNote.isNote and (self.noteList[j].step != streamNote.step):
break
else: ## success!
for colorNote in range(i, self.noteLength):
sN[colorNote].editorial.color = "blue"
return True
return False
def searchForNotes(notesStr):
'''the notesStr is a string of notes in the following form:
"C4 D4 E4 B3 C4"
that's it: name, octave. With no accidentals. If octave is 0 then
it means do not bother checking for octaves.
Currently octave is ignored anyhow.
'''
notesArr = notesStr.split()
noteObjArr = []
for tN in notesArr:
tNName = tN[0]
if tNName.lower() != "r":
tNObj = note.Note()
tNObj.name = tN[0]
tNObj.octave = int(tN[1])
else:
tNObj = note.Rest()
noteObjArr.append(tNObj)
ballataObj = cadencebook.BallataSheet()
searcher1 = NoteSearcher(noteObjArr)
streamOpus = stream.Opus()
for thisWork in ballataObj:
for thisCadence in thisWork.snippets:
if thisCadence is None:
continue
for i in range(len(thisCadence.parts)):
if searcher1.compareToStream(thisCadence.parts[i].flat) is True:
notesStr = ""
for thisNote in thisCadence.parts[i].flat.notesAndRests:
#thisNote.editorial.color = "blue"
if thisNote.isRest is False:
notesStr += thisNote.nameWithOctave + " "
else:
notesStr += "r "
streamOpus.insert(0, thisCadence)
# streamLily += "\\score {" + \
# "<< \\time " + str(thisCadence.timeSig) + \
# "\n \\new Staff {" + str(thisCadence.parts[i].lily) + "} >>" + \
# thisCadence.header() + "\n}\n"
print(u"In piece %r found in stream %d: %s" % (thisWork.title, i, notesStr))
if any(streamOpus):
streamOpus.show('lily.png')
def searchForIntervals(notesStr):
'''
notesStr is the same as above. Now however we check to see
if the generic intervals are the same, rather than the note names.
Useful if the clef is missing.
'''
notesArr = notesStr.split()
noteObjArr = []
for tN in notesArr:
tNObj = note.Note()
tNObj.name = tN[0]
tNObj.octave = int(tN[1])
noteObjArr.append(tNObj)
interObjArr = []
for i in range(len(noteObjArr) - 1):
int1 = interval.notesToInterval(noteObjArr[i], noteObjArr[i+1])
interObjArr.append(int1)
#print interObjArr
searcher1 = IntervalSearcher(interObjArr)
ballataObj = cadencebook.BallataSheet()
streamOpus = stream.Opus()
for thisWork in ballataObj:
print(thisWork.title)
for thisCadence in thisWork.snippets:
if thisCadence is None:
continue
for i in range(len(thisCadence.parts)):
if searcher1.compareToStream(thisCadence.parts[i].flat) is True:
notesStr = ""
for thisNote in thisCadence.parts[i].flat.notesAndRests:
#thisNote.editorial.color = "blue"
if thisNote.isRest is False:
notesStr += thisNote.nameWithOctave + " "
else:
notesStr += "r "
streamOpus.insert(0, thisCadence)
# streamLily += "\\score {" + \
# "<< \\time " + str(thisCadence.timeSig) + \
# "\n \\new Staff {" + str(thisCadence.parts[i].lily) + "} >>" + \
# thisCadence.header() + "\n}\n"
print(u"In piece %r found in stream %d: %s" % (thisWork.title, i, notesStr))
if any(streamOpus):
streamOpus.show('lily.png')
def findRandomVerona():
searchForNotes("A4 F4 G4 E4 F4 G4") #p. 4 cadence 1
searchForNotes("F4 G4 A4 G4 A4 F4 G4 A4") #p. 4 incipit 2
def findCasanatense522():
searchForIntervals("D4 E4 D4 C4 D4 E4 F4")
def findRavi3ORegina():
searchForNotes("G16 G16 F8 E16") # should be cadence A, cantus
def searchForVat1969():
    '''There is a particular piece in Vatican MS 1969 that I have been searching for forever; its first
    ending concludes DED and its second ending CDC, OR SOME TRANSPOSITION of these notes. Find it!'''
ballataObj = cadencebook.BallataSheet()
for thisWork in ballataObj:
cadB1 = thisWork.cadenceB1Class()
cadB2 = thisWork.cadenceB2Class()
if (cadB2 is None or len(cadB2.parts) == 0): continue
if (cadB1 is None or len(cadB1.parts) == 0): continue
for i in range(0, len(cadB2.parts)):
strB1 = cadB1.parts[i].flat
strB2 = cadB2.parts[i].flat
if len(strB1.notesAndRests) < 3 or len(strB2.notesAndRests) < 3:
break
if findUpDown(strB1.notesAndRests[-3], strB1.notesAndRests[-2], strB1.notesAndRests[-1]):
if findUpDown(strB2.notesAndRests[-3], strB2.notesAndRests[-2], strB2.notesAndRests[-1]):
                    print(thisWork.title, end=' ')  # avoid bytes+str concatenation under Python 3
b1b2int = interval.Interval(note1 = strB1.notesAndRests[-1], note2 = strB2.notesAndRests[-1])
print(b1b2int.diatonic.generic.niceName)
def findUpDown(n1, n2, n3):
if n1.isRest or n2.isRest or n3.isRest: return False
i1 = interval.Interval(note1 = n1, note2 = n2)
if i1.diatonic.generic.simpleDirected != 2: return False
i2 = interval.Interval(note1 = n2, note2 = n3)
if i2.diatonic.generic.simpleDirected != -2: return False
return True
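# Added illustration (a hedged sketch reusing the imports above): for the
# figure D4 -> E4 -> D4 the generic intervals are an ascending second (+2)
# followed by a descending second (-2), so the check succeeds:
#
#   findUpDown(note.Note('D4'), note.Note('E4'), note.Note('D4'))  # -> True
#
# Any rest among the three notes makes it return False immediately.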
def audioVirelaiSearch():
#from music21 import audioSearch
from music21.audioSearch import transcriber
from music21 import search
virelaisSheet = cadencebook.TrecentoSheet(sheetname = 'virelais')
virelaiCantuses = []
for i in range(2, 54):
thisVirelai = virelaisSheet.makeWork(i)
if thisVirelai.title != "":
try:
vc = thisVirelai.incipit.getElementsByClass('Part')[0]
vc.insert(0, metadata.Metadata(title = thisVirelai.title))
virelaiCantuses.append(vc)
except IndexError:
pass
searchScore = transcriber.runTranscribe(show = False, plot = False, seconds = 10.0, saveFile = False)
#from music21 import converter
#searchScore = converter.parse("c'4 a8 a4 g8 b4. d'4. c8 b a g f4", '6/8')
#searchScore.show()
l = search.approximateNoteSearch(searchScore, virelaiCantuses)
for i in l:
print(i.metadata.title, i.matchProbability)
l[0].show()
def findSimilarGloriaParts():
'''
Looks in the list of Gloria incipits, cadences, etc. and tries to find ones which are very similar
to each other.
'''
pass
def savedSearches():
# searchForIntervals("E4 C4 C4 B3") # Assisi 187.1
# searchForIntervals("D4 C4 C4 C4") # Assisi 187.2
# searchForIntervals("D4 A3 A3 A3 B3 C4") # Donna si to fallito TEST
# searchForNotes("G3 D3 R D3 D3 E3 F3") # Donna si to fallito TEST - last note = F#
# searchForIntervals("F3 C3 C3 F3 G3") # Bologna Archivio: Per seguirla TEST
# searchForNotes("F3 E3 F3 G3 F3 E3") # Mons archive fragment -- see FB Aetas Aurea post
searchForNotes("F4 G4 F4 B4 G4 A4 G4 F4 E4") # or B-4. Paris 25406 -- Dominique Gatte pen tests
# searchForNotes("D4 D4 C4 D4") # Fortuna Rira Seville 25 TEST! CANNOT FIND
# searchForNotes("D4 C4 B3 A3 G3") # Tenor de monaco so tucto Seville 25
# searchForNotes("E4 D4 C4 B3 A3 B3 C4") # Benedicamus Domino Seville 25
# searchForNotes("D4 E4 C4 D4 E4 D4 C4") # Benedicamus Domino Seville 25
###### searchForIntervals("A4 A4 G4 A4 G4 A4") # Reina f. 18r top. = QUAL NOVITA
# searchForIntervals("G4 F4 F4 E4 E4 D4 D4 C4") # london 29987 88v C
# searchForIntervals("C4 B3 A3 A3 G3 G3 A3") # London 29987 88v T
#searchForNotes("G3 E3 F3 G3 F3 E3 D3 C3") # probable French piece, Nuremberg 9, but worth a check
# searchForIntervals("A4 A4 G4 G4 F4 E4") # Nuremberg 9a, staff 6 probable French Piece
# findCasanatense522()
# findRandomVerona()
# findRavi3ORegina()
#searchForIntervals("D4 B4 D4 C4 D4") # cadence formula from 15th c. that Lisa Cotton was searching for in earlier sources -- none found
#searchForIntervals("G4 A4 G4 F4 E4 F4 G4 E4") # Prague XVII.J.17-14_1r piece 1 -- possible contrafact -- no correct matches
#searchForIntervals("G4 A4 B4 G4 F4 G4 F4 E4") # Prague XVII.J.17-14_1r piece 2 -- possible contrafact -- no matches
#searchForIntervals("F4 A4 F4 G4 F4 G4 A4") # Duke white notation manuscript
if __name__ == "__main__":
savedSearches()
#audioVirelaiSearch()
#------------------------------------------------------------------------------
# eof
| mit |
orgito/ansible | lib/ansible/modules/packaging/os/rpm_key.py | 100 | 6840 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to import third party repo keys to your rpm db
# Copyright: (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: rpm_key
author:
- Hector Acosta (@hacosta) <hector.acosta@gazzang.com>
short_description: Adds or removes a gpg key from the rpm db
description:
- Adds or removes (rpm --import) a gpg key to your rpm database.
version_added: "1.3"
options:
key:
description:
- Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database.
required: true
state:
description:
- If the key will be imported or removed from the rpm db.
default: present
choices: [ absent, present ]
validate_certs:
description:
- If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
# Example action to import a key from a url
- rpm_key:
state: present
key: http://apt.sw.be/RPM-GPG-KEY.dag.txt
# Example action to import a key from a file
- rpm_key:
state: present
key: /path/to/key.gpg
# Example action to ensure a key is not present in the db
- rpm_key:
state: absent
key: DEADB33F
'''
import re
import os.path
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
def is_pubkey(string):
"""Verifies if string is a pubkey"""
pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))
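# Illustrative sketch (added): any string containing one complete armored
# block matches, surrounding junk included.
#
#   is_pubkey('x-----BEGIN PGP PUBLIC KEY BLOCK-----\nabc\n'
#             '-----END PGP PUBLIC KEY BLOCK-----y')   # -> True
#   is_pubkey('no armor here')                         # -> False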
class RpmKey(object):
def __init__(self, module):
# If the key is a url, we need to check if it's present to be idempotent,
# to do that, we need to check the keyid, which we can get from the armor.
keyfile = None
should_cleanup_keyfile = False
self.module = module
self.rpm = self.module.get_bin_path('rpm', True)
state = module.params['state']
key = module.params['key']
self.gpg = self.module.get_bin_path('gpg')
if not self.gpg:
self.gpg = self.module.get_bin_path('gpg2', required=True)
if '://' in key:
keyfile = self.fetch_key(key)
keyid = self.getkeyid(keyfile)
should_cleanup_keyfile = True
elif self.is_keyid(key):
keyid = key
elif os.path.isfile(key):
keyfile = key
keyid = self.getkeyid(keyfile)
else:
self.module.fail_json(msg="Not a valid key %s" % key)
keyid = self.normalize_keyid(keyid)
if state == 'present':
if self.is_key_imported(keyid):
module.exit_json(changed=False)
else:
if not keyfile:
self.module.fail_json(msg="When importing a key, a valid file must be given")
self.import_key(keyfile)
if should_cleanup_keyfile:
self.module.cleanup(keyfile)
module.exit_json(changed=True)
else:
if self.is_key_imported(keyid):
self.drop_key(keyid)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def fetch_key(self, url):
"""Downloads a key from url, returns a valid path to a gpg key"""
rsp, info = fetch_url(self.module, url)
if info['status'] != 200:
self.module.fail_json(msg="failed to fetch key at %s , error was: %s" % (url, info['msg']))
key = rsp.read()
if not is_pubkey(key):
self.module.fail_json(msg="Not a public key: %s" % url)
tmpfd, tmpname = tempfile.mkstemp()
self.module.add_cleanup_file(tmpname)
tmpfile = os.fdopen(tmpfd, "w+b")
tmpfile.write(key)
tmpfile.close()
return tmpname
def normalize_keyid(self, keyid):
"""Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is uppercase"""
ret = keyid.strip().upper()
if ret.startswith('0x'):
return ret[2:]
elif ret.startswith('0X'):
return ret[2:]
else:
return ret
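    # Added illustration: the transform is pure string handling, e.g.
    #   ' 0xdeadb33f ' -> 'DEADB33F'
    #   'DEADB33F'     -> 'DEADB33F'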
def getkeyid(self, keyfile):
stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])
for line in stdout.splitlines():
line = line.strip()
if line.startswith('pub:'):
return line.split(':')[4]
self.module.fail_json(msg="Unexpected gpg output")
def is_keyid(self, keystr):
"""Verifies if a key, as provided by the user is a keyid"""
return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
def execute_command(self, cmd):
rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg=stderr)
return stdout, stderr
def is_key_imported(self, keyid):
cmd = self.rpm + ' -q gpg-pubkey'
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0: # No key is installed on system
return False
cmd += ' --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
stdout, stderr = self.execute_command(cmd)
for line in stdout.splitlines():
if keyid in line.split(':')[4]:
return True
return False
def import_key(self, keyfile):
if not self.module.check_mode:
self.execute_command([self.rpm, '--import', keyfile])
def drop_key(self, keyid):
if not self.module.check_mode:
self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % keyid[-8:].lower()])
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
key=dict(type='str', required=True),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
)
RpmKey(module)
if __name__ == '__main__':
main()
| gpl-3.0 |
ksmit799/Toontown-Source | toontown/coghq/DistributedInGameEditor.py | 1 | 29740 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.showbase.PythonUtil import lineInfo, Functor
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from otp.level import Level
from otp.level import LevelConstants
from otp.level import Entity
from otp.level import EditMgr
from SpecImports import *
from InGameEditorElements import *
from toontown.cogdominium import CogdoEntityCreator
import string
class InGameEditorEntityBase(InGameEditorElement):
def __init__(self):
InGameEditorElement.__init__(self)
def attribChanged(self, attrib, value):
Entity.Entity.attribChanged(self, attrib, value)
print 'attribChange: %s %s, %s = %s' % (self.level.getEntityType(self.entId),
self.entId,
attrib,
repr(value))
def getTypeName(self):
return self.level.getEntityType(self.entId)
def privGetNamePrefix(self):
return '[%s-%s] ' % (self.getTypeName(), self.entId)
def privGetEntityName(self):
return self.level.levelSpec.getEntitySpec(self.entId)['name']
def getName(self):
return '%s%s' % (self.privGetNamePrefix(), self.privGetEntityName())
def setNewName(self, newName):
prefix = self.privGetNamePrefix()
if newName[:len(prefix)] == prefix:
newName = newName[len(prefix):]
oldName = self.privGetEntityName()
if oldName != newName:
self.level.setAttribEdit(self.entId, 'name', newName)
def setParentEntId(self, parentEntId):
self.parentEntId = parentEntId
self.level.buildEntityTree()
def setName(self, name):
self.name = name
self.level.buildEntityTree()
class InGameEditorEntity(Entity.Entity, InGameEditorEntityBase):
def __init__(self, level, entId):
Entity.Entity.__init__(self, level, entId)
InGameEditorEntityBase.__init__(self)
def id(self):
return self.entId
def destroy(self):
Entity.Entity.destroy(self)
class InGameEditorEditMgr(EditMgr.EditMgr, InGameEditorEntityBase):
def __init__(self, level, entId):
EditMgr.EditMgr.__init__(self, level, entId)
InGameEditorEntityBase.__init__(self)
def destroy(self):
EditMgr.EditMgr.destroy(self)
class AttribModifier(Entity.Entity, InGameEditorEntityBase):
notify = DirectNotifyGlobal.directNotify.newCategory('AttribModifier')
def __init__(self, level, entId):
Entity.Entity.__init__(self, level, entId)
InGameEditorEntityBase.__init__(self)
def destroy(self):
Entity.Entity.destroy(self)
def setValue(self, value):
if len(self.typeName) == 0:
AttribModifier.notify.warning('no typeName set')
return
entTypeReg = self.level.entTypeReg
if self.typeName not in entTypeReg.getAllTypeNames():
AttribModifier.notify.warning('invalid typeName: %s' % self.typeName)
return
typeDesc = entTypeReg.getTypeDesc(self.typeName)
if len(self.attribName) == 0:
AttribModifier.notify.warning('no attribName set')
return
if self.attribName not in typeDesc.getAttribNames():
AttribModifier.notify.warning('invalid attribName: %s' % self.attribName)
return
if len(value) == 0:
AttribModifier.notify.warning('no value set')
def setAttrib(entId, typeName = self.typeName, attribName = self.attribName, value = eval(value), recursive = self.recursive):
if typeName == self.level.getEntityType(entId):
self.level.setAttribEdit(entId, attribName, value)
if recursive:
entity = self.level.getEntity(entId)
for child in entity.getChildren():
setAttrib(child.entId)
setAttrib(self.parentEntId)
def getInGameEditorEntityCreatorClass(level):
entCreator = level.createEntityCreator()
EntCreatorClass = entCreator.__class__
class InGameEditorEntityCreator(EntCreatorClass):
def __init__(self, editor):
EntCreatorClass.__init__(self, editor)
entTypes = self.entType2Ctor.keys()
for type in entTypes:
self.entType2Ctor[type] = InGameEditorEntity
self.entType2Ctor['editMgr'] = InGameEditorEditMgr
self.entType2Ctor['attribModifier'] = AttribModifier
return InGameEditorEntityCreator
class DistributedInGameEditor(DistributedObject.DistributedObject, Level.Level, InGameEditorElement):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedInGameEditor')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
Level.Level.__init__(self)
InGameEditorElement.__init__(self)
self.editorInitialized = 0
self.specModified = 0
self.undoStack = []
self.redoStack = []
self.entCreateHandlerQ = []
self.entitiesWeCreated = []
self.nodePathId2EntId = {}
def generate(self):
self.notify.debug('generate')
DistributedObject.DistributedObject.generate(self)
base.inGameEditor = self
def setEditorAvId(self, editorAvId):
self.editorAvId = editorAvId
def setEditUsername(self, editUsername):
self.editUsername = editUsername
def getEditUsername(self):
return self.editUsername
def setLevelDoId(self, levelDoId):
self.levelDoId = levelDoId
self.level = base.cr.doId2do[self.levelDoId]
def getLevelDoId(self):
return self.levelDoId
def announceGenerate(self):
self.notify.debug('announceGenerate')
DistributedObject.DistributedObject.announceGenerate(self)
if self.editorIsLocalToon():
from otp.level import EditorGlobals
EditorGlobals.assertReadyToEdit()
self.notify.debug('requesting an up-to-date copy of the level spec')
self.sendUpdate('requestCurrentLevelSpec')
def setSpecSenderDoId(self, doId):
DistributedInGameEditor.notify.debug('setSpecSenderDoId: %s' % doId)
blobSender = base.cr.doId2do[doId]
def setSpecBlob(specBlob, blobSender = blobSender, self = self):
blobSender.sendAck()
from otp.level.LevelSpec import LevelSpec
curSpec = eval(specBlob)
self.gotCurrentSpec(curSpec)
if blobSender.isComplete():
setSpecBlob(blobSender.getBlob())
else:
evtName = self.uniqueName('specDone')
blobSender.setDoneEvent(evtName)
self.acceptOnce(evtName, setSpecBlob)
def gotCurrentSpec(self, curSpec):
self.entTypeReg = self.level.getEntityTypeReg()
curSpec.setEntityTypeReg(self.entTypeReg)
self.axis = loader.loadModel('models/misc/xyzAxis.bam')
self.axis.setColorOff()
self.axis.setColorScale(1, 1, 1, 1, 1)
self.initializeLevel(self.doId, curSpec, curSpec.getScenario())
entCreator = self.level.createEntityCreator()
self.entTypes = entCreator.getEntityTypes()
self.selectedEntity = None
base.startTk()
import InGameEditor
doneEvent = self.uniqueName('editorDone')
saveAsEvent = self.uniqueName('saveSpec')
requestSaveEvent = self.uniqueName('requestSpecSave')
undoEvent = self.uniqueName('undoEvent')
redoEvent = self.uniqueName('redoEvent')
wireframeEvent = self.uniqueName('wireframeEvent')
oobeEvent = self.uniqueName('oobeEvent')
csEvent = self.uniqueName('csEvent')
runEvent = self.uniqueName('runEvent')
texEvent = self.uniqueName('texEvent')
self.editor = InGameEditor.InGameEditor(level=self, doneEvent=doneEvent, requestSaveEvent=requestSaveEvent, saveAsEvent=saveAsEvent, undoEvent=undoEvent, redoEvent=redoEvent, wireframeEvent=wireframeEvent, oobeEvent=oobeEvent, csEvent=csEvent, runEvent=runEvent, texEvent=texEvent)
self.acceptOnce(doneEvent, self.doneEditing)
self.accept(saveAsEvent, self.saveSpec)
self.accept(requestSaveEvent, self.requestSpecSave)
self.accept(undoEvent, self.doUndo)
self.accept(redoEvent, self.doRedo)
self.accept(wireframeEvent, self.doWireframe)
self.accept(oobeEvent, self.doOobe)
self.accept(csEvent, self.doCs)
self.accept(runEvent, self.doRun)
self.accept(texEvent, self.doTex)
self.accept(self.editor.getEventMsgName('Select'), self.handleEntitySelect)
self.accept(self.editor.getEventMsgName('Flash'), self.handleEntitySelect)
self.editorInitialized = 1
self.buildEntityTree()
return
def editorIsLocalToon(self):
return self.editorAvId == base.localAvatar.doId
def createEntityCreator(self):
return getInGameEditorEntityCreatorClass(self.level)(self)
def doneEditing(self):
self.notify.debug('doneEditing')
if self.specModified:
if self.editor.askYesNo('Save the spec on the AI?'):
self.requestSpecSave()
self.sendUpdate('setFinished')
def disable(self):
self.notify.debug('disable')
if self.editorInitialized and self.editorIsLocalToon():
self.axis.removeNode()
del self.axis
if hasattr(self, 'entTypeReg'):
del self.entTypeReg
self.editorInitialized = 0
Level.Level.destroyLevel(self)
if hasattr(self, 'editor'):
self.editor.quit()
del self.editor
DistributedObject.DistributedObject.disable(self)
self.ignoreAll()
def getEntInstance(self, entId):
return self.level.getEntity(entId)
def getEntInstanceNP(self, entId):
entity = self.getEntInstance(entId)
if entity is None:
return
if isinstance(entity, NodePath):
return entity
if hasattr(entity, 'getNodePath'):
return entity.getNodePath()
return
def getEntInstanceNPCopy(self, entId):
np = self.getEntInstanceNP(entId)
if np is None:
return np
stashNodeGroups = []
searches = ('**/+ActorNode', '**/+Character')
for search in searches:
stashNodeGroups.append(np.findAllMatches(search))
for group in stashNodeGroups:
if not group.isEmpty():
group.stash()
par = np.getParent()
copy = np.copyTo(par)
for group in stashNodeGroups:
if not group.isEmpty():
group.unstash()
return copy
def saveSpec(self, filename):
return self.levelSpec.saveToDisk(filename)
def setEntityParent(self, entity, parent):
parent.addChild(entity)
entity._parentEntity = parent
def insertEntityIntoTree(self, entId):
ent = self.getEntity(entId)
if entId == LevelConstants.UberZoneEntId:
self.setEntityParent(ent, self)
return
parentEnt = self.getEntity(ent.parentEntId)
if parentEnt is not None:
self.setEntityParent(ent, parentEnt)
return
self.setEntityParent(ent, self.uberZoneEntity)
return
def buildEntityTree(self):
self.setChildren([])
entIds = self.entities.keys()
entIds.sort()
for entId in entIds:
ent = self.getEntity(entId)
ent.setChildren([])
for entId in entIds:
self.insertEntityIntoTree(entId)
self.editor.refreshExplorer()
def onEntityCreate(self, entId):
DistributedInGameEditor.notify.debug('onEntityCreate %s' % entId)
Level.Level.onEntityCreate(self, entId)
entityNP = self.getEntInstanceNP(entId)
if entityNP:
self.nodePathId2EntId[entityNP.id()] = entId
if not self.editorInitialized:
return
self.insertEntityIntoTree(entId)
self.editor.refreshExplorer()
if entId == self.entitiesWeCreated[0]:
self.entitiesWeCreated = self.entitiesWeCreated[1:]
self.editor.selectEntity(entId)
def onEntityDestroy(self, entId):
DistributedInGameEditor.notify.debug('onEntityDestroy %s' % entId)
ent = self.getEntity(entId)
if self.editorInitialized:
entityNP = self.getEntInstanceNP(entId)
if entityNP in self.nodePathId2EntId:
del self.nodePathId2EntId[entityNP.id()]
if ent is self.selectedEntity:
self.editor.clearAttribEditPane()
self.selectedEntity = None
ent._parentEntity.removeChild(ent)
del ent._parentEntity
self.editor.refreshExplorer()
Level.Level.onEntityDestroy(self, entId)
return
def handleEntitySelect(self, entity):
self.selectedEntity = entity
if hasattr(self, 'identifyIval'):
self.identifyIval.finish()
if entity is self:
self.editor.clearAttribEditPane()
else:
entityNP = self.getEntInstanceNP(entity.entId)
if entityNP is not None:
dur = float(0.5)
oColor = entityNP.getColorScale()
flashIval = Sequence(Func(Functor(entityNP.setColorScale, 1, 0, 0, 1)), WaitInterval(dur / 3), Func(Functor(entityNP.setColorScale, 0, 1, 0, 1)), WaitInterval(dur / 3), Func(Functor(entityNP.setColorScale, 0, 0, 1, 1)), WaitInterval(dur / 3), Func(Functor(entityNP.setColorScale, oColor[0], oColor[1], oColor[2], oColor[3])))
boundIval = Sequence(Func(entityNP.showBounds), WaitInterval(dur * 0.5), Func(entityNP.hideBounds))
entCp = self.getEntInstanceNPCopy(entity.entId)
entCp.setRenderModeWireframe()
entCp.setTextureOff(1)
wireIval = Sequence(Func(Functor(entCp.setColor, 1, 0, 0, 1, 1)), WaitInterval(dur / 3), Func(Functor(entCp.setColor, 0, 1, 0, 1, 1)), WaitInterval(dur / 3), Func(Functor(entCp.setColor, 0, 0, 1, 1, 1)), WaitInterval(dur / 3), Func(entCp.removeNode))
self.identifyIval = Parallel(flashIval, boundIval, wireIval)
def putAxis(self = self, entityNP = entityNP):
self.axis.reparentTo(entityNP)
self.axis.setPos(0, 0, 0)
self.axis.setHpr(0, 0, 0)
def takeAxis(self = self):
self.axis.reparentTo(hidden)
self.identifyIval = Sequence(Func(putAxis), Parallel(self.identifyIval, WaitInterval(1000.5)), Func(takeAxis))
self.identifyIval.start()
self.editor.updateAttribEditPane(entity.entId, self.levelSpec, self.entTypeReg)
entType = self.getEntityType(entity.entId)
menu = self.editor.menuBar.component('Entity-menu')
index = menu.index('Remove Selected Entity')
if entType in self.entTypeReg.getPermanentTypeNames():
menu.entryconfigure(index, state='disabled')
else:
menu.entryconfigure(index, state='normal')
return
def privSendAttribEdit(self, entId, attrib, value):
self.specModified = 1
valueStr = repr(value)
self.notify.debug("sending edit: %s: '%s' = %s" % (entId, attrib, valueStr))
self.sendUpdate('setEdit', [entId,
attrib,
valueStr,
self.editUsername])
def privExecActionList(self, actions):
for action in actions:
if callable(action):
action()
else:
entId, attrib, value = action
self.privSendAttribEdit(entId, attrib, value)
def setUndoableAttribEdit(self, old2new, new2old):
self.redoStack = []
self.undoStack.append((new2old, old2new))
self.privExecActionList(old2new)
def setAttribEdit(self, entId, attrib, value, canUndo = 1):
oldValue = eval(repr(self.levelSpec.getEntitySpec(entId)[attrib]))
new2old = [(entId, attrib, oldValue)]
old2new = [(entId, attrib, value)]
self.setUndoableAttribEdit(old2new, new2old)
if not canUndo:
self.undoStack = []
def doUndo(self):
if len(self.undoStack) == 0:
self.editor.showWarning('Nothing left to undo')
return
undo = self.undoStack.pop()
self.redoStack.append(undo)
new2old, old2new = undo
self.privExecActionList(new2old)
def doRedo(self):
if len(self.redoStack) == 0:
self.editor.showWarning('Nothing to redo')
return
redo = self.redoStack.pop()
self.undoStack.append(redo)
new2old, old2new = redo
self.privExecActionList(old2new)
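    # Added note on the undo model: every edit is recorded as a pair of
    # action lists (old2new, new2old).  doUndo pops a pair off undoStack,
    # pushes it onto redoStack, and replays new2old; doRedo is the mirror
    # image, so undo/redo amounts to replaying recorded attribute edits.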
def doWireframe(self):
messenger.send('magicWord', ['~wire'])
def doOobe(self):
messenger.send('magicWord', ['~oobe'])
def doCs(self):
messenger.send('magicWord', ['~cs'])
def doRun(self):
messenger.send('magicWord', ['~run'])
def doTex(self):
messenger.send('magicWord', ['~tex'])
def insertEntity(self, entType, parentEntId = None, callback = None):
if parentEntId is None:
try:
parentEntId = self.selectedEntity.entId
except AttributeError:
self.editor.showWarning('Please select a valid parent entity first.', 'error')
return
removeAction = (self.editMgrEntity.entId, 'removeEntity', {'entId': 'REPLACEME'})
new2old = [removeAction]
def setNewEntityId(entId, self = self, action = removeAction, callback = callback):
action[2]['entId'] = entId
if callback:
callback(entId)
def setEntCreateHandler(self = self, handler = setNewEntityId):
self.entCreateHandlerQ.append(handler)
old2new = [setEntCreateHandler, (self.editMgrEntity.entId, 'requestNewEntity', {'entType': entType,
'parentEntId': parentEntId,
'username': self.editUsername})]
self.setUndoableAttribEdit(old2new, new2old)
return
def setEntityCreatorUsername(self, entId, editUsername):
Level.Level.setEntityCreatorUsername(self, entId, editUsername)
if editUsername == self.getEditUsername():
print 'entity %s about to be created; we requested it' % entId
callback = self.entCreateHandlerQ[0]
del self.entCreateHandlerQ[:1]
callback(entId)
self.entitiesWeCreated.append(entId)
def removeSelectedEntity(self):
try:
selectedEntId = self.selectedEntity.entId
except AttributeError:
self.editor.showWarning('Please select a valid entity to be removed first.', 'error')
return -1
if self.getEntity(selectedEntId).getNumChildren() > 0:
self.editor.showWarning('Remove children first.')
return -1
self.doRemoveEntity(selectedEntId)
def removeSelectedEntityTree(self):
try:
selectedEntId = self.selectedEntity.entId
except AttributeError:
self.editor.showWarning('Please select a valid entity to be removed first.', 'error')
return -1
def removeEntity(entId):
entity = self.getEntity(entId)
for child in entity.getChildren():
removeEntity(child.entId)
self.doRemoveEntity(entId)
removeEntity(selectedEntId)
def doRemoveEntity(self, entId):
parentEntId = self.getEntity(entId)._parentEntity.entId
entType = self.getEntityType(entId)
if entType in self.entTypeReg.getPermanentTypeNames():
self.editor.showWarning("Cannot remove entities of type '%s'" % entType)
return
removeAction = (self.editMgrEntity.entId, 'removeEntity', {'entId': entId})
old2new = [removeAction]
oldAttribs = []
spec = self.levelSpec.getEntitySpecCopy(entId)
del spec['type']
for attrib, value in spec.items():
oldAttribs.append((attrib, value))
def setNewEntityId(entId, self = self, removeAction = removeAction, oldAttribs = oldAttribs):
removeAction[2]['entId'] = entId
for attrib, value in spec.items():
self.privSendAttribEdit(entId, attrib, value)
def setEntCreateHandler(self = self, handler = setNewEntityId):
self.entCreateHandlerQ.append(handler)
new2old = [setEntCreateHandler, (self.editMgrEntity.entId, 'requestNewEntity', {'entType': self.getEntityType(entId),
'parentEntId': parentEntId,
'username': self.editUsername})]
self.setUndoableAttribEdit(old2new, new2old)
def makeCopyOfEntName(self, name):
prefix = 'copy of '
suffix = ' (%s)'
oldName = name
if len(oldName) <= len(prefix):
newName = prefix + oldName
elif oldName[:len(prefix)] != prefix:
newName = prefix + oldName
else:
hasSuffix = True
copyNum = 2
if oldName[-1] != ')':
hasSuffix = False
if hasSuffix and oldName[-2] in string.digits:
i = len(oldName) - 2
numString = ''
while oldName[i] in string.digits:
numString = oldName[i] + numString
i -= 1
if oldName[i] != '(':
hasSuffix = False
else:
i -= 1
if oldName[i] != ' ':
hasSuffix = False
else:
print 'numString: %s' % numString
copyNum = int(numString) + 1
if hasSuffix:
newName = oldName[:i] + suffix % copyNum
else:
newName = oldName + suffix % copyNum
return newName
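    # Added illustration of the naming scheme: 'door' -> 'copy of door',
    # then 'copy of door' -> 'copy of door (2)', and 'copy of door (2)' ->
    # 'copy of door (3)'; repeated duplication keeps bumping the counter.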
def duplicateSelectedEntity(self):
try:
selectedEntId = self.selectedEntity.entId
parentEntId = self.selectedEntity._parentEntity.entId
except AttributeError:
self.editor.showWarning('Please select a valid entity to be removed first.', 'error')
return
if self.selectedEntity.getNumChildren() > 0:
self.editor.showTodo('Cannot duplicate entity with children.')
return
removeAction = (self.editMgrEntity.entId, 'removeEntity', {'entId': selectedEntId})
new2old = [removeAction]
copyAttribs = self.levelSpec.getEntitySpecCopy(selectedEntId)
copyAttribs['comment'] = ''
copyAttribs['name'] = self.makeCopyOfEntName(copyAttribs['name'])
typeDesc = self.entTypeReg.getTypeDesc(copyAttribs['type'])
attribDescs = typeDesc.getAttribDescDict()
for attribName, attribDesc in attribDescs.items():
if attribDesc.getDatatype() == 'const':
del copyAttribs[attribName]
def setNewEntityId(entId, self = self, removeAction = removeAction, copyAttribs = copyAttribs):
removeAction[2]['entId'] = entId
for attribName, value in copyAttribs.items():
self.privSendAttribEdit(entId, attribName, value)
def setEntCreateHandler(self = self, handler = setNewEntityId):
self.entCreateHandlerQ.append(handler)
old2new = [setEntCreateHandler, (self.editMgrEntity.entId, 'requestNewEntity', {'entType': self.getEntityType(selectedEntId),
'parentEntId': parentEntId,
'username': self.editUsername})]
self.setUndoableAttribEdit(old2new, new2old)
def specPrePickle(self, spec):
for attribName, value in spec.items():
spec[attribName] = repr(value)
return spec
def specPostUnpickle(self, spec):
for attribName, value in spec.items():
spec[attribName] = eval(value)
return spec
def handleImportEntities(self):
try:
selectedEntId = self.selectedEntity.entId
except AttributeError:
self.editor.showWarning('Please select a valid entity first.', 'error')
return
import tkFileDialog
filename = tkFileDialog.askopenfilename(parent=self.editor.parent, defaultextension='.egroup', filetypes=[('Entity Group', '.egroup'), ('All Files', '*')])
if len(filename) == 0:
return
try:
import pickle
f = open(filename, 'r')
eTree = pickle.load(f)
eGroup = pickle.load(f)
for entId, spec in eGroup.items():
eGroup[entId] = self.specPostUnpickle(spec)
except:
self.editor.showWarning("Error importing entity group from '%s'." % filename, 'error')
return
oldEntId2new = {}
def addEntities(treeEntry, parentEntId, eGroup = eGroup):
for entId, children in treeEntry.items():
spec = eGroup[entId]
entType = spec['type']
del spec['type']
del spec['parentEntId']
typeDesc = self.entTypeReg.getTypeDesc(entType)
for attribName, attribDesc in typeDesc.getAttribDescDict().items():
if attribDesc.getDatatype() == 'const':
if attribName in spec:
del spec[attribName]
def handleEntityInsertComplete(newEntId, oldEntId = entId, oldEntId2new = oldEntId2new, spec = spec, treeEntry = treeEntry, addEntities = addEntities):
oldEntId2new[oldEntId] = newEntId
def assignAttribs(entId = newEntId, oldEntId = oldEntId, spec = spec, treeEntry = treeEntry):
for attribName in spec:
self.setAttribEdit(entId, attribName, spec[attribName])
addEntities(treeEntry[oldEntId], newEntId)
self.acceptOnce(self.getEntityCreateEvent(newEntId), assignAttribs)
self.insertEntity(entType, parentEntId=parentEntId, callback=handleEntityInsertComplete)
addEntities(eTree, selectedEntId)
def handleExportEntity(self):
try:
selectedEntId = self.selectedEntity.entId
except AttributeError:
self.editor.showWarning('Please select a valid entity first.', 'error')
return
import tkFileDialog
filename = tkFileDialog.asksaveasfilename(parent=self.editor.parent, defaultextension='.egroup', filetypes=[('Entity Group', '.egroup'), ('All Files', '*')])
if len(filename) == 0:
return
eTree = {selectedEntId: {}}
eGroup = {}
eGroup[selectedEntId] = self.levelSpec.getEntitySpecCopy(selectedEntId)
for entId, spec in eGroup.items():
eGroup[entId] = self.specPrePickle(spec)
try:
import pickle
f = open(filename, 'wb') # binary mode: pickle data is not plain text
pickle.dump(eTree, f)
pickle.dump(eGroup, f)
except:
self.editor.showWarning("Error exporting entity group to '%s'." % filename, 'error')
return
def handleExportEntityTree(self):
try:
selectedEntId = self.selectedEntity.entId
except AttributeError:
self.editor.showWarning('Please select a valid entity first.', 'error')
return
import tkFileDialog
filename = tkFileDialog.asksaveasfilename(parent=self.editor.parent, defaultextension='.egroup', filetypes=[('Entity Group', '.egroup'), ('All Files', '*')])
if len(filename) == 0:
return
eTree = {}
eGroup = {}
def addEntity(entId, treeEntry):
treeEntry[entId] = {}
eGroup[entId] = self.levelSpec.getEntitySpecCopy(entId)
entity = self.getEntity(entId)
for child in entity.getChildren():
addEntity(child.entId, treeEntry[entId])
addEntity(selectedEntId, eTree)
for entId, spec in eGroup.items():
eGroup[entId] = self.specPrePickle(spec)
try:
import pickle
f = open(filename, 'wb') # binary mode: pickle data is not plain text
pickle.dump(eTree, f)
pickle.dump(eGroup, f)
except:
self.editor.showWarning("Error exporting entity group to '%s'." % filename, 'error')
return
def moveAvToSelected(self):
try:
selectedEntId = self.selectedEntity.entId
except AttributeError:
self.editor.showWarning('Please select a valid entity first.', 'error')
return
entNp = self.getEntInstanceNP(selectedEntId)
if entNp is None:
zoneEntId = self.levelSpec.getEntityZoneEntId(selectedEntId)
entNp = self.getEntInstanceNP(zoneEntId)
base.localAvatar.setPos(entNp, 0, 0, 0)
base.localAvatar.setHpr(entNp, 0, 0, 0)
zoneNum = self.getEntityZoneEntId(selectedEntId)
self.level.enterZone(zoneNum)
return
def requestSpecSave(self):
self.privSendAttribEdit(LevelConstants.EditMgrEntId, 'requestSave', None)
self.specModified = 0
return
def setAttribChange(self, entId, attrib, valueStr, username):
if username == self.editUsername:
print 'we got our own edit back!'
value = eval(valueStr)
self.levelSpec.setAttribChange(entId, attrib, value, username)
def getTypeName(self):
return 'Level'
| mit |
larsks/cloud-init-patches | cloudinit/config/cc_rh_subscription.py | 1 | 15746 | # vi: ts=4 expandtab
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Author: Brent Baude <bbaude@redhat.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import util
distros = ['fedora', 'rhel']
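# A minimal sketch of the user-data this module consumes (values are
# hypothetical; the keys mirror SubscriptionManager.valid_rh_keys below):
#
# rh_subscription:
#   username: joe@example.com
#   password: '1234abcd'
#   auto-attach: True
#   service-level: self-support
#   add-pool: ['1a2b3c4d5e6f7g8h9i0j1k2l3m4n5o6p']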
def handle(name, cfg, _cloud, log, _args):
sm = SubscriptionManager(cfg)
sm.log = log
if not sm.is_configured():
log.debug("%s: module not configured.", name)
return None
if not sm.is_registered():
try:
verify, verify_msg = sm._verify_keys()
if verify is not True:
raise SubscriptionError(verify_msg)
cont = sm.rhn_register()
if not cont:
raise SubscriptionError("Registration failed or did not "
"run completely")
# Splitting up the registration, auto-attach, and servicelevel
# commands because the error codes, messages from subman are not
# specific enough.
# Attempt to change the service level
if sm.auto_attach and sm.servicelevel is not None:
if not sm._set_service_level():
raise SubscriptionError("Setting of service-level "
"failed")
else:
sm.log.debug("Completed auto-attach with service level")
elif sm.auto_attach:
if not sm._set_auto_attach():
raise SubscriptionError("Setting auto-attach failed")
else:
sm.log.debug("Completed auto-attach")
if sm.pools is not None:
if not isinstance(sm.pools, list):
pool_fail = "Pools must in the format of a list"
raise SubscriptionError(pool_fail)
return_stat = sm.addPool(sm.pools)
if not return_stat:
raise SubscriptionError("Unable to attach pools {0}"
.format(sm.pools))
if (sm.enable_repo is not None) or (sm.disable_repo is not None):
return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
if not return_stat:
raise SubscriptionError("Unable to add or remove repos")
sm.log_success("rh_subscription plugin completed successfully")
except SubscriptionError as e:
sm.log_warn(str(e))
sm.log_warn("rh_subscription plugin did not complete successfully")
else:
sm.log_success("System is already registered")
class SubscriptionError(Exception):
pass
class SubscriptionManager(object):
valid_rh_keys = ['org', 'activation-key', 'username', 'password',
'disable-repo', 'enable-repo', 'add-pool',
'rhsm-baseurl', 'server-hostname',
'auto-attach', 'service-level']
def __init__(self, cfg):
self.cfg = cfg
self.rhel_cfg = self.cfg.get('rh_subscription', {})
self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
self.server_hostname = self.rhel_cfg.get('server-hostname')
self.pools = self.rhel_cfg.get('add-pool')
self.activation_key = self.rhel_cfg.get('activation-key')
self.org = self.rhel_cfg.get('org')
self.userid = self.rhel_cfg.get('username')
self.password = self.rhel_cfg.get('password')
self.auto_attach = self.rhel_cfg.get('auto-attach')
self.enable_repo = self.rhel_cfg.get('enable-repo')
self.disable_repo = self.rhel_cfg.get('disable-repo')
self.servicelevel = self.rhel_cfg.get('service-level')
self.subman = ['subscription-manager']
def log_success(self, msg):
'''Simple wrapper for logging info messages. Useful for unittests'''
self.log.info(msg)
def log_warn(self, msg):
'''Simple wrapper for logging warning messages. Useful for unittests'''
self.log.warn(msg)
def _verify_keys(self):
'''
Checks that the keys in the rh_subscription dict from the user-data
are what we expect.
'''
for k in self.rhel_cfg:
if k not in self.valid_rh_keys:
bad_key = "{0} is not a valid key for rh_subscription. "\
"Valid keys are: "\
"{1}".format(k, ', '.join(self.valid_rh_keys))
return False, bad_key
# Check for bad auto-attach value
if (self.auto_attach is not None) and \
not (util.is_true(self.auto_attach) or
util.is_false(self.auto_attach)):
not_bool = "The key auto-attach must be a boolean value "\
"(True/False "
return False, not_bool
if (self.servicelevel is not None) and ((not self.auto_attach) or
(util.is_false(str(self.auto_attach)))):
no_auto = ("The service-level key must be used in conjunction "
"with the auto-attach key. Please re-run with "
"auto-attach: True")
return False, no_auto
return True, None
def is_registered(self):
'''
Checks if the system is already registered and returns
True if so, else False
'''
cmd = ['identity']
try:
self._sub_man_cli(cmd)
except util.ProcessExecutionError:
return False
return True
def _sub_man_cli(self, cmd, logstring_val=False):
'''
Uses the preferred cloud-init subprocess def of util.subp
and runs subscription-manager. Breaking this out into a
separate function for later use in mocking and unittests
'''
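# e.g. cmd=['identity'] becomes ['subscription-manager', 'identity']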
cmd = self.subman + cmd
return util.subp(cmd, logstring=logstring_val)
def rhn_register(self):
'''
Registers the system by userid and password or activation key
and org. Returns True when successful False when not.
'''
if (self.activation_key is not None) and (self.org is not None):
# register by activation key
cmd = ['register', '--activationkey={0}'.
format(self.activation_key), '--org={0}'.format(self.org)]
# If the baseurl and/or server url are passed in, we register
# with them.
if self.rhsm_baseurl is not None:
cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
if self.server_hostname is not None:
cmd.append("--serverurl={0}".format(self.server_hostname))
try:
return_out, return_err = self._sub_man_cli(cmd,
logstring_val=True)
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
return False
elif (self.userid is not None) and (self.password is not None):
# register by username and password
cmd = ['register', '--username={0}'.format(self.userid),
'--password={0}'.format(self.password)]
# If the baseurl and/or server url are passed in, we register
# with them.
if self.rhsm_baseurl is not None:
cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
if self.server_hostname is not None:
cmd.append("--serverurl={0}".format(self.server_hostname))
# Attempting to register the system only
try:
return_out, return_err = self._sub_man_cli(cmd,
logstring_val=True)
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
return False
else:
self.log_warn("Unable to register system due to incomplete "
"information.")
self.log_warn("Use either activationkey and org *or* userid "
"and password")
return False
reg_id = return_out.split("ID: ")[1].rstrip()
self.log.debug("Registered successfully with ID {0}".format(reg_id))
return True
def _set_service_level(self):
cmd = ['attach', '--auto', '--servicelevel={0}'
.format(self.servicelevel)]
try:
return_out, return_err = self._sub_man_cli(cmd)
except util.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
if line != '':
self.log_warn(line)
else:
self.log_warn("Setting the service level failed with: "
"{0}".format(e.stderr.strip()))
return False
for line in return_out.split("\n"):
if line is not "":
self.log.debug(line)
return True
def _set_auto_attach(self):
cmd = ['attach', '--auto']
try:
return_out, return_err = self._sub_man_cli(cmd)
except util.ProcessExecutionError as e:
self.log_warn("Auto-attach failed with: "
"{0}".format(e.stderr.strip()))
return False
for line in return_out.split("\n"):
if line is not "":
self.log.debug(line)
return True
def _getPools(self):
'''
Gets the list of pools for the active subscription and returns them
in list form.
'''
available = []
consumed = []
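# Both subscription-manager calls below assume '--pool-only' prints one
# pool ID per line, e.g. (hypothetical): 8a85f9813e3d5e43013e3ddd4e2a0977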
# Get all available pools
cmd = ['list', '--available', '--pool-only']
results, errors = self._sub_man_cli(cmd)
available = (results.rstrip()).split("\n")
# Get all consumed pools
cmd = ['list', '--consumed', '--pool-only']
results, errors = self._sub_man_cli(cmd)
consumed = (results.rstrip()).split("\n")
return available, consumed
def _getRepos(self):
'''
Obtains the current list of active yum repositories and returns
them in list form.
'''
cmd = ['repos', '--list-enabled']
return_out, return_err = self._sub_man_cli(cmd)
active_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
active_repos.append((repo.split(':')[1]).strip())
cmd = ['repos', '--list-disabled']
return_out, return_err = self._sub_man_cli(cmd)
inactive_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
inactive_repos.append((repo.split(':')[1]).strip())
return active_repos, inactive_repos
def addPool(self, pools):
'''
Takes a list of subscription pools and "attaches" them to the
current subscription
'''
# An empty list was passed
if len(pools) == 0:
self.log.debug("No pools to attach")
return True
pool_available, pool_consumed = self._getPools()
pool_list = []
cmd = ['attach']
for pool in pools:
if (pool not in pool_consumed) and (pool in pool_available):
pool_list.append('--pool={0}'.format(pool))
else:
self.log_warn("Pool {0} is not available".format(pool))
if len(pool_list) > 0:
cmd.extend(pool_list)
try:
self._sub_man_cli(cmd)
self.log.debug("Attached the following pools to your "
"system: %s" % (", ".join(pool_list))
.replace('--pool=', ''))
return True
except util.ProcessExecutionError as e:
self.log_warn("Unable to attach pool {0} "
"due to {1}".format(pool, e))
return False
def update_repos(self, erepos, drepos):
'''
Takes a list of yum repo ids that need to be disabled or enabled; then
it verifies if they are already enabled or disabled and finally
executes the action to disable or enable
'''
if (erepos is not None) and (not isinstance(erepos, list)):
self.log_warn("Repo IDs must in the format of a list.")
return False
if (drepos is not None) and (not isinstance(drepos, list)):
self.log_warn("Repo IDs must in the format of a list.")
return False
# Normalize missing lists to empty so the len() checks are safe when
# only one of enable-repo/disable-repo was supplied
if erepos is None:
erepos = []
if drepos is None:
drepos = []
# Bail if both lists are not populated
if (len(erepos) == 0) and (len(drepos) == 0):
self.log.debug("No repo IDs to enable or disable")
return True
active_repos, inactive_repos = self._getRepos()
# Creating a list of repoids to be enabled
enable_list = []
enable_list_fail = []
for repoid in erepos:
if (repoid in inactive_repos):
enable_list.append("--enable={0}".format(repoid))
else:
enable_list_fail.append(repoid)
# Creating a list of repoids to be disabled
disable_list = []
disable_list_fail = []
for repoid in drepos:
if repoid in active_repos:
disable_list.append("--disable={0}".format(repoid))
else:
disable_list_fail.append(repoid)
# Logging any repos that are already enabled or disabled
if len(enable_list_fail) > 0:
for fail in enable_list_fail:
# Check if the repo exists or not
if fail in active_repos:
self.log.debug("Repo {0} is already enabled".format(fail))
else:
self.log_warn("Repo {0} does not appear to "
"exist".format(fail))
if len(disable_list_fail) > 0:
for fail in disable_list_fail:
self.log.debug("Repo {0} not disabled "
"because it is not enabled".format(fail))
cmd = ['repos']
if len(disable_list) > 0:
cmd.extend(disable_list)
if len(enable_list) > 0:
cmd.extend(enable_list)
try:
self._sub_man_cli(cmd)
except util.ProcessExecutionError as e:
self.log_warn("Unable to alter repos due to {0}".format(e))
return False
if len(enable_list) > 0:
self.log.debug("Enabled the following repos: %s" %
(", ".join(enable_list)).replace('--enable=', ''))
if len(disable_list) > 0:
self.log.debug("Disabled the following repos: %s" %
(", ".join(disable_list)).replace('--disable=', ''))
return True
def is_configured(self):
return bool((self.userid and self.password) or self.activation_key)
| gpl-3.0 |
johnchronis/exareme | exareme-tools/madis/src/functions/aggregate/mining.py | 4 | 13491 | import re
import itertools
import setpath
import functions
import lib.jopts as jopts
from operator import itemgetter
import random
__docformat__ = 'reStructuredText en'
re_params=re.compile('(\w*):(.*)')
def consumer(func):
"""A decorator, advances func to its first yield point when called.
"""
from functools import wraps
@wraps(func)
def wrapper(*args,**kw):
gen = func(*args, **kw)
gen.next()
return gen
return wrapper
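# Usage sketch (hypothetical): priming means send() works immediately.
#
# @consumer
# def echo():
#     while True:
#         item = (yield)
#         print item
#
# e = echo()       # already advanced to the first yield by the wrapper
# e.send('hello')  # prints 'hello'; without @consumer this would raise
#                  # TypeError ("can't send non-None value to a
#                  # just-started generator")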
class freqitemsets:
"""
.. function:: freqitemsets(datacol, [threshold, noautothres, stats, maxlen]) -> [itemset_id:int, itemset_length:int, itemset_frequency:int, item:text]
Calculates frequent itemsets on a given column (datacol). The algorithm is tuned for the
case when we have many different items (in the order of millions), many input itemsets, but
small itemset length (10-20).
Returned table schema:
:itemset_id: Automatic itemset id
:itemset_length: Length of itemset
:itemset_frequency: How many times an itemset has been found
:item: Itemset's item value
Parameters:
:datacol:
Column on which to calculate frequent itemsets
:threshold: Default is 2
How many times a freq. itemset must appear for it to be included in the results
:noautothres: 1/0 (Default is 0)
Do not calculate the threshold automatically
:stats: 1/0 (Default is 0)
Return frequent itemset statistics
:maxlen: NUMBER (Default is no limit at all)
Maximum itemset length to search
Examples:
>>> table1('''
... 'car wood bike' 'first group'
... 'car car wood' 'first group'
... 'car wood' 'first group'
... 'car wood ice' 'first group'
... 'ice' 'second group'
... 'car ice' 'second group'
... 'car cream toy' 'second group'
... 'icecream ice car toy' 'second group'
... ''')
>>> sql("select b,freqitemsets(a, 'threshold:2', 'noautothres:1', 'maxlen:2') from table1 group by b")
b | itemset_id | itemset_length | itemset_frequency | item
---------------------------------------------------------------------
first group | 1 | 1 | 4 | wood
first group | 2 | 1 | 4 | car
first group | 3 | 2 | 4 | car
first group | 3 | 2 | 4 | wood
second group | 1 | 1 | 3 | ice
second group | 2 | 1 | 3 | car
second group | 3 | 1 | 2 | toy
second group | 4 | 2 | 2 | car
second group | 4 | 2 | 2 | ice
second group | 5 | 2 | 2 | car
second group | 5 | 2 | 2 | toy
>>> sql("select b,freqitemsets(a, 'stats:1') from table1 group by b")
b | MaxTransactionLength | CombinationCount | PassedTransactions | ValidKeywords
-------------------------------------------------------------------------------------------
first group | 3 | 2 | 3 | 2
first group | 3 | 1 | 1 | 2
first group | 3 | 0 | 0 | 0
second group | 4 | 3 | 3 | 3
second group | 4 | 0 | 3 | 0
"""
registered=True
multiset=True
def __init__(self):
self.threshold=2
self.startingthreshold=2
self.autothres=1
self.compress=0
self.initstatic=False
self.input={}
self.maxlength=0
self.kwcode={}
self.codekw={}
self.maxkwcode=0
self.overthres={}
self.belowthres={}
self.passedkw={}
self.init=True
self.itemset_id=0
self.maxlen=None
self.stats=False
def initargs(self, args):
self.init=False
for i in xrange(1, len(args)):
v=re_params.match(args[i])
if v is not None and v.groups()[0]!='' and v.groups()[1]!='' and i>0:
v=v.groups()
if v[0]=='threshold':
try:
self.threshold=int(v[1])
self.startingthreshold=self.threshold
except KeyboardInterrupt:
raise
except:
raise functions.OperatorError("FreqItemsets",'No integer value given for threshold')
if v[0]=='noautothres':
self.autothres=0
if v[0]=='compress':
self.compress=1
if v[0]=='maxlen':
self.maxlen=int(v[1])
if v[0]=='stats':
self.stats=True
def demultiplex(self, data):
iterable=None
iterpos=-1
for i in xrange(len(data)):
if hasattr(data[i],'__iter__')==True:
iterable=data[i]
iterpos=i
break
if iterpos==-1:
yield list(data)
else:
pre=list(data[0:iterpos])
post=list(data[iterpos+1:])
for i in iterable:
if hasattr(i,'__iter__')==False:
yield pre+[i]+post
else:
yield pre+list(i)+post
def insertcombfreq(self, comb, freq):
if comb in self.overthres:
self.overthres[comb]+=freq
else:
if comb in self.belowthres:
self.belowthres[comb]+=freq
else:
self.belowthres[comb]=freq
if self.belowthres[comb]>=self.threshold:
self.overthres[comb]=self.belowthres[comb]
del(self.belowthres[comb])
for k in comb:
if self.compress==0:
self.passedkw[k]=True
elif not k in self.passedkw:
self.passedkw[k]=self.overthres[comb]
else:
self.passedkw[k]+=self.overthres[comb]
def insertitemset(self, itemset):
if itemset not in self.input:
self.input[itemset]=1
else:
self.input[itemset]+=1
def cleanitemsets(self, minlength):
newitemsets={}
for k,v in self.input.iteritems():
itemset=tuple(i for i in k if i in self.passedkw)
if self.compress==1:
esoteric_itemset=tuple(i for i in itemset if self.passedkw[i]==v)
if len(esoteric_itemset)>0:
if len(itemset)>=minlength:
self.overthres[itemset]=v
itemset=tuple(i for i in itemset if self.passedkw[i]!=v)
if len(itemset)>=minlength:
if itemset not in newitemsets:
newitemsets[itemset]=v
else:
newitemsets[itemset]+=v
self.input=newitemsets
def step(self, *args):
if self.init==True:
self.initargs(args)
if len(args[0])==0:
return
itms=sorted(set(args[0].split(' ')))
itms=[x for x in itms if x!='']
li=len(itms)
if li>0:
if li>self.maxlength:
self.maxlength=li
inputkws=[]
for kw in itms:
if len(kw)==0:
print itms, args[0], len(args[0]), li
if kw not in self.kwcode:
self.kwcode[kw]=self.maxkwcode
self.codekw[self.maxkwcode]=kw
inputkws.append(self.maxkwcode)
self.insertcombfreq( (self.maxkwcode,),1 )
self.maxkwcode+=1
else:
itm=self.kwcode[kw]
self.insertcombfreq( (itm,),1 )
inputkws.append(itm)
if len(inputkws)>1:
self.insertitemset(tuple(inputkws))
def final(self):
if not self.stats:
yield ('itemset_id', 'itemset_length', 'itemset_frequency', 'item')
else:
yield ('MaxTransactionLength', 'CombinationCount', 'PassedTransactions', 'ValidKeywords')
splist=[{},{}]
del(self.kwcode)
splist[1]=self.overthres
if self.stats:
yield [self.maxlength, len(splist[1]), len(self.input), len(self.passedkw)]
if not self.stats:
for its,v in sorted(splist[1].items(), key=itemgetter(1),reverse=True):
self.itemset_id+=1
for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):
yield i
if self.maxlen==None:
self.maxlen=self.maxlength
for l in xrange(2, min(self.maxlength+1, self.maxlen+1)):
splist.append({})
self.belowthres={}
self.overthres={}
prevl=l-1
# Autothresholding
if self.autothres==1:
if len(self.input)==0 or len(self.passedkw)==0:
break
else:
self.threshold=self.startingthreshold + int(len(self.passedkw)/len(self.input))
self.cleanitemsets(l)
self.passedkw={}
prevsplist = splist[prevl]
icombs = itertools.combinations
insertcomb = self.insertcombfreq
for k,v in self.input.iteritems():
for k in icombs(k,l):
insertit=True
for i1 in icombs(k, prevl):
if i1 not in prevsplist:
insertit=False
break
if insertit:
insertcomb( k,v )
splist[l-1]={}
splist[l]=self.overthres
if self.stats:
yield [self.maxlength, len(splist[l]), len(self.input), len(self.passedkw)]
if not self.stats:
for its,v in sorted(splist[l].items(), key=itemgetter(1),reverse=True):
self.itemset_id+=1
for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):
yield i
del(self.overthres)
del(self.belowthres)
del(self.passedkw)
del(self.input)
del(self.codekw)
del(splist)
class sampledistvals:
"""
.. function:: sampledistvals(sample_size, C1, C2, C3) -> [C1, C2, C3]
Sampledistvals returns sample_size distinct values for each of the input C1..Cn columns.
>>> table1('''
... test1 2 3
... test1 2 3
... test2 4 2
... test4 2 t
... ''')
>>> sql("select sampledistvals(3, a, b, c) from table1")
C1 | C2 | C3
---------------------------------------------
["test1","test2","test4"] | [2,4] | [2,3,"t"]
"""
registered=True
def __init__(self):
self.vals=None
self.lenargs = -1
self.init=True
def step(self, *args):
if self.init:
self.lenargs = len(args)
self.vals = [set() for i in xrange(self.lenargs-1)]
self.init = False
for i in xrange(1, self.lenargs):
if len(self.vals[i-1])<args[0] and args[i] not in self.vals[i-1]:
self.vals[i-1].add(args[i])
def final(self):
yield tuple(['C'+str(i) for i in xrange(1, self.lenargs)] )
yield [jopts.toj(list(i)) for i in self.vals]
class samplegroup:
"""
.. function:: samplegroup(sample_size, C1, C2, C3)
Returns a random sample_size set of rows.
>>> table1('''
... test1 2 3
... test1 2 3
... test2 4 2
... test4 2 t
... ''')
>>> sql("select samplegroup(2, a, b, c) from table1") # doctest: +ELLIPSIS
C1 | C2 | C3
---------------
...
>>> sql("select samplegroup(2) from (select 5 where 5=6)") # doctest: +ELLIPSIS
"""
registered=True
def __init__(self):
self.samplelist = []
self.index = 0
self.random = random.randint
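# step() below is classic reservoir sampling: after N rows every row has
# remained in the sample with probability sample_size/N, so one pass
# yields a uniform random sample without knowing N in advance.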
def step(self, *args):
# Generate the reservoir
if self.index < args[0]:
self.samplelist.append(args)
else:
r = self.random(0, self.index)
if r < args[0]:
self.samplelist[r] = args
self.index += 1
def final(self):
if self.samplelist == []:
yield tuple(['C1'])
else:
yield tuple(['C'+str(i) for i in xrange(1, len(self.samplelist[0]))] )
for r in self.samplelist:
yield list(r[1:])
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| mit |
chudaol/edx-platform | pavelib/acceptance_test.py | 54 | 1985 | """
Acceptance test tasks
"""
from paver.easy import task, cmdopts, needs
from pavelib.utils.test.suites import AcceptanceTestSuite
from optparse import make_option
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text # pylint: disable=invalid-name
__test__ = False # do not collect
@task
@needs(
'pavelib.prereqs.install_prereqs',
'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
("system=", "s", "System to act on"),
("default_store=", "m", "Default modulestore to use for course creation"),
("fasttest", "a", "Run without collectstatic"),
("extra_args=", "e", "adds as extra args to the test command"),
make_option("--verbose", action="store_const", const=2, dest="verbosity"),
make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
make_option("-v", "--verbosity", action="count", dest="verbosity"),
make_option("--pdb", action="store_true", help="Launches an interactive debugger upon error"),
])
def test_acceptance(options):
"""
Run the acceptance tests for the either lms or cms
"""
opts = {
'fasttest': getattr(options, 'fasttest', False),
'system': getattr(options, 'system', None),
'default_store': getattr(options, 'default_store', None),
'verbosity': getattr(options, 'verbosity', 3),
'extra_args': getattr(options, 'extra_args', ''),
'pdb': getattr(options, 'pdb', False),
}
if opts['system'] not in ['cms', 'lms']:
msg = colorize(
'red',
'No system specified, running tests for both cms and lms.'
)
print(msg)
if opts['default_store'] not in ['draft', 'split']:
msg = colorize(
'red',
'No modulestore specified, running tests for both draft and split.'
)
print(msg)
suite = AcceptanceTestSuite('{} acceptance'.format(opts['system']), **opts)
suite.run()
| agpl-3.0 |
KublaikhanGeek/fastsocket | kernel/tools/perf/tests/attr.py | 19 | 9247 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
# RHEL6 - no support for
# exclude_callchain_kernel
# exclude_callchain_user
# sample_regs_user
# sample_stack_user
class Event(dict):
terms = [
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
# 'exclude_callchain_kernel',
# 'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
# 'sample_regs_user',
# 'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.info(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
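# compare_data() treats '*' as a wildcard and '|' as alternation, so e.g.
# compare_data('0|1', '*') and compare_data('4000|8000', '4000') are True.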
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
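# A minimal description file might look like (values are hypothetical):
#
# [config]
# command = record
# args    = kill >/dev/null 2>&1
# ret     = 1
#
# [event:base-record]
# sample_period = 100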
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.info(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
# The event record section header contains 'event' word,
# optionally followed by ':' allowing to load 'parent
# event' first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" running '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.info(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.info(" match: [%s] matches %s" % (exp_name, str(exp_list)))
# we did not find any matching event - fail
if (not exp_list):
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.info(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.info(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
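# e.g. (hypothetical fds): if result event 'event-2' carries group_fd 3
# and 'event-1' owns fd 3, then result['event-2'].group becomes 'event-1'.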
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.info(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-2.0 |
lintzc/gpdb | src/test/tinc/ext/suds/umx/__init__.py | 203 | 1811 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides modules containing classes to support
unmarshalling (XML).
"""
from suds.sudsobject import Object
class Content(Object):
"""
@ivar node: The content source node.
@type node: L{sax.element.Element}
@ivar data: The (optional) content data.
@type data: L{Object}
@ivar text: The (optional) content (xml) text.
@type text: basestring
"""
extensions = []
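# Names listed in 'extensions' resolve (and are cached) as None instead
# of raising AttributeError; see __getattr__ below.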
def __init__(self, node, **kwargs):
Object.__init__(self)
self.node = node
self.data = None
self.text = None
for k,v in kwargs.items():
setattr(self, k, v)
def __getattr__(self, name):
if name not in self.__dict__:
if name in self.extensions:
v = None
setattr(self, name, v)
else:
raise AttributeError, \
'Content has no attribute %s' % name
else:
v = self.__dict__[name]
return v
| apache-2.0 |