repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
marty331/jakesclock | flask/lib/python2.7/site-packages/pyglet/libs/darwin/cocoapy/cocoalibs.py | 28 | 21168 | from ctypes import *
from ctypes import util
from .runtime import send_message, ObjCInstance
from .cocoatypes import *
######################################################################
# CORE FOUNDATION
# Handle to the CoreFoundation framework (only resolvable on macOS).
cf = cdll.LoadLibrary(util.find_library('CoreFoundation'))

kCFStringEncodingUTF8 = 0x08000100  # CFString.h encoding constant

CFAllocatorRef = c_void_p
CFStringEncoding = c_uint32

# CFString prototypes used by the conversion helpers below.
cf.CFStringCreateWithCString.restype = c_void_p
cf.CFStringCreateWithCString.argtypes = [CFAllocatorRef, c_char_p, CFStringEncoding]
cf.CFRelease.restype = c_void_p
cf.CFRelease.argtypes = [c_void_p]
cf.CFStringGetLength.restype = CFIndex
cf.CFStringGetLength.argtypes = [c_void_p]
cf.CFStringGetMaximumSizeForEncoding.restype = CFIndex
cf.CFStringGetMaximumSizeForEncoding.argtypes = [CFIndex, CFStringEncoding]
cf.CFStringGetCString.restype = c_bool
cf.CFStringGetCString.argtypes = [c_void_p, c_char_p, CFIndex, CFStringEncoding]
cf.CFStringGetTypeID.restype = CFTypeID
cf.CFStringGetTypeID.argtypes = []
cf.CFAttributedStringCreate.restype = c_void_p
cf.CFAttributedStringCreate.argtypes = [CFAllocatorRef, c_void_p, c_void_p]
# Core Foundation type to Python type conversion functions
def CFSTR(string):
    """Create a CFString (wrapped as an ObjCInstance) from a python string.

    The caller owns the returned reference (Create rule); see
    get_NSString for an autoreleased variant.
    """
    encoded = string.encode('utf8')
    cfstr = cf.CFStringCreateWithCString(None, encoded, kCFStringEncodingUTF8)
    return ObjCInstance(c_void_p(cfstr))
# Other possible names for this method:
# at, ampersat, arobe, apenstaartje (little monkey tail), strudel,
# klammeraffe (spider monkey), little_mouse, arroba, sobachka (doggie)
# malpa (monkey), snabel (trunk), papaki (small duck), afna (monkey),
# kukac (caterpillar).
def get_NSString(string):
    """Autoreleased version of CFSTR."""
    cfstr = CFSTR(string)
    return cfstr.autorelease()
def cfstring_to_string(cfstring):
    """Convert a CFStringRef to a python unicode string.

    Returns None if the bytes could not be extracted (same implicit
    behavior as falling off the end of the function).
    """
    n_chars = cf.CFStringGetLength(cfstring)
    buf_size = cf.CFStringGetMaximumSizeForEncoding(n_chars, kCFStringEncodingUTF8) + 1
    buf = create_string_buffer(buf_size)
    if not cf.CFStringGetCString(cfstring, buf, buf_size, kCFStringEncodingUTF8):
        return None
    return unicode(buf.value, 'utf-8')
# CFData prototypes (used for image/font data transfer).
cf.CFDataCreate.restype = c_void_p
cf.CFDataCreate.argtypes = [c_void_p, c_void_p, CFIndex]
cf.CFDataGetBytes.restype = None
cf.CFDataGetBytes.argtypes = [c_void_p, CFRange, c_void_p]
cf.CFDataGetLength.restype = CFIndex
cf.CFDataGetLength.argtypes = [c_void_p]

# CFDictionary prototypes.
cf.CFDictionaryGetValue.restype = c_void_p
cf.CFDictionaryGetValue.argtypes = [c_void_p, c_void_p]
cf.CFDictionaryAddValue.restype = None
cf.CFDictionaryAddValue.argtypes = [c_void_p, c_void_p, c_void_p]
cf.CFDictionaryCreateMutable.restype = c_void_p
cf.CFDictionaryCreateMutable.argtypes = [CFAllocatorRef, CFIndex, c_void_p, c_void_p]

# CFNumber prototypes.
cf.CFNumberCreate.restype = c_void_p
cf.CFNumberCreate.argtypes = [CFAllocatorRef, CFNumberType, c_void_p]
cf.CFNumberGetType.restype = CFNumberType
cf.CFNumberGetType.argtypes = [c_void_p]
cf.CFNumberGetValue.restype = c_ubyte
cf.CFNumberGetValue.argtypes = [c_void_p, CFNumberType, c_void_p]
cf.CFNumberGetTypeID.restype = CFTypeID
cf.CFNumberGetTypeID.argtypes = []
cf.CFGetTypeID.restype = CFTypeID
cf.CFGetTypeID.argtypes = [c_void_p]

# CFNumber.h -- CFNumberType values.
kCFNumberSInt8Type = 1
kCFNumberSInt16Type = 2
kCFNumberSInt32Type = 3
kCFNumberSInt64Type = 4
kCFNumberFloat32Type = 5
kCFNumberFloat64Type = 6
kCFNumberCharType = 7
kCFNumberShortType = 8
kCFNumberIntType = 9
kCFNumberLongType = 10
kCFNumberLongLongType = 11
kCFNumberFloatType = 12
kCFNumberDoubleType = 13
kCFNumberCFIndexType = 14
kCFNumberNSIntegerType = 15
kCFNumberCGFloatType = 16
kCFNumberMaxType = 16
def cfnumber_to_number(cfnumber):
    """Convert CFNumber to python int or float.

    Raises for CFNumber types with no ctypes mapping; returns None if
    CFNumberGetValue reports failure.
    """
    numeric_type = cf.CFNumberGetType(cfnumber)
    ctype = {
        kCFNumberSInt8Type: c_int8,
        kCFNumberSInt16Type: c_int16,
        kCFNumberSInt32Type: c_int32,
        kCFNumberSInt64Type: c_int64,
        kCFNumberFloat32Type: c_float,
        kCFNumberFloat64Type: c_double,
        kCFNumberCharType: c_byte,
        kCFNumberShortType: c_short,
        kCFNumberIntType: c_int,
        kCFNumberLongType: c_long,
        kCFNumberLongLongType: c_longlong,
        kCFNumberFloatType: c_float,
        kCFNumberDoubleType: c_double,
        kCFNumberCFIndexType: CFIndex,
        kCFNumberCGFloatType: CGFloat,
    }.get(numeric_type)
    if ctype is None:
        raise Exception('cfnumber_to_number: unhandled CFNumber type %d' % numeric_type)
    value = ctype()
    if cf.CFNumberGetValue(cfnumber, numeric_type, byref(value)):
        return value.value
# Dictionary of cftypes matched to the method converting them to python values.
# Keyed by CFTypeID; cftype_to_value() looks converters up here, and other
# libraries may register additional entries.
known_cftypes = { cf.CFStringGetTypeID() : cfstring_to_string,
                  cf.CFNumberGetTypeID() : cfnumber_to_number
                  }
def cftype_to_value(cftype):
    """Convert a CFType into an equivalent python type.

    The convertible CFTypes are taken from the known_cftypes
    dictionary, which may be added to if another library implements
    its own conversion methods.  Unknown types are returned unchanged;
    a null/falsy reference yields None.
    """
    if not cftype:
        return None
    converter = known_cftypes.get(cf.CFGetTypeID(cftype))
    if converter is None:
        return cftype
    return converter(cftype)
# CFSet prototypes.
cf.CFSetGetCount.restype = CFIndex
cf.CFSetGetCount.argtypes = [c_void_p]
cf.CFSetGetValues.restype = None
# PyPy 1.7 is fine with 2nd arg as POINTER(c_void_p),
# but CPython ctypes 1.1.0 complains, so just use c_void_p.
cf.CFSetGetValues.argtypes = [c_void_p, c_void_p]
def cfset_to_set(cfset):
    """Convert CFSet to python set (members converted via cftype_to_value)."""
    count = cf.CFSetGetCount(cfset)
    values = (c_void_p * count)()
    cf.CFSetGetValues(cfset, byref(values))
    return set(cftype_to_value(c_void_p(v)) for v in values)
# CFArray prototypes.
cf.CFArrayGetCount.restype = CFIndex
cf.CFArrayGetCount.argtypes = [c_void_p]
cf.CFArrayGetValueAtIndex.restype = c_void_p
cf.CFArrayGetValueAtIndex.argtypes = [c_void_p, CFIndex]
def cfarray_to_list(cfarray):
    """Convert CFArray to python list (members converted via cftype_to_value)."""
    result = []
    for i in range(cf.CFArrayGetCount(cfarray)):
        item = cf.CFArrayGetValueAtIndex(cfarray, i)
        result.append(cftype_to_value(c_void_p(item)))
    return result
# Run-loop constant/functions used by the event-loop integration.
kCFRunLoopDefaultMode = c_void_p.in_dll(cf, 'kCFRunLoopDefaultMode')
cf.CFRunLoopGetCurrent.restype = c_void_p
cf.CFRunLoopGetCurrent.argtypes = []
cf.CFRunLoopGetMain.restype = c_void_p
cf.CFRunLoopGetMain.argtypes = []
######################################################################
# APPLICATION KIT
# Even though we don't use this directly, it must be loaded so that
# we can find the NSApplication, NSWindow, and NSView classes.
appkit = cdll.LoadLibrary(util.find_library('AppKit'))
NSDefaultRunLoopMode = c_void_p.in_dll(appkit, 'NSDefaultRunLoopMode')
NSEventTrackingRunLoopMode = c_void_p.in_dll(appkit, 'NSEventTrackingRunLoopMode')
NSApplicationDidHideNotification = c_void_p.in_dll(appkit, 'NSApplicationDidHideNotification')
NSApplicationDidUnhideNotification = c_void_p.in_dll(appkit, 'NSApplicationDidUnhideNotification')
# /System/Library/Frameworks/AppKit.framework/Headers/NSEvent.h
NSAnyEventMask = 0xFFFFFFFFL # NSUIntegerMax
NSKeyDown = 10
NSKeyUp = 11
NSFlagsChanged = 12
NSApplicationDefined = 15
NSAlphaShiftKeyMask = 1 << 16
NSShiftKeyMask = 1 << 17
NSControlKeyMask = 1 << 18
NSAlternateKeyMask = 1 << 19
NSCommandKeyMask = 1 << 20
NSNumericPadKeyMask = 1 << 21
NSHelpKeyMask = 1 << 22
NSFunctionKeyMask = 1 << 23
NSInsertFunctionKey = 0xF727
NSDeleteFunctionKey = 0xF728
NSHomeFunctionKey = 0xF729
NSBeginFunctionKey = 0xF72A
NSEndFunctionKey = 0xF72B
NSPageUpFunctionKey = 0xF72C
NSPageDownFunctionKey = 0xF72D
# /System/Library/Frameworks/AppKit.framework/Headers/NSWindow.h
NSBorderlessWindowMask = 0
NSTitledWindowMask = 1 << 0
NSClosableWindowMask = 1 << 1
NSMiniaturizableWindowMask = 1 << 2
NSResizableWindowMask = 1 << 3
# /System/Library/Frameworks/AppKit.framework/Headers/NSPanel.h
NSUtilityWindowMask = 1 << 4
# /System/Library/Frameworks/AppKit.framework/Headers/NSGraphics.h
NSBackingStoreRetained = 0
NSBackingStoreNonretained = 1
NSBackingStoreBuffered = 2
# /System/Library/Frameworks/AppKit.framework/Headers/NSTrackingArea.h
NSTrackingMouseEnteredAndExited = 0x01
NSTrackingMouseMoved = 0x02
NSTrackingCursorUpdate = 0x04
NSTrackingActiveInActiveApp = 0x40
# /System/Library/Frameworks/AppKit.framework/Headers/NSOpenGL.h
NSOpenGLPFAAllRenderers = 1 # choose from all available renderers
NSOpenGLPFADoubleBuffer = 5 # choose a double buffered pixel format
NSOpenGLPFAStereo = 6 # stereo buffering supported
NSOpenGLPFAAuxBuffers = 7 # number of aux buffers
NSOpenGLPFAColorSize = 8 # number of color buffer bits
NSOpenGLPFAAlphaSize = 11 # number of alpha component bits
NSOpenGLPFADepthSize = 12 # number of depth buffer bits
NSOpenGLPFAStencilSize = 13 # number of stencil buffer bits
NSOpenGLPFAAccumSize = 14 # number of accum buffer bits
NSOpenGLPFAMinimumPolicy = 51 # never choose smaller buffers than requested
NSOpenGLPFAMaximumPolicy = 52 # choose largest buffers of type requested
NSOpenGLPFAOffScreen = 53 # choose an off-screen capable renderer
NSOpenGLPFAFullScreen = 54 # choose a full-screen capable renderer
NSOpenGLPFASampleBuffers = 55 # number of multi sample buffers
NSOpenGLPFASamples = 56 # number of samples per multi sample buffer
NSOpenGLPFAAuxDepthStencil = 57 # each aux buffer has its own depth stencil
NSOpenGLPFAColorFloat = 58 # color buffers store floating point pixels
NSOpenGLPFAMultisample = 59 # choose multisampling
NSOpenGLPFASupersample = 60 # choose supersampling
NSOpenGLPFASampleAlpha = 61 # request alpha filtering
NSOpenGLPFARendererID = 70 # request renderer by ID
NSOpenGLPFASingleRenderer = 71 # choose a single renderer for all screens
NSOpenGLPFANoRecovery = 72 # disable all failure recovery systems
NSOpenGLPFAAccelerated = 73 # choose a hardware accelerated renderer
NSOpenGLPFAClosestPolicy = 74 # choose the closest color buffer to request
NSOpenGLPFARobust = 75 # renderer does not need failure recovery
NSOpenGLPFABackingStore = 76 # back buffer contents are valid after swap
NSOpenGLPFAMPSafe = 78 # renderer is multi-processor safe
NSOpenGLPFAWindow = 80 # can be used to render to an onscreen window
NSOpenGLPFAMultiScreen = 81 # single window can span multiple screens
NSOpenGLPFACompliant = 83 # renderer is opengl compliant
NSOpenGLPFAScreenMask = 84 # bit mask of supported physical screens
NSOpenGLPFAPixelBuffer = 90 # can be used to render to a pbuffer
NSOpenGLPFARemotePixelBuffer = 91 # can be used to render offline to a pbuffer
NSOpenGLPFAAllowOfflineRenderers = 96 # allow use of offline renderers
NSOpenGLPFAAcceleratedCompute = 97 # choose a hardware accelerated compute device
NSOpenGLPFAVirtualScreenCount = 128 # number of virtual screens in this format
NSOpenGLCPSwapInterval = 222
# /System/Library/Frameworks/ApplicationServices.framework/Frameworks/...
# CoreGraphics.framework/Headers/CGImage.h
# CGImageAlphaInfo values (CGImage.h).
kCGImageAlphaNone = 0
kCGImageAlphaPremultipliedLast = 1
kCGImageAlphaPremultipliedFirst = 2
kCGImageAlphaLast = 3
kCGImageAlphaFirst = 4
kCGImageAlphaNoneSkipLast = 5
kCGImageAlphaNoneSkipFirst = 6
kCGImageAlphaOnly = 7
# NOTE: a redundant duplicate re-assignment of
# kCGImageAlphaPremultipliedLast (= 1) was removed here.
# CGBitmapInfo masks/flags (CGImage.h).
kCGBitmapAlphaInfoMask = 0x1F
kCGBitmapFloatComponents = 1 << 8
kCGBitmapByteOrderMask = 0x7000
kCGBitmapByteOrderDefault = 0 << 12
kCGBitmapByteOrder16Little = 1 << 12
kCGBitmapByteOrder32Little = 2 << 12
kCGBitmapByteOrder16Big = 3 << 12
kCGBitmapByteOrder32Big = 4 << 12
# NSApplication.h
NSApplicationPresentationDefault = 0
NSApplicationPresentationHideDock = 1 << 1
NSApplicationPresentationHideMenuBar = 1 << 3
NSApplicationPresentationDisableProcessSwitching = 1 << 5
NSApplicationPresentationDisableHideApplication = 1 << 8
# NSRunningApplication.h
NSApplicationActivationPolicyRegular = 0
NSApplicationActivationPolicyAccessory = 1
NSApplicationActivationPolicyProhibited = 2
######################################################################
# QUARTZ / COREGRAPHICS
quartz = cdll.LoadLibrary(util.find_library('quartz'))
CGDirectDisplayID = c_uint32 # CGDirectDisplay.h
CGError = c_int32 # CGError.h
CGBitmapInfo = c_uint32 # CGImage.h
# /System/Library/Frameworks/ApplicationServices.framework/Frameworks/...
# ImageIO.framework/Headers/CGImageProperties.h
kCGImagePropertyGIFDictionary = c_void_p.in_dll(quartz, 'kCGImagePropertyGIFDictionary')
kCGImagePropertyGIFDelayTime = c_void_p.in_dll(quartz, 'kCGImagePropertyGIFDelayTime')
# /System/Library/Frameworks/ApplicationServices.framework/Frameworks/...
# CoreGraphics.framework/Headers/CGColorSpace.h
kCGRenderingIntentDefault = 0
quartz.CGDisplayIDToOpenGLDisplayMask.restype = c_uint32
quartz.CGDisplayIDToOpenGLDisplayMask.argtypes = [c_uint32]
quartz.CGMainDisplayID.restype = CGDirectDisplayID
quartz.CGMainDisplayID.argtypes = []
quartz.CGShieldingWindowLevel.restype = c_int32
quartz.CGShieldingWindowLevel.argtypes = []
quartz.CGCursorIsVisible.restype = c_bool
quartz.CGDisplayCopyAllDisplayModes.restype = c_void_p
quartz.CGDisplayCopyAllDisplayModes.argtypes = [CGDirectDisplayID, c_void_p]
quartz.CGDisplaySetDisplayMode.restype = CGError
quartz.CGDisplaySetDisplayMode.argtypes = [CGDirectDisplayID, c_void_p, c_void_p]
quartz.CGDisplayCapture.restype = CGError
quartz.CGDisplayCapture.argtypes = [CGDirectDisplayID]
quartz.CGDisplayRelease.restype = CGError
quartz.CGDisplayRelease.argtypes = [CGDirectDisplayID]
quartz.CGDisplayCopyDisplayMode.restype = c_void_p
quartz.CGDisplayCopyDisplayMode.argtypes = [CGDirectDisplayID]
quartz.CGDisplayModeGetRefreshRate.restype = c_double
quartz.CGDisplayModeGetRefreshRate.argtypes = [c_void_p]
quartz.CGDisplayModeRetain.restype = c_void_p
quartz.CGDisplayModeRetain.argtypes = [c_void_p]
quartz.CGDisplayModeRelease.restype = None
quartz.CGDisplayModeRelease.argtypes = [c_void_p]
quartz.CGDisplayModeGetWidth.restype = c_size_t
quartz.CGDisplayModeGetWidth.argtypes = [c_void_p]
quartz.CGDisplayModeGetHeight.restype = c_size_t
quartz.CGDisplayModeGetHeight.argtypes = [c_void_p]
quartz.CGDisplayModeCopyPixelEncoding.restype = c_void_p
quartz.CGDisplayModeCopyPixelEncoding.argtypes = [c_void_p]
quartz.CGGetActiveDisplayList.restype = CGError
quartz.CGGetActiveDisplayList.argtypes = [c_uint32, POINTER(CGDirectDisplayID), POINTER(c_uint32)]
quartz.CGDisplayBounds.restype = CGRect
quartz.CGDisplayBounds.argtypes = [CGDirectDisplayID]
quartz.CGImageSourceCreateWithData.restype = c_void_p
quartz.CGImageSourceCreateWithData.argtypes = [c_void_p, c_void_p]
quartz.CGImageSourceCreateImageAtIndex.restype = c_void_p
quartz.CGImageSourceCreateImageAtIndex.argtypes = [c_void_p, c_size_t, c_void_p]
quartz.CGImageSourceCopyPropertiesAtIndex.restype = c_void_p
quartz.CGImageSourceCopyPropertiesAtIndex.argtypes = [c_void_p, c_size_t, c_void_p]
quartz.CGImageGetDataProvider.restype = c_void_p
quartz.CGImageGetDataProvider.argtypes = [c_void_p]
quartz.CGDataProviderCopyData.restype = c_void_p
quartz.CGDataProviderCopyData.argtypes = [c_void_p]
quartz.CGDataProviderCreateWithCFData.restype = c_void_p
quartz.CGDataProviderCreateWithCFData.argtypes = [c_void_p]
quartz.CGImageCreate.restype = c_void_p
quartz.CGImageCreate.argtypes = [c_size_t, c_size_t, c_size_t, c_size_t, c_size_t, c_void_p, c_uint32, c_void_p, c_void_p, c_bool, c_int]
quartz.CGImageRelease.restype = None
quartz.CGImageRelease.argtypes = [c_void_p]
quartz.CGImageGetBytesPerRow.restype = c_size_t
quartz.CGImageGetBytesPerRow.argtypes = [c_void_p]
quartz.CGImageGetWidth.restype = c_size_t
quartz.CGImageGetWidth.argtypes = [c_void_p]
quartz.CGImageGetHeight.restype = c_size_t
quartz.CGImageGetHeight.argtypes = [c_void_p]
quartz.CGImageGetBitsPerPixel.restype = c_size_t
quartz.CGImageGetBitsPerPixel.argtypes = [c_void_p]
quartz.CGImageGetBitmapInfo.restype = CGBitmapInfo
quartz.CGImageGetBitmapInfo.argtypes = [c_void_p]
quartz.CGColorSpaceCreateDeviceRGB.restype = c_void_p
quartz.CGColorSpaceCreateDeviceRGB.argtypes = []
quartz.CGDataProviderRelease.restype = None
quartz.CGDataProviderRelease.argtypes = [c_void_p]
quartz.CGColorSpaceRelease.restype = None
quartz.CGColorSpaceRelease.argtypes = [c_void_p]
quartz.CGWarpMouseCursorPosition.restype = CGError
quartz.CGWarpMouseCursorPosition.argtypes = [CGPoint]
quartz.CGDisplayMoveCursorToPoint.restype = CGError
quartz.CGDisplayMoveCursorToPoint.argtypes = [CGDirectDisplayID, CGPoint]
quartz.CGAssociateMouseAndMouseCursorPosition.restype = CGError
quartz.CGAssociateMouseAndMouseCursorPosition.argtypes = [c_bool]
quartz.CGBitmapContextCreate.restype = c_void_p
quartz.CGBitmapContextCreate.argtypes = [c_void_p, c_size_t, c_size_t, c_size_t, c_size_t, c_void_p, CGBitmapInfo]
quartz.CGBitmapContextCreateImage.restype = c_void_p
quartz.CGBitmapContextCreateImage.argtypes = [c_void_p]
quartz.CGFontCreateWithDataProvider.restype = c_void_p
quartz.CGFontCreateWithDataProvider.argtypes = [c_void_p]
quartz.CGFontCreateWithFontName.restype = c_void_p
quartz.CGFontCreateWithFontName.argtypes = [c_void_p]
quartz.CGContextDrawImage.restype = None
quartz.CGContextDrawImage.argtypes = [c_void_p, CGRect, c_void_p]
quartz.CGContextRelease.restype = None
quartz.CGContextRelease.argtypes = [c_void_p]
quartz.CGContextSetTextPosition.restype = None
quartz.CGContextSetTextPosition.argtypes = [c_void_p, CGFloat, CGFloat]
quartz.CGContextSetShouldAntialias.restype = None
quartz.CGContextSetShouldAntialias.argtypes = [c_void_p, c_bool]
######################################################################
# CORETEXT
ct = cdll.LoadLibrary(util.find_library('CoreText'))
# Types
CTFontOrientation = c_uint32 # CTFontDescriptor.h
CTFontSymbolicTraits = c_uint32 # CTFontTraits.h
# CoreText constants
kCTFontAttributeName = c_void_p.in_dll(ct, 'kCTFontAttributeName')
kCTFontFamilyNameAttribute = c_void_p.in_dll(ct, 'kCTFontFamilyNameAttribute')
kCTFontSymbolicTrait = c_void_p.in_dll(ct, 'kCTFontSymbolicTrait')
kCTFontWeightTrait = c_void_p.in_dll(ct, 'kCTFontWeightTrait')
kCTFontTraitsAttribute = c_void_p.in_dll(ct, 'kCTFontTraitsAttribute')
# constants from CTFontTraits.h
kCTFontItalicTrait = (1 << 0)
kCTFontBoldTrait = (1 << 1)
ct.CTLineCreateWithAttributedString.restype = c_void_p
ct.CTLineCreateWithAttributedString.argtypes = [c_void_p]
ct.CTLineDraw.restype = None
ct.CTLineDraw.argtypes = [c_void_p, c_void_p]
ct.CTFontGetBoundingRectsForGlyphs.restype = CGRect
ct.CTFontGetBoundingRectsForGlyphs.argtypes = [c_void_p, CTFontOrientation, POINTER(CGGlyph), POINTER(CGRect), CFIndex]
ct.CTFontGetAdvancesForGlyphs.restype = c_double
ct.CTFontGetAdvancesForGlyphs.argtypes = [c_void_p, CTFontOrientation, POINTER(CGGlyph), POINTER(CGSize), CFIndex]
ct.CTFontGetAscent.restype = CGFloat
ct.CTFontGetAscent.argtypes = [c_void_p]
ct.CTFontGetDescent.restype = CGFloat
ct.CTFontGetDescent.argtypes = [c_void_p]
ct.CTFontGetSymbolicTraits.restype = CTFontSymbolicTraits
ct.CTFontGetSymbolicTraits.argtypes = [c_void_p]
ct.CTFontGetGlyphsForCharacters.restype = c_bool
ct.CTFontGetGlyphsForCharacters.argtypes = [c_void_p, POINTER(UniChar), POINTER(CGGlyph), CFIndex]
ct.CTFontCreateWithGraphicsFont.restype = c_void_p
ct.CTFontCreateWithGraphicsFont.argtypes = [c_void_p, CGFloat, c_void_p, c_void_p]
ct.CTFontCopyFamilyName.restype = c_void_p
ct.CTFontCopyFamilyName.argtypes = [c_void_p]
ct.CTFontCopyFullName.restype = c_void_p
ct.CTFontCopyFullName.argtypes = [c_void_p]
ct.CTFontCreateWithFontDescriptor.restype = c_void_p
ct.CTFontCreateWithFontDescriptor.argtypes = [c_void_p, CGFloat, c_void_p]
ct.CTFontDescriptorCreateWithAttributes.restype = c_void_p
ct.CTFontDescriptorCreateWithAttributes.argtypes = [c_void_p]
######################################################################
# FOUNDATION
foundation = cdll.LoadLibrary(util.find_library('Foundation'))
foundation.NSMouseInRect.restype = c_bool
foundation.NSMouseInRect.argtypes = [NSPoint, NSRect, c_bool]
| gpl-2.0 |
surgebiswas/poker | PokerBots_2017/Johnny/theano/gof/lazylinker_c.py | 7 | 5872 | import errno
import logging
import os
from six.moves import reload_module as reload
import sys
import warnings
import theano
from theano import config
from theano.gof.compilelock import get_lock, release_lock
from theano.gof import cmodule
# Module-level logger for compile/load progress messages.
_logger = logging.getLogger('theano.gof.lazylinker_c')

# When True, skip any cached build and always recompile the C module.
force_compile = False
version = 0.21  # must match constant returned in function get_version()
lazylinker_ext = None  # bound by try_import() below
def try_import():
    """Import lazylinker_ext from the compiledir, binding the module global.

    The compiledir is temporarily prepended to sys.path so this
    particular build wins over any other lazylinker_ext on the path.
    """
    global lazylinker_ext
    sys.path.insert(0, config.compiledir)
    import lazylinker_ext  # noqa
    del sys.path[0]
def try_reload():
    """Re-import the already-loaded lazylinker_ext from the compiledir."""
    sys.path.insert(0, config.compiledir)
    reload(lazylinker_ext)
    del sys.path[0]
# Module-level bootstrap: try to import a previously compiled lazylinker_ext
# of the right version; on any failure, take the compile lock and (re)build
# the C extension from lazylinker_c.c before importing it.
try:
    # See gh issue #728 for why these lines are here. Summary: compiledir must
    # be at the beginning of the path to avoid conflicts with any other
    # lazylinker_ext modules that might exist (this step handled in try_import
    # and try_reload). An __init__.py file must be created for the same reason.
    # Note that these lines may seem redundant (they are repeated in
    # compile_str()) but if another lazylinker_ext does exist then it will be
    # imported and compile_str won't get called at all.
    location = os.path.join(config.compiledir, 'lazylinker_ext')
    if not os.path.exists(location):
        try:
            # Try to make the location
            os.mkdir(location)
        except OSError as e:
            # If we get an error, verify that the error was # 17, the
            # path already exists, and that it is a directory Note: we
            # can't check if it exists before making it, because we
            # are not holding the lock right now, so we could race
            # another process and get error 17 if we lose the race
            assert e.errno == errno.EEXIST
            assert os.path.isdir(location)
    init_file = os.path.join(location, '__init__.py')
    if not os.path.exists(init_file):
        try:
            open(init_file, 'w').close()
        except IOError as e:
            if os.path.exists(init_file):
                pass  # has already been created
            else:
                e.args += ('%s exist? %s' % (location,
                           os.path.exists(location)),)
                raise
    _need_reload = False
    if force_compile:
        raise ImportError()
    else:
        try_import()
        _need_reload = True
        if version != getattr(lazylinker_ext, '_version', None):
            raise ImportError()
except ImportError:
    # Import failed (missing, stale version, or forced): compile under lock.
    get_lock()
    try:
        # Maybe someone else already finished compiling it while we were
        # waiting for the lock?
        try:
            if force_compile:
                raise ImportError()
            if _need_reload:
                # The module was successfully imported earlier: we need to
                # reload it to check if the version was updated.
                try_reload()
            else:
                try_import()
                _need_reload = True
            if version != getattr(lazylinker_ext, '_version', None):
                raise ImportError()
        except ImportError:
            # It is useless to try to compile if there isn't any
            # compiler! But we still want to try to load it, in case
            # the cache was copied from another computer.
            if not theano.config.cxx:
                raise
            _logger.info("Compiling new CVM")
            dirname = 'lazylinker_ext'
            cfile = os.path.join(theano.__path__[0], 'gof', 'lazylinker_c.c')
            if not os.path.exists(cfile):
                # This can happen in not normal case. We just
                # disable the c clinker. If we are here the user
                # didn't disable the compiler, so print a warning.
                warnings.warn(
                    "The file lazylinker_c.c is not available. This do"
                    "not happen normally. You are probably in a strange"
                    "setup. This mean Theano can not use the cvm:"
                    "our c execution engine for Theano function. If you"
                    "want to remove this warning, use the Theano flag"
                    "'cxx=' (set to an empty string) to disable all c"
                    "code generation."
                )
                raise ImportError("The file lazylinker_c.c is not available.")
            code = open(cfile).read()
            loc = os.path.join(config.compiledir, dirname)
            if not os.path.exists(loc):
                try:
                    os.mkdir(loc)
                except OSError as e:
                    # Same benign race as above: the directory may already exist.
                    assert e.errno == errno.EEXIST
                    assert os.path.exists(loc)
            args = cmodule.GCC_compiler.compile_args()
            cmodule.GCC_compiler.compile_str(dirname, code, location=loc,
                                             preargs=args)
            # Save version into the __init__.py file.
            init_py = os.path.join(loc, '__init__.py')
            open(init_py, 'w').write('_version = %s\n' % version)
            # If we just compiled the module for the first time, then it was
            # imported at the same time: we need to make sure we do not
            # reload the now outdated __init__.pyc below.
            init_pyc = os.path.join(loc, '__init__.pyc')
            if os.path.isfile(init_pyc):
                os.remove(init_pyc)
            try_import()
            try_reload()
            # Sanity check: the freshly built C module must report the
            # version we just wrote into __init__.py.
            from lazylinker_ext import lazylinker_ext as lazy_c
            assert (lazylinker_ext._version ==
                    lazy_c.get_version())
            _logger.info("New version %s", lazylinker_ext._version)
    finally:
        # Release lock on compilation directory.
        release_lock()

# Re-export the C module's names at this module's top level.
from lazylinker_ext.lazylinker_ext import *  # noqa
assert force_compile or (version == get_version())
| mit |
kingvuplus/Test-OBH | lib/python/Plugins/Extensions/DVDBurn/DVDTitle.py | 24 | 6506 | from Components.config import config, ConfigSubsection, ConfigSubList, ConfigInteger, ConfigText, ConfigSelection, getConfigListEntry, ConfigSequence, ConfigYesNo
import TitleCutter
class ConfigFixedText(ConfigText):
    """A ConfigText whose content is fixed: all key presses are ignored."""

    def __init__(self, text, visible_width=60):
        # fixed_size=True keeps the underlying ConfigText from resizing.
        ConfigText.__init__(self, default = text, fixed_size = True, visible_width = visible_width)

    def handleKey(self, key):
        # Deliberately ignore all input: the text can not be edited.
        pass
class DVDTitle:
    """One title (recording) of a DVD project: source service metadata,
    cut list, chapter marks and per-title menu/audio configuration."""

    def __init__(self, project):
        # Per-title configuration shown in the title properties dialog.
        self.properties = ConfigSubsection()
        self.properties.menutitle = ConfigText(fixed_size = False, visible_width = 80)
        self.properties.menusubtitle = ConfigText(fixed_size = False, visible_width = 80)
        self.properties.aspect = ConfigSelection(choices = [("4:3", _("4:3")), ("16:9", _("16:9"))])
        self.properties.widescreen = ConfigSelection(choices = [("nopanscan", "nopanscan"), ("noletterbox", "noletterbox")])
        self.properties.autochapter = ConfigInteger(default = 0, limits = (0, 60))
        self.properties.audiotracks = ConfigSubList()
        # Metadata read from the DVB recording (filled in by addService).
        self.DVBname = _("Title")
        self.DVBdescr = _("Description")
        self.DVBchannel = _("Channel")
        self.cuesheet = [ ]          # raw (pts, type) cut entries from the editor
        self.source = None           # service reference of the source recording
        self.filesize = 0            # size of the input file in bytes
        self.estimatedDiskspace = 0  # filesize scaled by the kept (cut) portion
        self.inputfile = ""
        self.cutlist = [ ]           # strictly alternating in/out pts list
        self.chaptermarks = [ ]      # chapter pts, relocated into the cut timeline
        self.timeCreate = None       # localtime struct of the recording, if known
        self.VideoType = -1
        self.project = project
        self.length = 0              # length in seconds (after cuts)

    def addService(self, service):
        """Remember *service* as this title's source and read its
        name/description/channel/creation time/length/size."""
        from os import path
        from enigma import eServiceCenter, iServiceInformation
        from ServiceReference import ServiceReference
        from time import localtime, time
        self.source = service
        serviceHandler = eServiceCenter.getInstance()
        info = serviceHandler.info(service)
        sDescr = info and info.getInfoString(service, iServiceInformation.sDescription) or ""
        self.DVBdescr = sDescr
        sTimeCreate = info.getInfo(service, iServiceInformation.sTimeCreate)
        if sTimeCreate > 1:
            self.timeCreate = localtime(sTimeCreate)
        serviceref = ServiceReference(info.getInfoString(service, iServiceInformation.sServiceref))
        name = info and info.getName(service) or "Title" + sDescr
        self.DVBname = name
        self.DVBchannel = serviceref.getServiceName()
        self.inputfile = service.getPath()
        self.filesize = path.getsize(self.inputfile)
        self.estimatedDiskspace = self.filesize
        self.length = info.getLength(service)

    def addFile(self, filename):
        """Add a plain file as this title's source and open the cutlist editor."""
        from enigma import eServiceReference
        ref = eServiceReference(1, 0, filename)
        self.addService(ref)
        self.project.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, self)

    def titleEditDone(self, cutlist):
        """Callback from the cutlist editor: store the raw cuts and
        derive the final cutlist / chapter marks."""
        self.initDVDmenuText(len(self.project.titles))
        self.cuesheet = cutlist
        self.produceFinalCuesheet()

    def initDVDmenuText(self, track):
        """Fill the menu title/subtitle from the project's menu template."""
        s = self.project.menutemplate.settings
        self.properties.menutitle.setValue(self.formatDVDmenuText(s.titleformat.getValue(), track))
        self.properties.menusubtitle.setValue(self.formatDVDmenuText(s.subtitleformat.getValue(), track))

    def formatDVDmenuText(self, template, track):
        """Expand $-placeholders in a menu text template.

        $i=track number, $t=title, $d=description, $c=chapter count,
        $f=input file, $C=channel, $A=audio track list, $l=length,
        $Y/$M/$D/$T=recording date and time.
        """
        template = template.replace("$i", str(track))
        template = template.replace("$t", self.DVBname)
        template = template.replace("$d", self.DVBdescr)
        template = template.replace("$c", str(len(self.chaptermarks)+1))
        template = template.replace("$f", self.inputfile)
        template = template.replace("$C", self.DVBchannel)
        #if template.find("$A") >= 0:
        # Build the "$A" audio description from the active audio tracks.
        from TitleProperties import languageChoices
        audiolist = [ ]
        for audiotrack in self.properties.audiotracks:
            active = audiotrack.active.getValue()
            if active:
                trackstring = audiotrack.format.getValue()
                language = audiotrack.language.getValue()
                if languageChoices.langdict.has_key(language):
                    trackstring += ' (' + languageChoices.langdict[language] + ')'
                audiolist.append(trackstring)
        audiostring = ', '.join(audiolist)
        template = template.replace("$A", audiostring)
        if template.find("$l") >= 0:
            l = self.length
            lengthstring = "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
            template = template.replace("$l", lengthstring)
        if self.timeCreate:
            template = template.replace("$Y", str(self.timeCreate[0]))
            template = template.replace("$M", str(self.timeCreate[1]))
            template = template.replace("$D", str(self.timeCreate[2]))
            timestring = "%d:%02d" % (self.timeCreate[3], self.timeCreate[4])
            template = template.replace("$T", timestring)
        else:
            template = template.replace("$Y", "").replace("$M", "").replace("$D", "").replace("$T", "")
        return template

    def produceFinalCuesheet(self):
        """Turn the raw cuesheet into a strict in/out cutlist, relocate
        chapter marks into the cut timeline, and update the estimated
        disk usage and effective length."""
        CUT_TYPE_IN = 0
        CUT_TYPE_OUT = 1
        CUT_TYPE_MARK = 2
        CUT_TYPE_LAST = 3
        accumulated_in = 0   # total pts kept so far (sum of in..out segments)
        accumulated_at = 0   # pts position of the last "out"
        last_in = 0          # pts position of the last "in"
        self.cutlist = [ ]
        self.chaptermarks = [ ]
        # our demuxer expects *strictly* IN,OUT lists.
        currently_in = not any(type == CUT_TYPE_IN for pts, type in self.cuesheet)
        if currently_in:
            self.cutlist.append(0) # emulate "in" at first
        for (pts, type) in self.cuesheet:
            #print "pts=", pts, "type=", type, "accumulated_in=", accumulated_in, "accumulated_at=", accumulated_at, "last_in=", last_in
            if type == CUT_TYPE_IN and not currently_in:
                self.cutlist.append(pts)
                last_in = pts
                currently_in = True
            if type == CUT_TYPE_OUT and currently_in:
                self.cutlist.append(pts)
                # accumulate the segment
                accumulated_in += pts - last_in
                accumulated_at = pts
                currently_in = False
            if type == CUT_TYPE_MARK and currently_in:
                # relocate chaptermark against "in" time. This is not 100% accurate,
                # as the in/out points are not.
                reloc_pts = pts - last_in + accumulated_in
                self.chaptermarks.append(reloc_pts)
        if len(self.cutlist) > 1:
            # Scale the estimated disk usage by the fraction actually kept.
            part = accumulated_in / (self.length*90000.0)
            usedsize = int ( part * self.filesize )
            self.estimatedDiskspace = usedsize
            self.length = accumulated_in / 90000

    def getChapterMarks(self, template="$h:$m:$s.$t"):
        """Return the chapter positions formatted with *template*
        ($h/$m/$s/$t = hours/minutes/seconds/milliseconds).  If no manual
        marks exist, auto-generate one every 'autochapter' minutes."""
        timestamps = [ ]
        chapters = [ ]
        minutes = self.properties.autochapter.getValue()
        if len(self.chaptermarks) < 1 and minutes > 0:
            chapterpts = 0
            # 90000 pts per second; stop so the last chapter is not too close to the end.
            while chapterpts < (self.length-60*minutes)*90000:
                chapterpts += 90000 * 60 * minutes
                chapters.append(chapterpts)
        else:
            chapters = self.chaptermarks
        for p in chapters:
            timestring = template.replace("$h", str(p / (90000 * 3600)))
            timestring = timestring.replace("$m", ("%02d" % (p % (90000 * 3600) / (90000 * 60))))
            timestring = timestring.replace("$s", ("%02d" % (p % (90000 * 60) / 90000)))
            timestring = timestring.replace("$t", ("%03d" % ((p % 90000) / 90)))
            timestamps.append(timestring)
        return timestamps
| gpl-2.0 |
mwest1066/PrairieLearn | elements/pl-integer-input/pl-integer-input.py | 2 | 8803 | import lxml.html
from html import escape
import chevron
import math
import prairielearn as pl
import numpy as np
import random
WEIGHT_DEFAULT = 1
CORRECT_ANSWER_DEFAULT = None
LABEL_DEFAULT = None
SUFFIX_DEFAULT = None
DISPLAY_DEFAULT = 'inline'
def prepare(element_html, data):
    """Validate the element's attributes and register any inline correct answer."""
    element = lxml.html.fragment_fromstring(element_html)
    pl.check_attribs(
        element,
        ['answers-name'],
        ['weight', 'correct-answer', 'label', 'suffix', 'display'])
    name = pl.get_string_attrib(element, 'answers-name')
    answer = pl.get_integer_attrib(element, 'correct-answer', CORRECT_ANSWER_DEFAULT)
    if answer is None:
        return
    # A correct answer for this name must not already be defined elsewhere.
    if name in data['correct_answers']:
        raise Exception('duplicate correct_answers variable name: %s' % name)
    data['correct_answers'][name] = answer
def render(element_html, data):
    """Render the integer-input element for one panel.

    The panel to render (question, submission, or answer) is selected by
    data['panel']; the HTML is produced from pl-integer-input.mustache.

    Returns the rendered HTML string ('' for the answer panel when no
    true answer exists). Raises ValueError for an invalid score or
    display mode, and Exception for an unknown panel type.
    """
    element = lxml.html.fragment_fromstring(element_html)
    name = pl.get_string_attrib(element, 'answers-name')
    label = pl.get_string_attrib(element, 'label', LABEL_DEFAULT)
    suffix = pl.get_string_attrib(element, 'suffix', SUFFIX_DEFAULT)
    display = pl.get_string_attrib(element, 'display', DISPLAY_DEFAULT)
    if data['panel'] == 'question':
        editable = data['editable']
        raw_submitted_answer = data['raw_submitted_answers'].get(name, None)
        # Get info strings (long and short format help text)
        info_params = {'format': True}
        with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
            info = chevron.render(f, info_params).strip()
        with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
            info_params.pop('format', None)
            info_params['shortformat'] = True
            shortinfo = chevron.render(f, info_params).strip()
        html_params = {
            'question': True,
            'name': name,
            'label': label,
            'suffix': suffix,
            'editable': editable,
            'info': info,
            'shortinfo': shortinfo,
            'uuid': pl.get_uuid()
        }
        partial_score = data['partial_scores'].get(name, {'score': None})
        score = partial_score.get('score', None)
        if score is not None:
            try:
                score = float(score)
                if score >= 1:
                    html_params['correct'] = True
                elif score > 0:
                    html_params['partial'] = math.floor(score * 100)
                else:
                    html_params['incorrect'] = True
            except Exception:
                # BUG FIX: the original raised ValueError('invalid score' + score),
                # which itself raises TypeError when score is not a str and fuses
                # the message otherwise. Format the value into the message instead.
                raise ValueError('invalid score: %s' % score)
        if display == 'inline':
            html_params['inline'] = True
        elif display == 'block':
            html_params['block'] = True
        else:
            raise ValueError('method of display "%s" is not valid (must be "inline" or "block")' % display)
        if raw_submitted_answer is not None:
            html_params['raw_submitted_answer'] = escape(raw_submitted_answer)
        with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
            html = chevron.render(f, html_params).strip()
    elif data['panel'] == 'submission':
        parse_error = data['format_errors'].get(name, None)
        html_params = {
            'submission': True,
            'label': label,
            'parse_error': parse_error,
            'uuid': pl.get_uuid()
        }
        if parse_error is None:
            # Get submitted answer, raising an exception if it does not exist
            a_sub = data['submitted_answers'].get(name, None)
            if a_sub is None:
                raise Exception('submitted answer is None')
            # If answer is in a format generated by pl.to_json, convert it
            # back to a standard type (otherwise, do nothing)
            a_sub = pl.from_json(a_sub)
            html_params['suffix'] = suffix
            html_params['a_sub'] = '{:d}'.format(a_sub)
        else:
            raw_submitted_answer = data['raw_submitted_answers'].get(name, None)
            if raw_submitted_answer is not None:
                html_params['raw_submitted_answer'] = escape(raw_submitted_answer)
        partial_score = data['partial_scores'].get(name, {'score': None})
        score = partial_score.get('score', None)
        if score is not None:
            try:
                score = float(score)
                if score >= 1:
                    html_params['correct'] = True
                elif score > 0:
                    html_params['partial'] = math.floor(score * 100)
                else:
                    html_params['incorrect'] = True
            except Exception:
                # Same fix as above: format the bad value into the message.
                raise ValueError('invalid score: %s' % score)
        with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
            html = chevron.render(f, html_params).strip()
    elif data['panel'] == 'answer':
        a_tru = pl.from_json(data['correct_answers'].get(name, None))
        if a_tru is not None:
            html_params = {'answer': True, 'label': label, 'a_tru': '{:d}'.format(a_tru), 'suffix': suffix}
            with open('pl-integer-input.mustache', 'r', encoding='utf-8') as f:
                html = chevron.render(f, html_params).strip()
        else:
            html = ''
    else:
        raise Exception('Invalid panel type: %s' % data['panel'])
    return html
def parse(element_html, data):
    """Parse the raw submission for this element into an integer.

    On success the parsed value is stored (pl.to_json-encoded) in
    data['submitted_answers']; on any failure a message is recorded in
    data['format_errors'] and the stored answer is set to None.
    """
    element = lxml.html.fragment_fromstring(element_html)
    name = pl.get_string_attrib(element, 'answers-name')
    submitted = data['submitted_answers'].get(name, None)
    # Missing submission: record the error and normalise the stored value.
    if submitted is None:
        data['format_errors'][name] = 'No submitted answer.'
        data['submitted_answers'][name] = None
        return
    try:
        parsed = pl.string_to_integer(submitted)
        if parsed is None:
            raise ValueError('invalid submitted answer (wrong type)')
        if not np.isfinite(parsed):
            raise ValueError('invalid submitted answer (not finite)')
        data['submitted_answers'][name] = pl.to_json(parsed)
    except Exception:
        data['format_errors'][name] = 'Invalid format. The submitted answer was not an integer.'
        data['submitted_answers'][name] = None
def grade(element_html, data):
    """Score the submitted integer against the true answer (all-or-nothing).

    If no true answer exists, grading is left to the question code and
    nothing is recorded.
    """
    element = lxml.html.fragment_fromstring(element_html)
    name = pl.get_string_attrib(element, 'answers-name')
    weight = pl.get_integer_attrib(element, 'weight', WEIGHT_DEFAULT)
    # No true answer: create no grade at all.
    a_tru = pl.from_json(data['correct_answers'].get(name, None))
    if a_tru is None:
        return
    # Missing submission scores zero.
    a_sub = data['submitted_answers'].get(name, None)
    if a_sub is None:
        data['partial_scores'][name] = {'score': 0, 'weight': weight}
        return
    # Submission may be pl.to_json-encoded; normalise, then compare as ints.
    a_sub = pl.from_json(a_sub)
    score = 1 if int(a_tru) == int(a_sub) else 0
    data['partial_scores'][name] = {'score': score, 'weight': weight}
def test(element_html, data):
    """Generate a random test submission: correct, incorrect, or invalid.

    Correct/incorrect/invalid are chosen with relative weights 5:5:1.
    """
    element = lxml.html.fragment_fromstring(element_html)
    name = pl.get_string_attrib(element, 'answers-name')
    weight = pl.get_integer_attrib(element, 'weight', WEIGHT_DEFAULT)
    # The correct answer may be pl.to_json-encoded; normalise it first.
    a_tru = pl.from_json(data['correct_answers'][name])
    result = random.choices(['correct', 'incorrect', 'invalid'], [5, 5, 1])[0]
    if result == 'correct':
        data['raw_submitted_answers'][name] = str(a_tru)
        data['partial_scores'][name] = {'score': 1, 'weight': weight}
    elif result == 'incorrect':
        # Perturb the true answer by a nonzero signed offset.
        delta = random.randint(1, 11) * random.choice([-1, 1])
        data['raw_submitted_answers'][name] = str(a_tru + delta)
        data['partial_scores'][name] = {'score': 0, 'weight': weight}
    elif result == 'invalid':
        # FIXME: add more invalid expressions, make text of format_errors
        # correct, and randomize
        bad = '1 + 2' if random.choice([True, False]) else '3.4'
        data['raw_submitted_answers'][name] = bad
        data['format_errors'][name] = 'invalid'
    else:
        raise Exception('invalid result: %s' % result)
| agpl-3.0 |
kraziegent/mysql-5.6 | xtrabackup/test/python/subunit/details.py | 64 | 3951 | #
# subunit: extensions to Python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Handlers for outcome details."""
from testtools import content, content_type
from testtools.compat import _b, BytesIO
from subunit import chunked
end_marker = _b("]\n")
quoted_marker = _b(" ]")
empty = _b('')
class DetailsParser(object):
    """Base class/API reference for details parsing.

    Concrete subclasses (SimpleDetailsParser, MultipartDetailsParser)
    provide lineReceived(line), get_details() and get_message().
    """
class SimpleDetailsParser(DetailsParser):
    """Parser for single-part [] delimited details.

    Accumulates raw bytes until the closing ']' marker, then exposes the
    accumulated message as a single Content object.
    """

    def __init__(self, state):
        self._message = _b("")
        self._state = state

    def lineReceived(self, line):
        if line == end_marker:
            # Closing ']' line: hand control back to the protocol state.
            self._state.endDetails()
        elif line[0:2] == quoted_marker:
            # A payload line starting with ']' arrives quoted as ' ]...';
            # drop the escaping space.
            self._message += line[1:]
        else:
            self._message += line

    def get_details(self, style=None):
        result = {}
        if style == 'skip':
            part_name = 'reason'
        elif style:
            part_name = 'message'
        else:
            part_name = None
        if part_name is None:
            # We know that subunit/testtools serialise [] formatted
            # tracebacks as utf8, but perhaps we need a ReplacingContent
            # or something like that.
            mime = content_type.ContentType(
                "text", "x-traceback", {"charset": "utf8"})
            result['traceback'] = content.Content(mime, lambda: [self._message])
        else:
            mime = content_type.ContentType("text", "plain")
            result[part_name] = content.Content(mime, lambda: [self._message])
        return result

    def get_message(self):
        return self._message
class MultipartDetailsParser(DetailsParser):
    """Parser for multi-part [] surrounded MIME typed chunked details.

    Implemented as a small state machine: _parse_state points at the
    handler for the next expected line (content-type line, part name,
    or chunked body data).
    """
    def __init__(self, state):
        self._state = state
        # name -> testtools content.Content, filled as parts complete.
        self._details = {}
        self._parse_state = self._look_for_content
    def _look_for_content(self, line):
        """Expect the closing ']' marker or a 'field main/sub' MIME-type line."""
        if line == end_marker:
            self._state.endDetails()
            return
        # TODO error handling
        field, value = line[:-1].decode('utf8').split(' ', 1)
        try:
            main, sub = value.split('/')
        except ValueError:
            raise ValueError("Invalid MIME type %r" % value)
        self._content_type = content_type.ContentType(main, sub)
        self._parse_state = self._get_name
    def _get_name(self, line):
        """Read the part's name, then switch to chunked body decoding."""
        self._name = line[:-1].decode('utf8')
        self._body = BytesIO()
        self._chunk_parser = chunked.Decoder(self._body)
        self._parse_state = self._feed_chunks
    def _feed_chunks(self, line):
        """Feed one line to the chunk decoder; store the part when it ends."""
        residue = self._chunk_parser.write(line)
        if residue is not None:
            # Line based use always ends on no residue.
            assert residue == empty, 'residue: %r' % (residue,)
            # Bind body locally so the lambda captures this part's buffer,
            # not whatever self._body points at later.
            body = self._body
            self._details[self._name] = content.Content(
                self._content_type, lambda:[body.getvalue()])
            self._chunk_parser.close()
            self._parse_state = self._look_for_content
    def get_details(self, for_skip=False):
        return self._details
    def get_message(self):
        # Multipart details have no single message body.
        return None
    def lineReceived(self, line):
        # Dispatch to the current state handler.
        self._parse_state(line)
| gpl-2.0 |
zzxuanyuan/root | build/unix/git_coreteam.py | 30 | 7379 | #!/usr/bin/python
#Author: Timur Pocheptsov, 17/03/2014.
#A script to generate rootcoreteam.h from git stats.
from urllib import urlopen
from HTMLParser import HTMLParser
header1 = r"""#ifndef ROOT_ROOTCoreTeam
#define ROOT_ROOTCoreTeam
namespace ROOT {
namespace ROOTX {
//This file is automatically generated with names from http://root.cern.ch/gitstats/authors.html.
//The names are sorted in an alphabetical order (sorted by a family name).
//Please note the structure: it should be always like this - names as
//string literals in an array's initializer
//with a terminating 0 - that's what our rootxx.cxx and rootx-cocoa.mm expect.
//The array (or its elements actually) has an internal linkage
//(it has a definition here, not in rootxx.cxx or rootx-cocoa.mm files.
//Please, do not modify this file.
const char * gROOTCoreTeam[] = {
"""
header2 = r"""
0};
}
}
#endif
"""
class ParserState:
    """Enumeration of GitStatsParser states.

    waiting must be the first, done and errorFound the last:
    GitStatsParser.isParsing() relies on this numeric ordering.
    """
    waiting = 0 #initial idle state
    started = 1 #<table> found, waiting for a header row.
    parsingHeaderRow = 2
    parsingHeaderCell = 3
    parsingRow = 4 #we've parsed a header and now reading the table.
    parsingCell = 5
    done = 6 #we stopped after the first table.
    errorFound = 7 #something bad happened.
class GitStatsParser(HTMLParser) :
    """HTML parser extracting author names from the first <table> found.

    The header row is scanned for an 'Author' column; that column's cell
    text from every subsequent row is collected into self.table. Any
    unexpected tag sequence moves the parser to ParserState.errorFound.
    (Python 2 HTMLParser API.)
    """
    def __init__(self) :
        HTMLParser.__init__(self)
        self.table = []
        self.state = ParserState.waiting
    #a couple of aux. functions.
    def isParsing(self) :
        return self.state > ParserState.waiting and self.state < ParserState.done
    def isParsingCell(self) :
        return self.state == ParserState.parsingCell or self.state == ParserState.parsingHeaderCell
    #start tags. we are interested only in the first <table> and
    #any of <tr>, <th>, <td> (in the correct order and place).
    def handle_starttag(self, tag, attrs):
        """State transitions for opening <table>, <tr>, <th>, <td> tags."""
        if self.state >= ParserState.done :#done or error.
            return
        if tag == 'table' :
            #we need only one table, when the state is 'waiting'.
            if self.state == ParserState.waiting :
                self.state = ParserState.started
            else :
                #any other state and <table> means an error.
                self.state = ParserState.errorFound
        elif not self.isParsing() :#ignore tags outside of our table.
            return
        if tag == 'tr' :
            if self.state == ParserState.parsingRow :
                #we can be here only after a valid header row.
                self.currentColumn = 0
            #header is <tr><th>...</th>...<th>...</th></tr>
            elif self.state == ParserState.started :
                #<tr> in every other state is an error.
                self.state = ParserState.parsingHeaderRow
                self.nFields = 0
                self.authorIndex = -1
            else :
                self.state = ParserState.errorFound
        elif tag == 'td' :
            if self.state == ParserState.parsingRow :
                #valid td can happen only inside a table's row.
                self.state = ParserState.parsingCell
                self.cellData = ''
            else :
                self.state = ParserState.errorFound
        elif tag == 'th' :
            if self.state == ParserState.parsingHeaderRow :
                self.state = ParserState.parsingHeaderCell
                self.cellData = ''
            else :
                self.state = ParserState.errorFound
    def handle_endtag(self, tag) :
        """State transitions for closing tags; harvests cells on </td>."""
        #we need only the first </table> or any of </th>, </tr>, </td>.
        if not self.isParsing() : #waiting, done or error.
            return
        if tag == 'table' :
            if self.state == ParserState.parsingRow :
                self.state = ParserState.done
            else :
                #??? unexpected </table> !!!
                self.state = ParserState.errorFound
        elif tag == 'th' :
            #th outside of parsingHeader is an error.
            if self.state == ParserState.parsingHeaderCell :
                self.state = ParserState.parsingHeaderRow
                if self.cellData.strip().lower() == 'author' :
                    if self.authorIndex == -1 :
                        self.authorIndex = self.nFields
                    else :#'Author' more than once.
                        self.state = ParserState.errorFound
                self.nFields += 1
            else :
                self.state = ParserState.errorFound
        elif tag == 'tr' :#</tr> must close <tr> only.
            if self.state == ParserState.parsingRow :
                if self.nFields != self.currentColumn :
                    self.state = ParserState.errorFound
            elif self.state == ParserState.parsingHeaderRow :
                if self.authorIndex == -1 or not self.nFields :
                    self.state = ParserState.errorFound
                else :
                    self.state = ParserState.parsingRow
                    self.currentColumn = 0
            else :
                self.state = ParserState.errorFound
        elif tag == 'td' :#</td> must go after <td>
            if self.state == ParserState.parsingCell :
                if self.currentColumn == self.authorIndex :
                    #we got a name!!!
                    name = self.cellData.strip()
                    if name :
                        self.table.append(name)
                self.state = ParserState.parsingRow
                self.currentColumn += 1
            else :
                self.state = ParserState.errorFound
    def handle_data(self, data) :
        """Accumulate character data while inside a <td>/<th> cell."""
        if self.state == ParserState.parsingHeaderCell or self.state == ParserState.parsingCell :
            self.cellData += data
#_____________________________________________________________________
def sort_predicate(x, y) :
    """cmp-style comparator ordering names by family name.

    When both names are exactly two words, compares the second word;
    otherwise compares the full strings.

    NOTE(review): uses the Python 2 builtin cmp() (this module is
    Python 2 only: urllib.urlopen, HTMLParser). Currently unused — the
    names.sort(sort_predicate) call in main() is commented out.
    """
    #Sort names using a family name.
    name1 = x.split(' ')
    name2 = y.split(' ')
    if len(name1) == 2 and len(name2) == 2 :
        return cmp(name1[1], name2[1])
    return cmp(x, y)
#_____________________________________________________________________
def generate_rootheader(names) :
    """Write rootcoreteam.h containing the given author names.

    Emits header1, then the names as a comma-separated list of C string
    literals (with a trailing comma before the terminating 0 in header2).
    Does nothing when names is empty.
    """
    if names :
        # BUG FIX: the original opened the file without ever closing it;
        # 'with' guarantees the handle is flushed and closed, even on error.
        with open("rootcoreteam.h", "w") as output :
            output.write(header1)
            coreTeam = ''
            for name in names :
                if coreTeam :
                    coreTeam += ',\n'
                coreTeam += '\t"' + name + '"'
            coreTeam += ','
            output.write(coreTeam)
            output.write(header2)
#_____________________________________________________________________
def main() :
    """Fetch the gitstats authors page and regenerate rootcoreteam.h.

    Failures (network, parse) are deliberately ignored so the build is
    best-effort and keeps any previously generated header. Unlike the
    original bare 'except:', this no longer swallows KeyboardInterrupt
    or SystemExit.
    """
    try :
        url = "http://root.cern.ch/gitstats/authors.html"
        html = urlopen(url).read()
        if html :
            parser = GitStatsParser()
            parser.feed(html)
            if parser.state != ParserState.errorFound and parser.table :
                names = parser.table
                #fix some problems:
                if 'CristinaCristescu' in names :
                    names.remove('CristinaCristescu')
                    names.append('Cristina Cristescu')
                if 'Stefan Roiser' in names :
                    names.remove('Stefan Roiser')
                    names.append('Valeri Onuchine')
                #names.sort(sort_predicate)
                generate_rootheader(names)
    except Exception :
        # Best effort: leave any previously generated header untouched.
        pass
| lgpl-2.1 |
pabloborrego93/edx-platform | lms/djangoapps/certificates/views/webview.py | 22 | 28049 | # pylint: disable=bad-continuation
"""
Certificate HTML webview.
"""
from datetime import datetime
from uuid import uuid4
import logging
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_str
from badges.events.course_complete import get_completion_badge
from badges.utils import badges_enabled
from courseware.access import has_access
from edxmako.shortcuts import render_to_response
from edxmako.template import Template
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.lib.courses import course_image_url
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from student.models import LinkedInAddToProfileConfiguration
from util import organizations_helpers as organization_api
from util.views import handle_500
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from certificates.api import (
get_active_web_certificate,
get_certificate_url,
emit_certificate_event,
has_html_certificates_enabled,
get_certificate_template,
get_certificate_header_context,
get_certificate_footer_context,
)
from certificates.models import (
GeneratedCertificate,
CertificateStatuses,
CertificateHtmlViewConfiguration,
CertificateSocialNetworks)
log = logging.getLogger(__name__)
def get_certificate_description(mode, certificate_type, platform_name):
    """
    Return a translated description for the certificate type, or None.

    Arguments:
        mode (str): enrollment mode; 'honor', 'verified' and 'xseries'
            each get a distinct description, any other mode yields None.
        certificate_type (str): display name interpolated as {cert_type}.
        platform_name (str): name of the issuing platform.
    """
    certificate_type_description = None
    if mode == 'honor':
        # Translators: This text describes the 'Honor' course certificate type.
        certificate_type_description = _("An {cert_type} certificate signifies that a "
                                         "learner has agreed to abide by the honor code established by {platform_name} "
                                         "and has completed all of the required tasks for this course under its "
                                         "guidelines.").format(cert_type=certificate_type,
                                                               platform_name=platform_name)
    elif mode == 'verified':
        # Translators: This text describes the 'ID Verified' course certificate type, which is a higher level of
        # verification offered by edX. This type of verification is useful for professional education/certifications
        certificate_type_description = _("A {cert_type} certificate signifies that a "
                                         "learner has agreed to abide by the honor code established by {platform_name} "
                                         "and has completed all of the required tasks for this course under its "
                                         "guidelines. A {cert_type} certificate also indicates that the "
                                         "identity of the learner has been checked and "
                                         "is valid.").format(cert_type=certificate_type,
                                                             platform_name=platform_name)
    elif mode == 'xseries':
        # Translators: This text describes the 'XSeries' course certificate type. An XSeries is a collection of
        # courses related to each other in a meaningful way, such as a specific topic or theme, or even an organization
        certificate_type_description = _("An {cert_type} certificate demonstrates a high level of "
                                         "achievement in a program of study, and includes verification of "
                                         "the student's identity.").format(cert_type=certificate_type)
    return certificate_type_description
def _update_certificate_context(context, user_certificate, platform_name):
    """
    Build up the certificate web view context using the provided values
    (Helper method to keep the view clean).

    Mutates ``context`` in place; reads the verify uuid, mode and issue
    date from ``user_certificate``. Expects 'certificate_type',
    'accomplishment_copy_name', 'organization_short_name' and
    'course_number' to already be present in ``context``.
    """
    # Populate dynamic output values using the course/certificate data loaded above
    certificate_type = context.get('certificate_type')

    # Override the defaults with any mode-specific static values
    context['certificate_id_number'] = user_certificate.verify_uuid
    context['certificate_verify_url'] = "{prefix}{uuid}{suffix}".format(
        prefix=context.get('certificate_verify_url_prefix'),
        uuid=user_certificate.verify_uuid,
        suffix=context.get('certificate_verify_url_suffix')
    )

    # Translators: The format of the date includes the full name of the month
    context['certificate_date_issued'] = _('{month} {day}, {year}').format(
        month=user_certificate.modified_date.strftime("%B"),
        day=user_certificate.modified_date.day,
        year=user_certificate.modified_date.year
    )

    # Translators: This text represents the verification of the certificate
    context['document_meta_description'] = _('This is a valid {platform_name} certificate for {user_name}, '
                                             'who participated in {partner_short_name} {course_number}').format(
        platform_name=platform_name,
        user_name=context['accomplishment_copy_name'],
        partner_short_name=context['organization_short_name'],
        course_number=context['course_number']
    )

    # Translators: This text is bound to the HTML 'title' element of the page and appears in the browser title bar
    context['document_title'] = _("{partner_short_name} {course_number} Certificate | {platform_name}").format(
        partner_short_name=context['organization_short_name'],
        course_number=context['course_number'],
        platform_name=platform_name
    )

    # Translators: This text fragment appears after the student's name (displayed in a large font) on the certificate
    # screen. The text describes the accomplishment represented by the certificate information displayed to the user
    context['accomplishment_copy_description_full'] = _("successfully completed, received a passing grade, and was "
                                                        "awarded this {platform_name} {certificate_type} "
                                                        "Certificate of Completion in ").format(
        platform_name=platform_name,
        certificate_type=context.get("certificate_type"))

    certificate_type_description = get_certificate_description(user_certificate.mode, certificate_type, platform_name)
    if certificate_type_description:
        context['certificate_type_description'] = certificate_type_description

    # Translators: This text describes the purpose (and therefore, value) of a course certificate
    context['certificate_info_description'] = _("{platform_name} acknowledges achievements through "
                                                "certificates, which are awarded for course activities "
                                                "that {platform_name} students complete.").format(
        platform_name=platform_name,
        # NOTE(review): tos_url and verified_cert_url are not referenced by the
        # format string above — presumably leftovers; confirm before removing.
        tos_url=context.get('company_tos_url'),
        verified_cert_url=context.get('company_verified_certificate_url'))
def _update_context_with_basic_info(context, course_id, platform_name, configuration):
    """
    Updates context dictionary with basic info required before rendering simplest
    certificate templates.

    Arguments:
        context (dict): mutated in place.
        course_id: course identifier, stored verbatim.
        platform_name (str): interpolated into most boilerplate strings.
        configuration (dict): CertificateHtmlViewConfiguration data; its
            'default' section is merged into the context.
    """
    context['platform_name'] = platform_name
    context['course_id'] = course_id

    # Update the view context with the default ConfigurationModel settings
    context.update(configuration.get('default', {}))

    # Translators: 'All rights reserved' is a legal term used in copyrighting to protect published content
    reserved = _("All rights reserved")
    context['copyright_text'] = u'© {year} {platform_name}. {reserved}.'.format(
        year=settings.COPYRIGHT_YEAR,
        platform_name=platform_name,
        reserved=reserved
    )

    # Translators: This text is bound to the HTML 'title' element of the page and appears
    # in the browser title bar when a requested certificate is not found or recognized
    context['document_title'] = _("Invalid Certificate")

    # Translators: The & characters represent an ampersand character and can be ignored
    context['company_tos_urltext'] = _("Terms of Service & Honor Code")

    # Translators: A 'Privacy Policy' is a legal document/statement describing a website's use of personal information
    context['company_privacy_urltext'] = _("Privacy Policy")

    # Translators: This line appears as a byline to a header image and describes the purpose of the page
    context['logo_subtitle'] = _("Certificate Validation")

    # Translators: Accomplishments describe the awards/certifications obtained by students on this platform
    context['accomplishment_copy_about'] = _('About {platform_name} Accomplishments').format(
        platform_name=platform_name
    )

    # Translators: This line appears on the page just before the generation date for the certificate
    context['certificate_date_issued_title'] = _("Issued On:")

    # Translators: The Certificate ID Number is an alphanumeric value unique to each individual certificate
    context['certificate_id_number_title'] = _('Certificate ID Number')

    context['certificate_info_title'] = _('About {platform_name} Certificates').format(
        platform_name=platform_name
    )

    context['certificate_verify_title'] = _("How {platform_name} Validates Student Certificates").format(
        platform_name=platform_name
    )

    # Translators: This text describes the validation mechanism for a certificate file (known as GPG security)
    context['certificate_verify_description'] = _('Certificates issued by {platform_name} are signed by a gpg key so '
                                                 'that they can be validated independently by anyone with the '
                                                 '{platform_name} public key. For independent verification, '
                                                 '{platform_name} uses what is called a '
                                                 '"detached signature""".').format(platform_name=platform_name)

    context['certificate_verify_urltext'] = _("Validate this certificate for yourself")

    # Translators: This text describes (at a high level) the mission and charter the edX platform and organization
    context['company_about_description'] = _("{platform_name} offers interactive online classes and MOOCs.").format(
        platform_name=platform_name)

    context['company_about_title'] = _("About {platform_name}").format(platform_name=platform_name)

    context['company_about_urltext'] = _("Learn more about {platform_name}").format(platform_name=platform_name)

    context['company_courselist_urltext'] = _("Learn with {platform_name}").format(platform_name=platform_name)

    context['company_careers_urltext'] = _("Work at {platform_name}").format(platform_name=platform_name)

    context['company_contact_urltext'] = _("Contact {platform_name}").format(platform_name=platform_name)

    # Translators: This text appears near the top of the certficate and describes the guarantee provided by edX
    context['document_banner'] = _("{platform_name} acknowledges the following student accomplishment").format(
        platform_name=platform_name
    )
def _update_course_context(request, context, course, platform_name):
    """
    Updates context dictionary with course info.

    Course title from the certificate configuration takes precedence over
    the course display name; display_coursenumber likewise overrides the
    raw course number.
    """
    context['full_course_image_url'] = request.build_absolute_uri(course_image_url(course))
    course_title_from_cert = context['certificate_data'].get('course_title', '')
    accomplishment_copy_course_name = course_title_from_cert if course_title_from_cert else course.display_name
    context['accomplishment_copy_course_name'] = accomplishment_copy_course_name
    course_number = course.display_coursenumber if course.display_coursenumber else course.number
    context['course_number'] = course_number
    if context['organization_long_name']:
        # Translators: This text represents the description of course
        context['accomplishment_copy_course_description'] = _('a course of study offered by {partner_short_name}, '
                                                              'an online learning initiative of '
                                                              '{partner_long_name}.').format(
            partner_short_name=context['organization_short_name'],
            partner_long_name=context['organization_long_name'],
            platform_name=platform_name)
    else:
        # Translators: This text represents the description of course
        context['accomplishment_copy_course_description'] = _('a course of study offered by '
                                                              '{partner_short_name}.').format(
            partner_short_name=context['organization_short_name'],
            platform_name=platform_name)
def _update_social_context(request, context, course, user, user_certificate, platform_name):
    """
    Updates context dictionary with info required for social sharing.

    Reads the SOCIAL_SHARING_SETTINGS (site configuration overrides the
    Django setting) to decide which share buttons (Facebook, Twitter,
    LinkedIn) are enabled and to build their share URLs/texts.
    """
    share_settings = configuration_helpers.get_value("SOCIAL_SHARING_SETTINGS", settings.SOCIAL_SHARING_SETTINGS)
    context['facebook_share_enabled'] = share_settings.get('CERTIFICATE_FACEBOOK', False)
    context['facebook_app_id'] = configuration_helpers.get_value("FACEBOOK_APP_ID", settings.FACEBOOK_APP_ID)
    context['facebook_share_text'] = share_settings.get(
        'CERTIFICATE_FACEBOOK_TEXT',
        _("I completed the {course_title} course on {platform_name}.").format(
            course_title=context['accomplishment_copy_course_name'],
            platform_name=platform_name
        )
    )
    context['twitter_share_enabled'] = share_settings.get('CERTIFICATE_TWITTER', False)
    context['twitter_share_text'] = share_settings.get(
        'CERTIFICATE_TWITTER_TEXT',
        _("I completed a course at {platform_name}. Take a look at my certificate.").format(
            platform_name=platform_name
        )
    )

    share_url = request.build_absolute_uri(get_certificate_url(course_id=course.id, uuid=user_certificate.verify_uuid))
    context['share_url'] = share_url
    twitter_url = ''
    if context.get('twitter_share_enabled', False):
        twitter_url = 'https://twitter.com/intent/tweet?text={twitter_share_text}&url={share_url}'.format(
            twitter_share_text=smart_str(context['twitter_share_text']),
            share_url=urllib.quote_plus(smart_str(share_url))
        )
    context['twitter_url'] = twitter_url
    context['linked_in_url'] = None
    # If enabled, show the LinkedIn "add to profile" button
    # Clicking this button sends the user to LinkedIn where they
    # can add the certificate information to their profile.
    linkedin_config = LinkedInAddToProfileConfiguration.current()
    linkedin_share_enabled = share_settings.get('CERTIFICATE_LINKEDIN', linkedin_config.enabled)
    if linkedin_share_enabled:
        context['linked_in_url'] = linkedin_config.add_to_profile_url(
            course.id,
            course.display_name,
            user_certificate.mode,
            smart_str(share_url)
        )
def _update_context_with_user_info(context, user, user_certificate):
    """
    Updates context dictionary with user related info.

    Pulls the learner's full name from their profile and the mode from
    the certificate; builds the congratulatory copy around them.
    """
    user_fullname = user.profile.name
    context['username'] = user.username
    context['course_mode'] = user_certificate.mode
    context['accomplishment_user_id'] = user.id
    context['accomplishment_copy_name'] = user_fullname
    context['accomplishment_copy_username'] = user.username
    context['accomplishment_more_title'] = _("More Information About {user_name}'s Certificate:").format(
        user_name=user_fullname
    )
    # Translators: This line is displayed to a user who has completed a course and achieved a certification
    context['accomplishment_banner_opening'] = _("{fullname}, you earned a certificate!").format(
        fullname=user_fullname
    )

    # Translators: This line congratulates the user and instructs them to share their accomplishment on social networks
    context['accomplishment_banner_congrats'] = _("Congratulations! This page summarizes what "
                                                  "you accomplished. Show it off to family, friends, and colleagues "
                                                  "in your social and professional networks.")

    # Translators: This line leads the reader to understand more about the certificate that a student has been awarded
    context['accomplishment_copy_more_about'] = _("More about {fullname}'s accomplishment").format(
        fullname=user_fullname
    )
def _get_user_certificate(request, user, course_key, course, preview_mode=None):
    """
    Retrieves user's certificate from db. Creates one in case of preview mode.

    Returns None if there is no certificate generated for given user
    otherwise returns `GeneratedCertificate` instance.
    """
    if preview_mode:
        # Preview from studio: fabricate a transient certificate for course
        # staff instead of reading one from the database.
        is_course_staff = (
            has_access(request.user, 'instructor', course)
            or has_access(request.user, 'staff', course)
        )
        if not is_course_staff:
            return None
        return GeneratedCertificate(
            mode=preview_mode,
            verify_uuid=unicode(uuid4().hex),
            modified_date=datetime.now().date()
        )
    # Learner or public view: only 'downloadable' certificates are eligible.
    try:
        return GeneratedCertificate.eligible_certificates.get(
            user=user,
            course_id=course_key,
            status=CertificateStatuses.downloadable
        )
    except GeneratedCertificate.DoesNotExist:
        return None
def _track_certificate_events(request, context, course, user, user_certificate):
    """
    Tracks web certificate view related events.

    Emits a badge 'evidence_visited' analytics event when the request
    carries an 'evidence_visit' query parameter, and a certificate
    'evidence_visited' event when someone other than the certificate's
    owner views the page.
    """
    # Badge Request Event Tracking Logic
    course_key = course.location.course_key
    if 'evidence_visit' in request.GET:
        badge_class = get_completion_badge(course_key, user)
        if not badge_class:
            log.warning('Visit to evidence URL for badge, but badges not configured for course "%s"', course_key)
            badges = []
        else:
            badges = badge_class.get_for_user(user)
        if badges:
            # There should only ever be one of these.
            badge = badges[0]
            tracker.emit(
                'edx.badge.assertion.evidence_visited',
                {
                    'badge_name': badge.badge_class.display_name,
                    'badge_slug': badge.badge_class.slug,
                    'badge_generator': badge.backend,
                    'issuing_component': badge.badge_class.issuing_component,
                    'user_id': user.id,
                    'course_id': unicode(course_key),
                    'enrollment_mode': badge.badge_class.mode,
                    'assertion_id': badge.id,
                    'assertion_image_url': badge.image_url,
                    'assertion_json_url': badge.assertion_url,
                    'issuer': badge.data.get('issuer'),
                }
            )
        else:
            log.warn(
                "Could not find badge for %s on course %s.",
                user.id,
                course_key,
            )

    # track certificate evidence_visited event for analytics when certificate_user and accessing_user are different
    if request.user and request.user.id != user.id:
        emit_certificate_event('evidence_visited', user, unicode(course.id), course, {
            'certificate_id': user_certificate.verify_uuid,
            'enrollment_mode': user_certificate.mode,
            'social_network': CertificateSocialNetworks.linkedin
        })
def _render_certificate_template(request, context, course, user_certificate):
    """
    Render the certificate, preferring a course/mode specific custom Mako
    template when the feature flag is on and such a template is configured;
    otherwise fall back to the stock ``certificates/valid.html`` template.
    """
    custom_templates_enabled = settings.FEATURES.get('CUSTOM_CERTIFICATE_TEMPLATES_ENABLED', False)
    if custom_templates_enabled:
        custom_template = get_certificate_template(course.id, user_certificate.mode)
        if custom_template:
            mako_template = Template(
                custom_template,
                output_encoding='utf-8',
                input_encoding='utf-8',
                default_filters=['decode.utf8'],
                encoding_errors='replace',
            )
            # Custom templates render against a RequestContext so that
            # context processors (CSRF, etc.) are applied.
            return HttpResponse(mako_template.render(RequestContext(request, context)))
    # Default certificate template.
    return render_to_response("certificates/valid.html", context)
def _update_configuration_context(context, configuration):
    """
    Re-apply site-configuration ("microsite") overrides on top of the context.

    Site Configuration must be able to override any hard-coded content placed
    into the context earlier (e.g. 'company_about_description' talks about
    edX, which configurations most likely want to replace).  This duplicates
    some earlier work on purpose: the configuration is needed at the top of
    the pipeline as well, because some error paths rely on it being set up
    early.
    """
    domain_prefix = configuration_helpers.get_value('domain_prefix')
    microsite_config = configuration.get("microsites", {})
    if not (domain_prefix and microsite_config):
        return
    context.update(microsite_config.get(domain_prefix, {}))
def _update_badge_context(context, course, user):
    """
    Add the user's completion badge (if any) to ``context['badge']``.

    ``context['badge']`` is set to the first badge assertion found, or None
    when badging is disabled, the course does not issue badges, no badge
    class is configured for the course, or the user has not earned one.
    """
    badge = None
    if badges_enabled() and course.issue_badges:
        badge_class = get_completion_badge(course.location.course_key, user)
        # get_completion_badge() may return a falsy value when badges are not
        # configured for the course (see the guard in
        # _track_certificate_events); avoid an AttributeError here.
        badges = badge_class.get_for_user(user) if badge_class else []
        if badges:
            badge = badges[0]
    context['badge'] = badge
def _update_organization_context(context, course):
    """
    Populate ``context`` with partner/organization details for the course.
    """
    long_name = None
    logo = None
    short_name = course.display_organization if course.display_organization else course.org
    organizations = organization_api.get_course_organizations(course_id=course.id)
    if organizations:
        # TODO Need to add support for multiple organizations; currently we
        # are interested only in the first one.
        first_org = organizations[0]
        long_name = first_org.get('name', long_name)
        short_name = first_org.get('short_name', short_name)
        logo = first_org.get('logo', None)
    context.update({
        'organization_long_name': long_name,
        'organization_short_name': short_name,
        'accomplishment_copy_course_org': short_name,
        'organization_logo': logo,
    })
def render_cert_by_uuid(request, certificate_uuid):
    """
    Public view: look up a downloadable certificate by its verify UUID and
    render it as HTML.  Unknown or non-downloadable UUIDs yield a 404.
    """
    try:
        certificate = GeneratedCertificate.eligible_certificates.get(
            status=CertificateStatuses.downloadable,
            verify_uuid=certificate_uuid,
        )
        return render_html_view(request, certificate.user.id, unicode(certificate.course_id))
    except GeneratedCertificate.DoesNotExist:
        raise Http404
@handle_500(
    template_path="certificates/server-error.html",
    test_func=lambda request: request.GET.get('preview', None)
)
def render_html_view(request, user_id, course_id):
    """
    This public view generates an HTML representation of the specified user and course.
    If a certificate is not available, we display a "Sorry!" ("Invalid") screen instead.

    Arguments:
        request: HttpRequest; may carry a ``preview`` GET parameter selecting
            a certificate mode to preview (staff/instructor only).
        user_id: id of the certificate owner (string or int; 404 on non-int).
        course_id: serialized course key string.

    Returns an HttpResponse: either the rendered certificate or the
    "Invalid" template for every failure path.
    """
    try:
        user_id = int(user_id)
    except ValueError:
        raise Http404

    preview_mode = request.GET.get('preview', None)
    platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
    configuration = CertificateHtmlViewConfiguration.get_config()

    # Create the initial view context, bootstrapping with Django settings and passed-in values
    context = {}
    _update_context_with_basic_info(context, course_id, platform_name, configuration)
    invalid_template_path = 'certificates/invalid.html'

    # Kick the user back to the "Invalid" screen if the feature is disabled
    if not has_html_certificates_enabled(course_id):
        log.info(
            "Invalid cert: HTML certificates disabled for %s. User id: %d",
            course_id,
            user_id,
        )
        return render_to_response(invalid_template_path, context)

    # Load the course and user objects
    try:
        course_key = CourseKey.from_string(course_id)
        user = User.objects.get(id=user_id)
        course = modulestore().get_course(course_key)
    # For any other expected exceptions, kick the user back to the "Invalid" screen
    except (InvalidKeyError, ItemNotFoundError, User.DoesNotExist) as exception:
        error_str = (
            "Invalid cert: error finding course %s or user with id "
            "%d. Specific error: %s"
        )
        log.info(error_str, course_id, user_id, str(exception))
        return render_to_response(invalid_template_path, context)

    # Load user's certificate
    user_certificate = _get_user_certificate(request, user, course_key, course, preview_mode)
    if not user_certificate:
        log.info(
            "Invalid cert: User %d does not have eligible cert for %s.",
            user_id,
            course_id,
        )
        return render_to_response(invalid_template_path, context)

    # Get the active certificate configuration for this course
    # If we do not have an active certificate, we'll need to send the user to the "Invalid" screen
    # Passing in the 'preview' parameter, if specified, will return a configuration, if defined
    active_configuration = get_active_web_certificate(course, preview_mode)
    if active_configuration is None:
        log.info(
            "Invalid cert: course %s does not have an active configuration. User id: %d",
            course_id,
            user_id,
        )
        return render_to_response(invalid_template_path, context)
    context['certificate_data'] = active_configuration

    # NOTE: the order of the following updates is significant -- later calls
    # override keys written by earlier ones (mode config -> org -> course ->
    # user -> social -> certificate -> badge -> site config -> course
    # Advanced Settings overrides).
    # Append/Override the existing view context values with any mode-specific ConfigurationModel values
    context.update(configuration.get(user_certificate.mode, {}))
    # Append organization info
    _update_organization_context(context, course)
    # Append course info
    _update_course_context(request, context, course, platform_name)
    # Append user info
    _update_context_with_user_info(context, user, user_certificate)
    # Append social sharing info
    _update_social_context(request, context, course, user, user_certificate, platform_name)
    # Append/Override the existing view context values with certificate specific values
    _update_certificate_context(context, user_certificate, platform_name)
    # Append badge info
    _update_badge_context(context, course, user)
    # Append site configuration overrides
    _update_configuration_context(context, configuration)
    # Add certificate header/footer data to current context
    context.update(get_certificate_header_context(is_secure=request.is_secure()))
    context.update(get_certificate_footer_context())
    # Append/Override the existing view context values with any course-specific static values from Advanced Settings
    context.update(course.cert_html_view_overrides)
    # Track certificate view events
    _track_certificate_events(request, context, course, user, user_certificate)
    # FINALLY, render appropriate certificate
    return _render_certificate_template(request, context, course, user_certificate)
| agpl-3.0 |
rojkov/taskqueue | test.py | 1 | 1623 | #!/usr/bin/env python
import pika
import sys
from optparse import OptionParser
from taskqueue.confparser import ConfigParser, NoSectionError
def parse_cmdline(defaults, argv=None):
    """Parse commandline options.

    Arguments:
        defaults: mapping of option dest name -> default value; overrides the
            hard-coded defaults below.  (Previously this argument was
            accepted but silently ignored.)
        argv: optional list of arguments to parse instead of ``sys.argv[1:]``
            (useful for testing); defaults to None, i.e. the real command line.

    Returns the ``(options, args)`` pair produced by
    ``OptionParser.parse_args``.
    """
    parser = OptionParser()
    parser.add_option("-c", "--config", dest="config",
                      default="/etc/taskqueue/config.ini",
                      help="path to config file")
    parser.add_option("-t", "--content-type", dest="ctype",
                      default="text/plain",
                      help="content type of AMQP message.")
    # Caller-supplied defaults win over the hard-coded ones above.
    parser.set_defaults(**defaults)
    return parser.parse_args(argv)
# Script body: read AMQP connection settings from the config file, publish
# one persistent message onto the durable 'taskqueue' queue, then exit.
# (Python 2 script: note the print statement at the bottom.)
options, args = parse_cmdline({})

config = ConfigParser()
config.read(options.config)
# AMQP connection parameters, with RabbitMQ-style guest/localhost fallbacks.
amqp_items = dict(config.items("amqp"))
amqp_host = amqp_items.get("host", "localhost")
amqp_user = amqp_items.get("user", "guest")
amqp_passwd = amqp_items.get("passwd", "guest")
amqp_vhost = amqp_items.get("vhost", "/")

credentials = pika.PlainCredentials(amqp_user, amqp_passwd)
connection = pika.BlockingConnection(pika.ConnectionParameters(
    credentials=credentials,
    host=amqp_host,
    virtual_host=amqp_vhost))
channel = connection.channel()

# Durable queue so messages survive a broker restart.
channel.queue_declare(queue='taskqueue', durable=True)

# Message body: remaining CLI arguments joined, or a default greeting.
message = ' '.join(args) or "Hello World!"
channel.basic_publish(exchange='',
                      routing_key='taskqueue',
                      body=message,
                      properties=pika.BasicProperties(
                          delivery_mode=2, # make message persistent
                          content_type=options.ctype
                      ))
print " [x] Sent %r" % (message,)
connection.close()
| gpl-2.0 |
datapackages/jsontableschema-py | tests/test_field.py | 2 | 5611 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import json
import pytest
import requests
from functools import partial
from tableschema import Field, exceptions
# Constants
# Minimal valid field descriptor: only the required ``name`` property.
DESCRIPTOR_MIN = {'name': 'id'}
# Fully specified field descriptor used to exercise non-default type/format
# and a required-constraint.
DESCRIPTOR_MAX = {
    'name': 'id',
    'type': 'integer',
    'format': 'default',
    'constraints': {'required': True},
}
# Tests [general]
# Basic attribute accessors of tableschema.Field built from the two
# descriptor fixtures above.
def test_descriptor(apply_defaults):
    assert Field(DESCRIPTOR_MIN).descriptor == apply_defaults(DESCRIPTOR_MIN)

def test_name():
    assert Field(DESCRIPTOR_MIN).name == 'id'

def test_type():
    # Type defaults to 'string' when the descriptor omits it.
    assert Field(DESCRIPTOR_MIN).type == 'string'
    assert Field(DESCRIPTOR_MAX).type == 'integer'

def test_format():
    assert Field(DESCRIPTOR_MIN).format == 'default'
    assert Field(DESCRIPTOR_MAX).format == 'default'

def test_constraints():
    assert Field(DESCRIPTOR_MIN).constraints == {}
    assert Field(DESCRIPTOR_MAX).constraints == {'required': True}

def test_required():
    assert Field(DESCRIPTOR_MIN).required == False
    assert Field(DESCRIPTOR_MAX).required == True

# cast_value: string -> native type conversion, constraint enforcement,
# and missing-value handling.
def test_cast_value():
    assert Field(DESCRIPTOR_MAX).cast_value('1') == 1

def test_cast_value_constraint_error():
    # Empty string violates the 'required' constraint of DESCRIPTOR_MAX.
    with pytest.raises(exceptions.CastError):
        Field(DESCRIPTOR_MAX).cast_value('')

def test_cast_value_constraints_false():
    assert Field(DESCRIPTOR_MIN).cast_value('', constraints=False) == None

def test_cast_value_null_with_missing_values():
    field = Field({'name': 'name', 'type': 'number'}, missing_values=['null'])
    assert field.cast_value('null') == None

# test_value: boolean validity checks without casting.
def test_test_value():
    assert Field(DESCRIPTOR_MAX).test_value('1') == True
    assert Field(DESCRIPTOR_MAX).test_value('string') == False
    assert Field(DESCRIPTOR_MAX).test_value('') == False

def test_test_value_constraints_false():
    assert Field(DESCRIPTOR_MIN).test_value('', constraints=False) == True

def test_missing_values():
    assert Field(DESCRIPTOR_MIN).missing_values == ['']
    assert Field(DESCRIPTOR_MIN, missing_values=['-']).missing_values == ['-']

# Tests [missingValues]
def test_string_missingValues():
    field = Field({
        'name': 'name',
        'type': 'string',
    }, missing_values=['', 'NA', 'N/A'])
    cast = field.cast_value
    assert cast('') == None
    assert cast('NA') == None
    assert cast('N/A') == None

def test_number_missingValues():
    field = Field({
        'name': 'name',
        'type': 'number',
    }, missing_values=['', 'NA', 'N/A'])
    cast = field.cast_value
    assert cast('') == None
    assert cast('NA') == None
    assert cast('N/A') == None

# Tests [constraints]
# Each test exercises a single constraint via the ``constraints=[...]``
# filter of Field.test_value.
def test_test_value_required():
    field = Field({
        'name': 'name',
        'type': 'string',
        'constraints': {'required': True}
    }, missing_values=['', 'NA', 'N/A'])
    test = partial(field.test_value, constraints=['required'])
    # Only the configured missing values (and None) fail 'required';
    # null-looking strings such as 'null'/'none' are ordinary strings here.
    assert test('test') == True
    assert test('null') == True
    assert test('none') == True
    assert test('nil') == True
    assert test('nan') == True
    assert test('NA') == False
    assert test('N/A') == False
    assert test('-') == True
    assert test('') == False
    assert test(None) == False

def test_test_value_pattern():
    field = Field({
        'name': 'name',
        'type': 'string',
        'constraints': {'pattern': '3.*'}
    })
    test = partial(field.test_value, constraints=['pattern'])
    assert test('3') == True
    assert test('321') == True
    assert test('123') == False

def test_test_value_unique():
    field = Field({
        'name': 'name',
        'type': 'integer',
        'constraints': {'unique': True}
    })
    test = partial(field.test_value, constraints=['unique'])
    assert test(30000) == True
    assert test('bad') == False

def test_test_value_enum():
    field = Field({
        'name': 'name',
        'type': 'integer',
        'constraints': {'enum': ['1', '2', '3']}
    })
    test = partial(field.test_value, constraints=['enum'])
    assert test('1') == True
    assert test(1) == True
    assert test('4') == False
    assert test(4) == False

def test_test_value_minimum():
    field = Field({
        'name': 'name',
        'type': 'integer',
        'constraints': {'minimum': 1}
    })
    test = partial(field.test_value, constraints=['minimum'])
    assert test('2') == True
    assert test(2) == True
    assert test('1') == True
    assert test(1) == True
    assert test('0') == False
    assert test(0) == False

def test_test_value_maximum():
    field = Field({
        'name': 'name',
        'type': 'integer',
        'constraints': {'maximum': 1}
    })
    test = partial(field.test_value, constraints=['maximum'])
    assert test('0') == True
    assert test(0) == True
    assert test('1') == True
    assert test(1) == True
    assert test('2') == False
    assert test(2) == False

def test_test_value_minLength():
    field = Field({
        'name': 'name',
        'type': 'string',
        'constraints': {'minLength': 1}
    })
    test = partial(field.test_value, constraints=['minLength'])
    assert test('ab') == True
    assert test('a') == True
    # Null value passes
    assert test('') == True

def test_test_value_maxLength():
    field = Field({
        'name': 'name',
        'type': 'string',
        'constraints': {'maxLength': 1}
    })
    test = partial(field.test_value, constraints=['maxLength'])
    assert test('') == True
    assert test('a') == True
    assert test('ab') == False
| mit |
hewie7/tool-wrapper-web | app_uploader/urls.py | 1 | 1705 | from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routing for the tool-wrapper app.  Uses the legacy Django
# ``patterns('', ...)`` helper with string view paths ('atap.views.*'),
# an idiom removed in Django 1.10 -- keep in mind when upgrading.
# Numbered regex groups (\d+) are passed positionally to the views as
# (tool_id) or (tool_id, item_id).
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    (r'^test$','atap.views.test'),
    (r'^$', 'atap.views.list_tools'),
    # Authentication and account management.
    (r'^accounts/login/$', 'atap.views.login'),
    (r'^accounts/logout/$', "atap.views.logout"),
    (r'^accounts/changepwd/$', 'atap.views.changepwd'),
    url(r'^docs/$', 'atap.views.docs', name="help-page"),
    # Tool CRUD.
    url(r'^tools/$', 'atap.views.list_tools', name="tool-list-page"),
    url(r'^tool/add/$', 'atap.views.add_tool', name="tool-add-page"),
    url(r'^tool/(\d+)/$', 'atap.views.modify_tool', name='tool-modify-page'),
    url(r'^view/(\d+)/$', 'atap.views.view_tool', name="tool-detail-page"),
    url(r'^init/(\d+)/$', 'atap.views.init', name="init-tool"),
    url(r'^del/(\d+)/$','atap.views.delete_tool', name="del-tool"),
    # Per-tool input/param/output sub-resources.
    url(r'^tool/(\d+)/input/add/$', 'atap.views.add_input', name="input-add-page"),
    url(r'^tool/(\d+)/input/(\d+)/$', 'atap.views.modify_input', name="input-modify-page"),
    url(r'^tool/(\d+)/input/(\d+)/del$','atap.views.delete_input',name="input-delete-page"),
    url(r'^tool/(\d+)/param/add/$', 'atap.views.add_param', name="param-add-page"),
    url(r'^tool/(\d+)/param/(\d+)/$', 'atap.views.modify_param', name="param-modify-page"),
    url(r'^tool/(\d+)/param/(\d+)/del$', 'atap.views.delete_param', name="param-delete-page"),
    url(r'^tool/(\d+)/output/add/$', 'atap.views.add_output', name="output-add-page"),
    url(r'^tool/(\d+)/output/(\d+)/$', 'atap.views.modify_output', name="output-modify-page"),
    url(r'^tool/(\d+)/output/(\d+)/del$', 'atap.views.delete_output', name="output-delete-page"),
)
| unlicense |
dibondar/PyPhotonicReagents | libs/dev/spectrometer_shamrock.py | 1 | 17257 | ########################################################################
#
# This module contains classes for controlling and GUI representation of
# spectrometer Shamrok SR303i connected to camera iDUS DU401A-BV
#
########################################################################
from libs.gui.hardware_control import HardwareGUIControl
from libs.dev.basic_device import BasicDevice
import ctypes, wx, os, multiprocessing
from multiprocessing.sharedctypes import RawArray
import numpy as np
from libs.dev.consts import *
########################################################################
#
# Manager class that communicates with the process where
# the device resides
#
########################################################################
class ManagerShamrockSpectrometer :
    """
    Manager for the Shamrock spectrometer device process.

    Runs in the parent process; sends (command, arguments) tuples over a
    multiprocessing pipe to a ShamrockSpectrometer child process and reads
    back the result.  Spectral data is exchanged through a shared RawArray
    buffer rather than the pipe.
    """
    def __init__ (self) :
        # Create the lock for device -- serializes pipe access across threads
        self.lock = multiprocessing.Lock()
        # Create a pipe for communication
        self.parent_connection, self.child_connection = multiprocessing.Pipe()
        # Create the buffer (shared memory for the acquired spectrum)
        self.spectrometer_buffer = ShamrockSpectrometer.AllocateBuffer()

    def __del__ (self) :
        self.parent_connection.close()
        self.child_connection.close()

    def start(self) :
        """
        Start the process controlling the spectrometer.

        Returns the started ShamrockSpectrometer process object.
        """
        p = ShamrockSpectrometer(self.child_connection, self.spectrometer_buffer)
        p.start()
        return p

    def run(self, command, arguments=None) :
        """
        Send the command to the spectrometer through the pipe and block
        until the child process replies; returns the child's result.
        """
        self.lock.acquire()
        self.parent_connection.send( (command, arguments) )
        result = self.parent_connection.recv()
        self.lock.release()
        return result

    def exit(self) :
        """
        Close the process
        """
        return self.run("Exit")

    def SetSettings (self, settings) :
        """
        Set settings for the spectrometer and camera
        """
        return self.run("SetSettings", settings)

    def AcquiredData (self) :
        """
        Trigger an acquisition and return the spectral data read out of the
        shared buffer as a numpy array of C longs.
        """
        # Python 2 print statement -- best-effort warning, result still returned.
        if self.run("AcquiredData") == RETURN_FAIL : print "Spectrometer Acquisition failed"
        return np.frombuffer(self.spectrometer_buffer, dtype=np.dtype(ctypes.c_long))

    def GetWavelengths (self) :
        """
        Get the calibration data (wavelength per pixel).
        """
        return self.run("GetWavelengths")

    def GetTemperature (self) : return self.run("GetTemperature")
########################################################################
#
# Process where the device resides
#
########################################################################
########################################################################
# Constants from ShamrockCIF.H
# Success return code shared by every ShamrockCIF.dll call.
SHAMROCK_SUCCESS = 20202
# Constants from ATMCD32D.H (Andor camera SDK return codes)
DRV_SUCCESS = 20002
DRV_VXDNOTINSTALLED = 20003
DRV_ERROR_FILELOAD = 20006
DRV_ERROR_PAGELOCK = 20010
DRV_ERROR_ACK = 20013
DRV_ACQ_BUFFER = 20018
DRV_KINETIC_TIME_NOT_MET = 20022
DRV_ACCUM_TIME_NOT_MET = 20023
DRV_NO_NEW_DATA = 20024
DRV_SPOOLERROR = 20026
DRV_TEMP_OFF = 20034
DRV_TEMP_NOT_STABILIZED = 20035
DRV_TEMP_STABILIZED = 20036
DRV_TEMP_NOT_REACHED = 20037
DRV_TEMP_DRIFT = 20040
DRV_FLEXERROR = 20053
DRV_P1INVALID = 20066
DRV_P2INVALID = 20067
DRV_INIERROR = 20070
DRV_COFERROR = 20071
DRV_ACQUIRING = 20072
DRV_IDLE = 20073
DRV_TEMPCYCLE = 20074
DRV_NOT_INITIALIZED = 20075
DRV_USBERROR = 20089
DRV_ERROR_NOCAMERA = 20990
DRV_NOT_SUPPORTED = 20991
########################################################################
# Installation paths of the Andor SOLIS drivers on the acquisition PC.
AndorDriverFolder = "C:/Program Files/Andor SOLIS/Drivers/"
ShamrockDriverFolder = "C:/Program Files/Andor SOLIS/Drivers/Shamrock/"
########################################################################
# Transcript of Error messages: maps camera SDK return codes to
# human-readable strings used when raising RuntimeError.
CameraErrorMsg = { DRV_VXDNOTINSTALLED : "VxD not loaded",
    DRV_INIERROR : "Unable to load 'DETECTOR.INI'", DRV_COFERROR : "Unable to load '*.COF'",
    DRV_FLEXERROR : "Unable to load '*.RBF'", DRV_ERROR_ACK : "Unable to communicate with card",
    DRV_ERROR_FILELOAD : "Unable to load '*.COF' or '*.RBF' files", DRV_ERROR_PAGELOCK : "Unable to acquire lock on requested memory",
    DRV_USBERROR : "Unable to detect USB device or not USB2.0", DRV_ERROR_NOCAMERA : "No camera found",
    DRV_NOT_INITIALIZED : "System not initialized", DRV_ACQUIRING : "Acquisition in progress",
    DRV_P1INVALID : "Invalid readout mode passed", DRV_TEMPCYCLE : "Executing temperature cycle",
    DRV_ACCUM_TIME_NOT_MET : "Unable to meet Accumulate cycle time", DRV_KINETIC_TIME_NOT_MET : "Unable to meet Kinetic cycle time",
    DRV_ACQ_BUFFER : "Computer unable to read the data via the ISA slot", DRV_SPOOLERROR : "Overflow of the spool buffer",
    DRV_P2INVALID : "Array size is incorrect", DRV_NO_NEW_DATA : "No acquisition has taken place",
    DRV_TEMP_OFF : "Temperature is OFF", DRV_TEMP_STABILIZED : "Temperature has stabilized at set point",
    DRV_TEMP_NOT_REACHED : "Temperature has not reached set point", DRV_TEMP_DRIFT : "Temperature had stabilized but has since drifted",
    DRV_TEMP_NOT_STABILIZED : "Temperature reached but not stabilized", DRV_NOT_SUPPORTED : "Capability not supported"
}
class ShamrockSpectrometer (BasicDevice) :
    """
    Control spectrometer and camera hardware from a separate process.

    Receives commands over a pipe (via BasicDevice.run dispatch) and writes
    acquired spectra into a shared RawArray buffer supplied by the manager.
    Talks to the hardware through two Windows DLLs loaded with ctypes:
    ShamrockCIF.dll (spectrograph) and atmcd32d.dll (iDus camera).
    """
    # Resolution of the camera image (these values may need to be adjusted when porting the code)
    xpixels = 1600
    ypixels = 200

    def __init__ (self, pipe, spectrometer_buffer) :
        """
        Initialize the spectrometer and camera process object.
        Actual hardware initialization is deferred to run().
        """
        BasicDevice.__init__(self, pipe)
        # saving the buffer where the spectrum will be saved
        self.buffer = spectrometer_buffer

    @classmethod
    def AllocateBuffer (cls) :
        """
        This static method allocates buffer that corresponds to Full Vertical Binning readout mode
        (one c_long per horizontal pixel, shared between processes).
        """
        return RawArray (ctypes.c_long, cls.xpixels )

    def InitializeShamrock (self) :
        """
        Initialize Shamrock spectrometer
        """
        # Expanding PATH environmental variable so the DLL's dependencies resolve
        os.environ["PATH"] += os.pathsep + ShamrockDriverFolder
        # Loading the spectrometer driver
        self.ShamrockLib = ctypes.WinDLL ("ShamrockCIF.dll")
        # Initializing
        if self.ShamrockLib.ShamrockInitialize(ShamrockDriverFolder) != SHAMROCK_SUCCESS : raise RuntimeError ("Error in ShamrockInitialize!")
        """
        # Verifying that there is a single Shamrock spectrometer
        totalSpectrometer = ctypes.c_int()
        if self.ShamrockLib.ShamrockGetNumberDevices( ctypes.byref(totalSpectrometer) ) != SHAMROCK_SUCCESS : raise RuntimeError ("Error in ShamrockGetNumberDevices!")
        if totalSpectrometer.value > 1: raise RuntimeError ("More than one Shamrock spectrometer!")
        """

    def InitializeCamera (self) :
        """
        Initialize iDus camera
        """
        # Expanding PATH environmental variable
        os.environ["PATH"] += os.pathsep + AndorDriverFolder
        # Loading the camera driver
        self.CameraLib = ctypes.WinDLL ("atmcd32d.dll")
        # Verifying that there is a single Andor camera
        totalCameras = ctypes.c_int()
        if self.CameraLib.GetAvailableCameras( ctypes.byref(totalCameras) ) != DRV_SUCCESS :
            raise RuntimeError ("Error in GetAvailableCameras")
        if totalCameras.value > 1 :
            raise RuntimeError ("More than one Andor camera is present")
        # Initialize the camera
        result = self.CameraLib.Initialize(AndorDriverFolder)
        if result != DRV_SUCCESS : raise RuntimeError ("Error in Initialize: %s " % CameraErrorMsg[result])
        # Find out the number of pixels for figures
        __xpixels__ = ctypes.c_int(); __ypixels__ = ctypes.c_int()
        if self.CameraLib.GetDetector( ctypes.byref(__xpixels__), ctypes.byref(__ypixels__) ) != DRV_SUCCESS : raise RuntimeError ("Error in GetDetector")
        self.max_x_pixels = __xpixels__.value; self.max_y_pixels = __ypixels__.value
        # Check whether the static properties coincide with actual resolution
        # (the shared buffer size was fixed at AllocateBuffer time).
        if self.xpixels != self.max_x_pixels or self.ypixels != self.max_y_pixels :
            raise ValueError ("Static properties <xpixels> and <ypixels> of class <Spectrometer> have wrong values. Correct values are %d and %d. Source code must be modified." % ( self.max_x_pixels, self.max_y_pixels))
        return RETURN_SUCCESS

    def SetSettings (self, settings) :
        """
        Assign settings to both the camera and the spectrograph.
        ``settings`` is a dict produced by the GUI tab (see
        ShamrockSpectrometerTab); required keys: temperature, exposure_time,
        grating, wavelength, slit_width.
        """
        self.SetCameraSettings(settings); self.SetShamrockSettings(settings)
        return RETURN_SUCCESS

    def SetCameraSettings (self, settings) :
        """Apply camera-related settings (temperature, mode, exposure)."""
        ################ Camera settings ################
        # check the temperature range
        mintemp = ctypes.c_int(); maxtemp = ctypes.c_int()
        result = self.CameraLib.GetTemperatureRange( ctypes.byref(mintemp), ctypes.byref(maxtemp) )
        if result != DRV_SUCCESS : raise RuntimeError ("Error in GetTemperatureRange: %s " % CameraErrorMsg[result])
        temperature = settings["temperature"]
        if temperature > maxtemp.value or temperature < mintemp.value : raise RuntimeError("Requested temperature is out of range")
        # Set the temperature
        if self.CameraLib.CoolerON() != DRV_SUCCESS : raise RuntimeError ("Error in CoolerON")
        if self.CameraLib.SetTemperature (temperature) != DRV_SUCCESS : raise RuntimeError ("Error in SetTemperature")
        # Set single scan acquisition mode
        result = self.CameraLib.SetAcquisitionMode(1)
        if result != DRV_SUCCESS : raise RuntimeError ("Error in SetAcquisitionMode: %s " % CameraErrorMsg[result])
        # Set Full Vertical Binning readout mode
        result = self.CameraLib.SetReadMode(0)
        if result != DRV_SUCCESS : raise RuntimeError ("Error in SetReadMode: %s " % CameraErrorMsg[result])
        # Set exposure time (this must be set at the end); settings value is
        # in milliseconds, the SDK expects seconds.
        exposure_time = float(settings["exposure_time"])/1000.
        result = self.CameraLib.SetExposureTime( ctypes.c_float(exposure_time) )
        if result != DRV_SUCCESS : raise RuntimeError ("Error in SetExposureTime: %s " % CameraErrorMsg[result] )
        # Retrieve Acquisition timing and compare with requested values
        exposure = ctypes.c_float(); accumulate = ctypes.c_float(); kinetic = ctypes.c_float()
        result = self.CameraLib.GetAcquisitionTimings( ctypes.byref(exposure), ctypes.byref(accumulate), ctypes.byref(kinetic) )
        if result != DRV_SUCCESS : raise RuntimeError ("Error in GetAcquisitionTimings: %s " % CameraErrorMsg[result] )
        exposure = exposure.value; accumulate = accumulate.value; kinetic = kinetic.value
        if not np.isclose(exposure_time,exposure,rtol=1e-3) :
            raise RuntimeError ("Requested exposure time cannot be set. Nearest available value is %f (s)"% exposure)
        return RETURN_SUCCESS

    def SetShamrockSettings (self, settings) :
        """ Shamrock settings: grating, central wavelength, slit width.
        Each value is written to the hardware and read back for verification. """
        ################ Grating ################
        if self.ShamrockLib.ShamrockSetGrating(0, settings["grating"]) != SHAMROCK_SUCCESS :
            raise RuntimeError ("Error in ShamrockSetGrating")
        grating = ctypes.c_int()
        if self.ShamrockLib.ShamrockGetGrating(0,ctypes.byref(grating)) != SHAMROCK_SUCCESS :
            raise RuntimeError ("Error in ShamrockGetGrating")
        if grating.value != settings["grating"] : raise ValueError ("Grating was not properly set")
        ################ Print Wavelength Limit ################
        min_wavelength = ctypes.c_float(); max_wavelength = ctypes.c_float()
        if self.ShamrockLib.ShamrockGetWavelengthLimits(0, grating, ctypes.byref(min_wavelength), ctypes.byref(max_wavelength)) != SHAMROCK_SUCCESS :
            raise ValueError ("Error in ShamrockGetWavelengthLimits")
        print "Grating %d resolves from %f to %f (nm)" % (grating.value, min_wavelength.value, max_wavelength.value)
        ################ Wavelength ################
        if self.ShamrockLib.ShamrockSetWavelength(0, ctypes.c_float(settings["wavelength"])) != SHAMROCK_SUCCESS :
            raise ValueError ("Error in ShamrockSetWavelength")
        wavelength = ctypes.c_float()
        if self.ShamrockLib.ShamrockGetWavelength(0, ctypes.byref(wavelength)) != SHAMROCK_SUCCESS : raise ValueError ("Error in ShamrockGetWavelength")
        # Wavelength mismatch is only warned about (the spectrograph rounds
        # to its mechanical resolution); slit/grating mismatches raise.
        if not np.isclose(settings["wavelength"],wavelength.value,rtol=1e-3) :
            print "Warning: Wavelength %f (nm) requested, but %f (nm) set\n" % (settings["wavelength"], wavelength.value)
        ################ Slit width ################
        if self.ShamrockLib.ShamrockSetSlit(0, ctypes.c_float(settings["slit_width"])) != SHAMROCK_SUCCESS :
            raise ValueError ("Error in ShamrockSetSlit")
        slit_width = ctypes.c_float()
        if self.ShamrockLib.ShamrockGetSlit(0, ctypes.byref(slit_width)) != SHAMROCK_SUCCESS : raise ValueError ("Error in ShamrockGetSlit")
        if not np.isclose(settings["slit_width"],slit_width.value,rtol=1e-3) :
            raise ValueError ("Slit width was not properly set")
        return RETURN_SUCCESS

    def GetTemperature (self, arguments=None) :
        """
        Get current camera temperature (degrees C, as reported by the SDK).
        """
        temperature = ctypes.c_int()
        result = self.CameraLib.GetTemperature( ctypes.byref(temperature) )
        #if result != DRV_TEMP_STABILIZED : print (CameraErrorMsg[result])
        return temperature.value

    def GetWavelengths (self, arguments=None) :
        """
        Return Shamrock wavelengths calibration: a float32 numpy array with
        one wavelength (nm) per pixel of the shared buffer.
        """
        npixels = len(self.buffer)
        if self.ShamrockLib.ShamrockSetNumberPixels(0,npixels) != SHAMROCK_SUCCESS : raise ValueError("Error in ShamrockSetNumberPixels")
        # Get the pixel size for the camera
        pixel_width = ctypes.c_float(); pixel_height = ctypes.c_float()
        if self.CameraLib.GetPixelSize(ctypes.byref(pixel_width), ctypes.byref(pixel_height)) != DRV_SUCCESS :
            raise ValueError ("Error in GetPixelSize")
        # Specify the pixel width
        if self.ShamrockLib.ShamrockSetPixelWidth(0,pixel_width) != SHAMROCK_SUCCESS : raise ValueError("Error in ShamrockSetPixelWidth")
        # Get wave length per pixel (the DLL fills the numpy array in place
        # through the raw pointer)
        wavelegths = np.zeros(npixels, dtype=np.dtype(ctypes.c_float))
        wavelegths_ptr = wavelegths.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
        if self.ShamrockLib.ShamrockGetCalibration(0,wavelegths_ptr,npixels) != SHAMROCK_SUCCESS : raise ValueError("Error in ShamrockGetCalibration")
        return wavelegths

    def StopDevice(self) :
        """
        Closing the camera and spectrograph; failures during shutdown are
        printed but not raised, so all close calls are attempted.
        """
        if self.CameraLib.CoolerOFF() != DRV_SUCCESS : print ("Error in CoolerOFF")
        if self.CameraLib.ShutDown() != DRV_SUCCESS : print ("Error in ShutDown")
        if self.ShamrockLib.ShamrockClose() != SHAMROCK_SUCCESS : print ("Error in ShamrockClose")
        return RETURN_SUCCESS

    def AcquiredData (self, arguments=None) :
        """
        Acquire data from spectrometer: start a single scan, block until it
        completes, then copy the spectrum into the shared buffer.
        """
        # Check status
        status = ctypes.c_int()
        if self.CameraLib.GetStatus( ctypes.byref(status) ) != DRV_SUCCESS : raise RuntimeError("Error in GetStatus")
        status = status.value
        if status != DRV_IDLE : raise RuntimeError ("Status Error: %s" % CameraErrorMsg[status])
        # Camera is ready to accept commands, then begin acquisition
        if self.CameraLib.StartAcquisition() != DRV_SUCCESS : raise RuntimeError("Error in StartAcquisition")
        # Waiting till acquisition finishes
        result = self.CameraLib.WaitForAcquisition()
        if result != DRV_SUCCESS : raise RuntimeError ("Error in WaitForAcquisition: %s" % CameraErrorMsg[result])
        # Moving the Data into the buffer
        result = self.CameraLib.GetAcquiredData(self.buffer, len(self.buffer))
        if result != DRV_SUCCESS : raise RuntimeError ("Error in GetAcquiredData: %s" % CameraErrorMsg[result])
        return RETURN_SUCCESS

    def run (self) :
        """
        Overloaded function provided by BasicDevice: initialize hardware,
        enter the command-dispatch loop, and shut down on exit.
        """
        # Initialize the devices
        self.InitializeShamrock(); self.InitializeCamera()
        BasicDevice.run(self)
        # Closing the device
        self.StopDevice()
########################################################################
class ShamrockSpectrometerTab (HardwareGUIControl) :
    """
    This class represents a GUI controlling properties of the spectrometer.

    Each wx control's SetLabel() value becomes a key in the settings dict
    consumed by ShamrockSpectrometer.SetSettings (via
    HardwareGUIControl.CreateSettingsDict).
    """
    def __init__(self, parent) :
        HardwareGUIControl.__init__(self, parent)
        sizer = wx.BoxSizer(wx.VERTICAL)
        ################################################
        # Camera exposure time, in milliseconds.
        sizer.Add (wx.StaticText(self, label="Exposure time (ms)"), flag=wx.LEFT, border=5)
        # NOTE(review): max=1e6 is a float; wx.SpinCtrl bounds are ints --
        # works with classic wxPython, confirm with newer wx versions.
        exposure_time_ctr = wx.SpinCtrl (self, value="20", min=1, max=1e6)
        exposure_time_ctr.SetLabel("Exposure time")
        sizer.Add (exposure_time_ctr, flag=wx.EXPAND, border=5)
        ################ Temperature ################
        # Camera cooler set point (degrees C).
        sizer.Add (wx.StaticText(self, label="\nTemperature"), flag=wx.LEFT, border=5)
        temperature_ctr = wx.SpinCtrl (self, value="-10", min=-50, max=50)
        temperature_ctr.SetLabel ("Temperature")
        sizer.Add (temperature_ctr, flag=wx.EXPAND, border=10)
        ################ Shamrock settings ################
        ################ Grating ################
        sizer.Add (wx.StaticText(self, label="\nGrating #"), flag=wx.LEFT, border=5)
        grating_ctr = wx.SpinCtrl (self, value="1", min=1, max=3)
        grating_ctr.SetLabel("grating")
        sizer.Add (grating_ctr, flag=wx.EXPAND, border=5)
        ################ Wavelength ################
        # Central wavelength of the spectrograph (nm).
        sizer.Add (wx.StaticText(self, label="\nWavelength (nm)"), flag=wx.LEFT, border=5)
        wavelength_ctr = wx.SpinCtrl (self, value="610", min=10, max=1000)
        wavelength_ctr.SetLabel("wavelength")
        sizer.Add (wavelength_ctr, flag=wx.EXPAND, border=5)
        ################ Slit width ################
        # Entrance slit width (micrometers).
        sizer.Add (wx.StaticText(self, label="\nSlit width (um)"), flag=wx.LEFT, border=5)
        slit_width_ctr = wx.SpinCtrl (self, value="500", min=0, max=2000)
        slit_width_ctr.SetLabel("slit width")
        sizer.Add (slit_width_ctr, flag=wx.EXPAND, border=5)
        self.SetSizer(sizer)
        ############### GUI is created, now generate settings ######################
        self.CreateSettingsDict()
| bsd-3-clause |
nirmeshk/oh-mainline | vendor/packages/docutils/test/functional/tests/standalone_rst_s5_html_1.py | 18 | 2531 | exec(open('functional/tests/_standalone_rst_defaults.py').read())
# These module-level names are consumed by the docutils functional-test
# harness (this file is exec'd by it after _standalone_rst_defaults).
# Source and destination file names:
test_source = 'standalone_rst_s5_html.txt'
test_destination = 'standalone_rst_s5_html_1.html'
# Keyword parameters passed to publish_file:
writer_name = 's5_html'
# Settings:
settings_overrides['theme'] = 'small-black'
# local copy of default stylesheet:
settings_overrides['stylesheet_path'] = (
    'functional/input/data/html4css1.css')
# Extra functional tests.
# Prefix all names with '_' to avoid confusing `docutils.core.publish_file`.
import filecmp as _filecmp
def _test_more(expected_dir, output_dir, test_case, parameters):
    """Compare the ``ui/<theme>`` trees of expected vs. actual output.

    Builds a human-readable report of differing and unique files and
    fails *test_case* if the report is non-empty.
    """
    theme_name = settings_overrides.get('theme', 'default')
    expected_ui = '%s/%s/%s' % (expected_dir, 'ui', theme_name)
    output_ui = '%s/%s/%s' % (output_dir, 'ui', theme_name)
    differences, uniques = _compare_directories(expected_ui, output_ui)
    report = []
    if differences:
        report.append('The following files differ from the expected output:')
        report.extend(differences)
        fixed = [path.replace('functional/output/', 'functional/expected/')
                 for path in differences]
        report.append('Please compare the expected and actual output files:')
        for pair in zip(fixed, differences):
            report.append(' diff %s %s' % pair)
        report.append('If the actual output is correct, please replace the '
                      'expected output files:')
        for pair in zip(differences, fixed):
            report.append(' mv %s %s' % pair)
        report.append('and check them in to Subversion:')
        for path in fixed:
            report.append(' svn commit -m "<comment>" %s' % path)
    if uniques:
        report.append('The following paths are unique:')
        report.extend(uniques)
    test_case.assertTrue(not report, '\n'.join(report))
def _compare_directories(expected, output):
dircmp = _filecmp.dircmp(expected, output, ['.svn', 'CVS'])
differences = ['%s/%s' % (output, name) for name in dircmp.diff_files]
uniques = (['%s/%s' % (expected, name) for name in dircmp.left_only]
+ ['%s/%s' % (output, name) for name in dircmp.right_only])
for subdir in dircmp.common_dirs:
diffs, uniqs = _compare_directories('%s/%s' % (expected, subdir),
'%s/%s' % (output, subdir))
differences.extend(diffs)
uniques.extend(uniqs)
return differences, uniques
| agpl-3.0 |
hryamzik/ansible | lib/ansible/modules/network/aos/_aos_ip_pool.py | 28 | 10550 | #!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_ip_pool
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS IP Pool
deprecated:
removed_in: "2.9"
why: This module does not support AOS 2.1 or later
alternative: See new modules at U(https://www.ansible.com/ansible-apstra).
description:
- Apstra AOS Ip Pool module let you manage your IP Pool easily. You can create
create and delete IP Pool by Name, ID or by using a JSON File. This module
is idempotent and support the I(check) mode. It's using the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the IP Pool to manage.
Only one of I(name), I(id) or I(content) can be set.
id:
description:
- AOS Id of the IP Pool to manage (can't be used to create a new IP Pool),
Only one of I(name), I(id) or I(content) can be set.
content:
description:
- Datastructure of the IP Pool to manage. The data can be in YAML / JSON or
directly a variable. It's the same datastructure that is returned
on success in I(value).
state:
description:
- Indicate what is the expected state of the IP Pool (present or not).
default: present
choices: ['present', 'absent']
subnets:
description:
- List of subnet that needs to be part of the IP Pool.
'''
# Usage examples shown by ansible-doc.  Fixed two malformed CIDRs
# ("192.168.0.0./24" -> "192.168.0.0/24") that would be rejected by AOS.
EXAMPLES = '''
- name: "Create an IP Pool with one subnet"
  aos_ip_pool:
    session: "{{ aos_session }}"
    name: "my-ip-pool"
    subnets: [ 172.10.0.0/16 ]
    state: present

- name: "Create an IP Pool with multiple subnets"
  aos_ip_pool:
    session: "{{ aos_session }}"
    name: "my-other-ip-pool"
    subnets: [ 172.10.0.0/16, 192.168.0.0/24 ]
    state: present

- name: "Check if an IP Pool exist with same subnets by ID"
  aos_ip_pool:
    session: "{{ aos_session }}"
    name: "45ab26fc-c2ed-4307-b330-0870488fa13e"
    subnets: [ 172.10.0.0/16, 192.168.0.0/24 ]
    state: present

- name: "Delete an IP Pool by name"
  aos_ip_pool:
    session: "{{ aos_session }}"
    name: "my-ip-pool"
    state: absent

- name: "Delete an IP pool by id"
  aos_ip_pool:
    session: "{{ aos_session }}"
    id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
    state: absent

# Save an IP Pool to a file

- name: "Access IP Pool 1/3"
  aos_ip_pool:
    session: "{{ aos_session }}"
    name: "my-ip-pool"
    subnets: [ 172.10.0.0/16, 172.12.0.0/16 ]
    state: present
  register: ip_pool

- name: "Save Ip Pool into a file in JSON 2/3"
  copy:
    content: "{{ ip_pool.value | to_nice_json }}"
    dest: ip_pool_saved.json

- name: "Save Ip Pool into a file in YAML 3/3"
  copy:
    content: "{{ ip_pool.value | to_nice_yaml }}"
    dest: ip_pool_saved.yaml

- name: "Load IP Pool from a JSON file"
  aos_ip_pool:
    session: "{{ aos_session }}"
    content: "{{ lookup('file', 'resources/ip_pool_saved.json') }}"
    state: present

- name: "Load IP Pool from a YAML file"
  aos_ip_pool:
    session: "{{ aos_session }}"
    content: "{{ lookup('file', 'resources/ip_pool_saved.yaml') }}"
    state: present

- name: "Load IP Pool from a Variable"
  aos_ip_pool:
    session: "{{ aos_session }}"
    content:
      display_name: my-ip-pool
      id: 4276738d-6f86-4034-9656-4bff94a34ea7
      subnets:
        - network: 172.10.0.0/16
        - network: 172.12.0.0/16
    state: present
'''
RETURNS = '''
name:
description: Name of the IP Pool
returned: always
type: str
sample: Server-IpAddrs
id:
description: AOS unique ID assigned to the IP Pool
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
def get_list_of_subnets(ip_pool):
    """Return the list of network strings held by an AOS IP pool.

    *ip_pool.value* is the raw AOS payload; each subnet entry is a dict
    of the form ``{'network': '<cidr>'}``.
    """
    return [entry['network'] for entry in ip_pool.value['subnets']]
def create_new_ip_pool(ip_pool, name, subnets):
    """Fill *ip_pool* with *name* and *subnets* and push it to the server.

    Returns whatever ``ip_pool.write()`` returns (the server response).
    """
    payload = {
        'display_name': name,
        'subnets': [dict(network=net) for net in subnets],
    }
    ip_pool.datum = payload
    # write() uploads the datum to the AOS server.
    return ip_pool.write()
#########################################################
# State Processing
#########################################################
def ip_pool_absent(module, aos, my_pool):
    """Ensure the IP Pool is absent; exits the module via exit_json/fail_json.

    module  -- the AnsibleModule instance (params, check_mode, exit helpers)
    aos     -- the aos-pyez session (unused here, kept for a uniform signature)
    my_pool -- the collection item located by ip_pool()
    """
    margs = module.params
    # Pool already gone: report no change.
    if my_pool.exists is False:
        module.exit_json(changed=False, name=margs['name'], id='', value={})
    # Refuse to delete a pool that is still referenced by a blueprint.
    if my_pool.value:
        if my_pool.value['status'] != 'not_in_use':
            module.fail_json(msg="unable to delete this ip Pool, currently in use")
    else:
        module.fail_json(msg="Ip Pool object has an invalid format, value['status'] must be defined")
    # Perform the deletion unless running in check mode.
    if not module.check_mode:
        try:
            my_pool.delete()
        # Narrowed from a bare 'except:': a bare clause also swallows
        # SystemExit (raised by fail_json/exit_json) and KeyboardInterrupt.
        except Exception:
            module.fail_json(msg="An error occurred, while trying to delete the IP Pool")
    module.exit_json(changed=True,
                     name=my_pool.name,
                     id=my_pool.id,
                     value={})
def ip_pool_present(module, aos, my_pool):
    """Ensure the IP Pool exists; exits the module via exit_json/fail_json.

    Fixes over the original:
    - the bare 'except:' wrapped the fail_json calls themselves, so their
      SystemExit was swallowed and the specific error message replaced by a
      generic one; the fail_json calls are now outside the try and the
      except is narrowed to Exception;
    - ``'name' not in margs.keys()`` could never be true (the key is always
      present in module.params, only its *value* may be None), so the
      "name is mandatory" error was unreachable.
    """
    margs = module.params
    # If 'content' is defined, create/update the pool from that datastructure.
    if margs['content'] is not None:
        if 'display_name' in margs['content'].keys():
            try:
                do_load_resource(module, aos.IpPools,
                                 margs['content']['display_name'])
            except Exception:
                module.fail_json(msg="Unable to load resource from content, something went wrong")
        else:
            module.fail_json(msg="Unable to find display_name in 'content', Mandatory")
    # A pool that does not exist yet can only be created by name.
    if my_pool.exists is False and margs.get('name') is None:
        module.fail_json(msg="Name is mandatory for module that don't exist currently")
    elif my_pool.exists is False:
        if not module.check_mode:
            try:
                my_pool = create_new_ip_pool(my_pool, margs['name'],
                                             margs['subnets'])
            except Exception:
                module.fail_json(msg="An error occurred while trying to create a new IP Pool ")
        module.exit_json(changed=True,
                         name=my_pool.name,
                         id=my_pool.id,
                         value=my_pool.value)
    # Pool already exists: idempotent success if the subnet sets match,
    # otherwise fail (in-place update is not supported).
    if set(get_list_of_subnets(my_pool)) == set(margs['subnets']):
        module.exit_json(changed=False,
                         name=my_pool.name,
                         id=my_pool.id,
                         value=my_pool.value)
    else:
        module.fail_json(msg="ip_pool already exist but value is different, currently not supported to update a module")
#########################################################
# Main Function
#########################################################
def ip_pool(module):
    """Locate the target IP Pool and dispatch to the present/absent handler.

    Narrowed the two bare 'except:' clauses to 'except Exception:' so that
    SystemExit and KeyboardInterrupt are no longer silently converted into
    generic failure messages.
    """
    margs = module.params

    try:
        aos = get_aos_session(module, margs['session'])
    except Exception:
        module.fail_json(msg="Unable to login to the AOS server")

    # Resolve the lookup key: content's display_name wins, then name, then id.
    item_name = False
    item_id = False
    if margs['content'] is not None:
        content = content_to_dict(module, margs['content'])
        if 'display_name' in content.keys():
            item_name = content['display_name']
        else:
            module.fail_json(msg="Unable to extract 'display_name' from 'content'")
    elif margs['name'] is not None:
        item_name = margs['name']
    elif margs['id'] is not None:
        item_id = margs['id']

    # ----------------------------------------------------
    # Find Object if available based on ID or Name
    # ----------------------------------------------------
    try:
        my_pool = find_collection_item(aos.IpPools,
                                       item_name=item_name,
                                       item_id=item_id)
    except Exception:
        module.fail_json(msg="Unable to find the IP Pool based on name or ID, something went wrong")

    # ----------------------------------------------------
    # Proceed based on State value
    # ----------------------------------------------------
    if margs['state'] == 'absent':
        ip_pool_absent(module, aos, my_pool)
    elif margs['state'] == 'present':
        ip_pool_present(module, aos, my_pool)
def main():
    """Module entry point: build the argument spec and run ip_pool()."""
    module = AnsibleModule(
        argument_spec=dict(
            session=dict(required=True, type="dict"),
            name=dict(required=False),
            id=dict(required=False),
            content=dict(required=False, type="json"),
            state=dict(required=False,
                       choices=['present', 'absent'],
                       default="present"),
            subnets=dict(required=False, type="list")
        ),
        # Exactly one selector must be given, and only one.
        mutually_exclusive=[('name', 'id', 'content')],
        required_one_of=[('name', 'id', 'content')],
        supports_check_mode=True
    )
    # Check if aos-pyez is present and match the minimum version
    check_aos_version(module, '0.6.0')
    ip_pool(module)
if __name__ == "__main__":
main()
| gpl-3.0 |
blackmiaool/rt-thread | bsp/stm32f10x/rtconfig.py | 39 | 3579 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
#device options
# STM32_TYPE =
# 'STM32F10X_LD','STM32F10X_LD_VL',
# 'STM32F10X_MD','STM32F10X_MD_VL',
# 'STM32F10X_HD','STM32F10X_HD_VL',
# 'STM32F10X_XL','STM32F10X_CL'
STM32_TYPE = 'STM32F10X_HD'
# lcd panel options
# 'FMT0371','ILI932X', 'SSD1289'
RT_USING_LCD_TYPE = 'SSD1289'
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'D:/SourceryGCC/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
IAR_PATH = 'C:/Program Files/IAR Systems/Embedded Workbench 6.0 Evaluation'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-stm32.map,-cref,-u,Reset_Handler -T stm32_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSTM'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-stm32.map --scatter stm32_rom.sct'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' -D USE_STDPERIPH_DRIVER' + ' -D STM32F10X_HD'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + IAR_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
LFLAGS = ' --config stm32f10x_flash.icf'
LFLAGS += ' --redirect _Printf=_PrintfTiny'
LFLAGS += ' --redirect _Scanf=_ScanfSmall'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = IAR_PATH + '/arm/bin/'
POST_ACTION = ''
| gpl-2.0 |
solintegra/addons | purchase_mrp_project_link/models/purchase_order.py | 29 | 1251 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
class PurchaseOrder(models.Model):
    """Extends purchase.order with a main project used to default the
    analytic account on every order line."""
    _inherit = 'purchase.order'

    # Project whose analytic account is propagated to the order lines.
    main_project_id = fields.Many2one('project.project', string="Main Project")

    @api.one
    @api.onchange('main_project_id')
    def onchange_project_id(self):
        """On project change, stamp its analytic account on all lines."""
        for line in self.order_line:
            line.account_analytic_id = self.main_project_id.analytic_account_id
| agpl-3.0 |
heartsucker/securedrop | securedrop/source_app/__init__.py | 2 | 6859 | from datetime import datetime, timedelta
from flask import (Flask, render_template, flash, Markup, request, g, session,
url_for, redirect)
from flask_babel import gettext
from flask_assets import Environment
from flask_wtf.csrf import CSRFProtect, CSRFError
from jinja2 import evalcontextfilter
from os import path
from sqlalchemy.orm.exc import NoResultFound
import i18n
import template_filters
import version
from crypto_util import CryptoUtil
from db import db
from models import Source
from request_that_secures_file_uploads import RequestThatSecuresFileUploads
from source_app import main, info, api
from source_app.decorators import ignore_static
from source_app.utils import logged_in
from store import Storage
from worker import rq_worker_queue
import typing
# https://www.python.org/dev/peps/pep-0484/#runtime-or-type-checking
if typing.TYPE_CHECKING:
# flake8 can not understand type annotation yet.
# That is why all type annotation relative import
# statements has to be marked as noqa.
# http://flake8.pycqa.org/en/latest/user/error-codes.html?highlight=f401
from sdconfig import SDConfig # noqa: F401
def create_app(config):
    # type: (SDConfig) -> Flask
    """Build and wire the source-interface Flask application.

    Configures CSRF, the database, storage/crypto helpers, i18n, Jinja
    globals/filters, blueprints, per-request hooks and error handlers.
    """
    app = Flask(__name__,
                template_folder=config.SOURCE_TEMPLATES_DIR,
                static_folder=path.join(config.SECUREDROP_ROOT, 'static'))
    # Request subclass that keeps uploaded files off the disk cache.
    app.request_class = RequestThatSecuresFileUploads
    app.config.from_object(config.SourceInterfaceFlaskConfig)  # type: ignore
    app.sdconfig = config

    # The default CSRF token expiration is 1 hour. Since large uploads can
    # take longer than an hour over Tor, we increase the valid window to 24h.
    app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24
    CSRFProtect(app)

    # Build the SQLAlchemy URI; sqlite uses a file path, other engines a
    # full user:password@host/name URI.
    if config.DATABASE_ENGINE == "sqlite":
        db_uri = (config.DATABASE_ENGINE + ":///" +
                  config.DATABASE_FILE)
    else:
        db_uri = (
            config.DATABASE_ENGINE + '://' +
            config.DATABASE_USERNAME + ':' +
            config.DATABASE_PASSWORD + '@' +
            config.DATABASE_HOST + '/' +
            config.DATABASE_NAME
        )
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
    db.init_app(app)

    # Encrypted submission storage and codename/crypto helpers.
    app.storage = Storage(config.STORE_DIR,
                          config.TEMP_DIR,
                          config.JOURNALIST_KEY)

    app.crypto_util = CryptoUtil(
        scrypt_params=config.SCRYPT_PARAMS,
        scrypt_id_pepper=config.SCRYPT_ID_PEPPER,
        scrypt_gpg_pepper=config.SCRYPT_GPG_PEPPER,
        securedrop_root=config.SECUREDROP_ROOT,
        word_list=config.WORD_LIST,
        nouns_file=config.NOUNS,
        adjectives_file=config.ADJECTIVES,
        gpg_key_dir=config.GPG_KEY_DIR,
    )

    app.config['RQ_WORKER_NAME'] = config.RQ_WORKER_NAME
    rq_worker_queue.init_app(app)

    @app.errorhandler(CSRFError)
    def handle_csrf_error(e):
        # An expired/invalid CSRF token is treated as a session timeout.
        msg = render_template('session_timeout.html')
        session.clear()
        flash(Markup(msg), "important")
        return redirect(url_for('main.index'))

    assets = Environment(app)
    app.config['assets'] = assets

    i18n.setup_app(config, app)

    app.jinja_env.trim_blocks = True
    app.jinja_env.lstrip_blocks = True
    app.jinja_env.globals['version'] = version.__version__
    # Allow deployments to replace the default logo in templates.
    if getattr(config, 'CUSTOM_HEADER_IMAGE', None):
        app.jinja_env.globals['header_image'] = \
            config.CUSTOM_HEADER_IMAGE  # type: ignore
        app.jinja_env.globals['use_custom_header_image'] = True
    else:
        app.jinja_env.globals['header_image'] = 'logo.png'
        app.jinja_env.globals['use_custom_header_image'] = False

    app.jinja_env.filters['rel_datetime_format'] = \
        template_filters.rel_datetime_format
    app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)
    app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat

    for module in [main, info, api]:
        app.register_blueprint(module.make_blueprint(config))  # type: ignore

    @app.before_request
    @ignore_static
    def setup_i18n():
        """Store i18n-related values in Flask's special g object"""
        g.locale = i18n.get_locale(config)
        g.text_direction = i18n.get_text_direction(g.locale)
        g.html_lang = i18n.locale_to_rfc_5646(g.locale)
        g.locales = i18n.get_locale2name()

    @app.before_request
    @ignore_static
    def check_tor2web():
        # ignore_static here so we only flash a single message warning
        # about Tor2Web, corresponding to the initial page load.
        if 'X-tor2web' in request.headers:
            flash(Markup(gettext(
                '<strong>WARNING: </strong> '
                'You appear to be using Tor2Web. '
                'This <strong> does not </strong> '
                'provide anonymity. '
                '<a href="{url}">Why is this dangerous?</a>')
                .format(url=url_for('info.tor2web_warning'))),
                  "banner-warning")

    @app.before_request
    @ignore_static
    def setup_g():
        """Store commonly used values in Flask's special g object"""
        if 'expires' in session and datetime.utcnow() >= session['expires']:
            msg = render_template('session_timeout.html')
            # clear the session after we render the message so it's localized
            session.clear()
            flash(Markup(msg), "important")
        # Sliding expiration: every request pushes the deadline forward.
        session['expires'] = datetime.utcnow() + \
            timedelta(minutes=getattr(config,
                                      'SESSION_EXPIRATION_MINUTES',
                                      120))
        # ignore_static here because `crypto_util.hash_codename` is scrypt
        # (very time consuming), and we don't need to waste time running if
        # we're just serving a static resource that won't need to access
        # these common values.
        if logged_in():
            g.codename = session['codename']
            g.filesystem_id = app.crypto_util.hash_codename(g.codename)
            try:
                g.source = Source.query \
                    .filter(Source.filesystem_id == g.filesystem_id) \
                    .one()
            except NoResultFound as e:
                # Stale session referencing a deleted source: log out.
                app.logger.error(
                    "Found no Sources when one was expected: %s" %
                    (e,))
                del session['logged_in']
                del session['codename']
                return redirect(url_for('main.index'))
            g.loc = app.storage.path(g.filesystem_id)

    @app.errorhandler(404)
    def page_not_found(error):
        return render_template('notfound.html'), 404

    @app.errorhandler(500)
    def internal_error(error):
        return render_template('error.html'), 500

    return app
| agpl-3.0 |
mikemow/youtube-dl | youtube_dl/extractor/footyroom.py | 104 | 1590 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class FootyRoomIE(InfoExtractor):
    """Extracts playlists of Playwire-hosted match videos from footyroom.com."""
    # Generalized from 'http://' to accept HTTPS URLs as well; existing
    # http:// links keep matching, so this is backward compatible.
    _VALID_URL = r'https?://footyroom\.com/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://footyroom.com/schalke-04-0-2-real-madrid-2015-02/',
        'info_dict': {
            'id': 'schalke-04-0-2-real-madrid-2015-02',
            'title': 'Schalke 04 0 – 2 Real Madrid',
        },
        'playlist_count': 3,
    }, {
        'url': 'http://footyroom.com/georgia-0-2-germany-2015-03/',
        'info_dict': {
            'id': 'georgia-0-2-germany-2015-03',
            'title': 'Georgia 0 – 2 Germany',
        },
        'playlist_count': 1,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        # The page embeds a JSON array of video payloads for VideoSelector.
        playlist = self._parse_json(
            self._search_regex(
                r'VideoSelector\.load\((\[.+?\])\);', webpage, 'video selector'),
            playlist_id)

        playlist_title = self._og_search_title(webpage)

        entries = []
        for video in playlist:
            payload = video.get('payload')
            if not payload:
                continue
            # Each payload carries a protocol-relative Playwire config URL.
            playwire_url = self._search_regex(
                r'data-config="([^"]+)"', payload,
                'playwire url', default=None)
            if playwire_url:
                entries.append(self.url_result(self._proto_relative_url(
                    playwire_url, 'http:'), 'Playwire'))

        return self.playlist_result(entries, playlist_id, playlist_title)
| unlicense |
king19860907/disconf | disconf-client/tools/my_printzootree.py | 16 | 1415 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on 2013-5-23
@author: liaoqiqi
打印zookeeper树
'''
from kazoo.client import KazooClient
from kazoo.client import KazooState
import datetime
import time
# Connect to the ZooKeeper ensemble before any tree walking happens
# (module-level side effect; zk is used as a global by the functions below).
hosts = '10.48.57.42:8581,10.48.57.42:8582,10.48.57.42:8583'
zk = KazooClient(hosts=hosts)
zk.start()
print "hosts", hosts
print "version", zk.server_version()
#
#
#
def printTreeNode(basepath, node, i):
    """Recursively print the children under basepath; i is the depth
    (number of tab stops).  Each line shows name, data, ctime, mtime."""
    if isinstance(node, list) :
        for item in node:
            # current
            current_base_path = basepath + "/" + item
            data, stat = zk.get(current_base_path)
            # Build the indentation prefix for this depth.
            ahead = ""
            j = 1
            while j < i:
                ahead += "\t"
                j += 1
            # stat times are in milliseconds since the epoch.
            print ahead + "|----" + item + "\t" + data.decode("utf-8") + "\t" + \
                time.strftime("%Y%m%d%H%M%S", time.localtime(stat.ctime/1000)) + "\t" + \
                time.strftime("%Y%m%d%H%M%S", time.localtime(stat.mtime/1000))
            # recursive
            items = zk.get_children(current_base_path)
            printTreeNode(current_base_path, items, i + 1)
#
# root
#
def printtreeRoot(path):
    """Print the whole ZooKeeper subtree rooted at *path*."""
    # get children
    base_path = path
    children = zk.get_children(base_path)
    print base_path
    printTreeNode(base_path, children, 1)
#
#
#
if __name__ == '__main__':
    # Dump the entire tree from the root, then close the connection.
    printtreeRoot("/")
    zk.stop()
| gpl-2.0 |
jptomo/rpython-lang-scheme | rpython/rlib/rmd5.py | 2 | 14169 | # -*- coding: iso-8859-1 -*-
"""
RPython implementation of MD5 checksums.
See also the pure Python implementation in lib_pypy/md5.py, which might
or might not be faster than this one on top of CPython.
This is an implementation of the MD5 hash function,
as specified by RFC 1321. It was implemented using Bruce Schneier's
excellent book "Applied Cryptography", 2nd ed., 1996.
This module tries to follow the API of the CPython md5 module.
Long history:
By Dinu C. Gherman. BEWARE: this comes with no guarantee whatsoever
about fitness and/or other properties! Specifically, do not use this
in any production code! License is Python License! (Re-licensing
under the MIT would be great, though)
Special thanks to Aurelian Coman who fixed some nasty bugs!
Modernised by J. Hallén and L. Creighton for Pypy.
Converted to RPython by arigo.
"""
from rpython.rlib.rarithmetic import r_uint, r_ulonglong
# On 32-bit builds r_uint wraps at 2**32, so the plain shift/or expression
# is already a correct circular rotate; on 64-bit builds we go through a
# small C helper so the rotate is done on a true 32-bit unsigned int.
if r_uint.BITS == 32:
    def _rotateLeft(x, n):
        "Rotate x (32 bit) left n bits circularly."
        return (x << n) | (x >> (32-n))
else:
    def _rotateLeft_emulator(x, n):
        # Mask to 32 bits first, since r_uint is wider here.
        x &= 0xFFFFFFFF
        return (x << n) | (x >> (32-n))
    # ----- start of custom code, think about something better... -----
    from rpython.rtyper.lltypesystem import lltype, rffi
    from rpython.translator.tool.cbuild import ExternalCompilationInfo
    eci = ExternalCompilationInfo(post_include_bits=["""
static unsigned long pypy__rotateLeft(unsigned long x, long n) {
    unsigned int x1 = x; /* arithmetic directly on int */
    int n1 = n;
    return (x1 << n1) | (x1 >> (32-n1));
}
"""])
    _rotateLeft = rffi.llexternal(
        "pypy__rotateLeft", [lltype.Unsigned, lltype.Signed], lltype.Unsigned,
        _callable=_rotateLeft_emulator, compilation_info=eci,
        _nowrapper=True, elidable_function=True)
    # we expect the function _rotateLeft to be actually inlined
# we expect the function _rotateLeft to be actually inlined
def _state2string(a, b, c, d):
return ''.join([
chr(a&0xFF), chr((a>>8)&0xFF), chr((a>>16)&0xFF), chr((a>>24)&0xFF),
chr(b&0xFF), chr((b>>8)&0xFF), chr((b>>16)&0xFF), chr((b>>24)&0xFF),
chr(c&0xFF), chr((c>>8)&0xFF), chr((c>>16)&0xFF), chr((c>>24)&0xFF),
chr(d&0xFF), chr((d>>8)&0xFF), chr((d>>16)&0xFF), chr((d>>24)&0xFF),
])
def _state2hexstring(a, b, c, d):
hx = '0123456789abcdef'
return ''.join([
hx[(a>>4)&0xF], hx[a&0xF], hx[(a>>12)&0xF], hx[(a>>8)&0xF],
hx[(a>>20)&0xF], hx[(a>>16)&0xF], hx[(a>>28)&0xF], hx[(a>>24)&0xF],
hx[(b>>4)&0xF], hx[b&0xF], hx[(b>>12)&0xF], hx[(b>>8)&0xF],
hx[(b>>20)&0xF], hx[(b>>16)&0xF], hx[(b>>28)&0xF], hx[(b>>24)&0xF],
hx[(c>>4)&0xF], hx[c&0xF], hx[(c>>12)&0xF], hx[(c>>8)&0xF],
hx[(c>>20)&0xF], hx[(c>>16)&0xF], hx[(c>>28)&0xF], hx[(c>>24)&0xF],
hx[(d>>4)&0xF], hx[d&0xF], hx[(d>>12)&0xF], hx[(d>>8)&0xF],
hx[(d>>20)&0xF], hx[(d>>16)&0xF], hx[(d>>28)&0xF], hx[(d>>24)&0xF],
])
def _string2uintlist(s, start, count, result):
    """Unpack ``count`` little-endian 32-bit words from
    ``s[start:start + 4*count]`` into the preallocated list ``result``."""
    for k in range(count):
        base = start + k * 4
        word = r_uint(ord(s[base]))
        word |= r_uint(ord(s[base + 1])) << 8
        word |= r_uint(ord(s[base + 2])) << 16
        word |= r_uint(ord(s[base + 3])) << 24
        result[k] = word
# ======================================================================
# The real MD5 meat...
#
# Implemented after "Applied Cryptography", 2nd ed., 1996,
# pp. 436-441 by Bruce Schneier.
# ======================================================================
# F, G, H and I are basic MD5 functions.
def F(x, y, z):
    """MD5 round-1 function: bitwise 'if x then y else z'."""
    chosen = x & y
    fallback = (~x) & z
    return chosen | fallback

def G(x, y, z):
    """MD5 round-2 function: bitwise 'if z then x else y'."""
    chosen = x & z
    fallback = y & (~z)
    return chosen | fallback

def H(x, y, z):
    """MD5 round-3 function: three-way parity."""
    return x ^ y ^ z

def I(x, y, z):
    """MD5 round-4 function."""
    return y ^ (x | (~z))
def XX(func, a, b, c, d, x, s, ac):
    """Wrapper for call distribution to functions F, G, H and I.
    This replaces functions FF, GG, HH and II from "Appl. Crypto."
    Rotation is separate from addition to prevent recomputation
    (now summed-up in one function).

    func -- one of F, G, H, I; x is the message word, s the rotate
    amount, ac the round's additive constant.  Additions wrap mod 2**32
    because the operands are r_uint.
    """
    res = a + func(b, c, d)
    res = res + x
    res = res + ac
    res = _rotateLeft(res, s)
    res = res + b
    return res
XX._annspecialcase_ = 'specialize:arg(0)' # performance hint
class RMD5(object):
"""RPython-level MD5 object.
"""
    def __init__(self, initialdata=''):
        """Create a fresh MD5 state, optionally hashing *initialdata*."""
        self._init()
        self.update(initialdata)
    def _init(self):
        """Set this object to an initial empty state.
        """
        self.count = r_ulonglong(0)   # total number of bytes
        self.input = ""               # pending unprocessed data, < 64 bytes
        # Scratch list reused for every 16-word block (avoids reallocation).
        self.uintbuffer = [r_uint(0)] * 16

        # Load magic initialization constants (RFC 1321, section 3.3).
        self.A = r_uint(0x67452301L)
        self.B = r_uint(0xefcdab89L)
        self.C = r_uint(0x98badcfeL)
        self.D = r_uint(0x10325476L)
    def _transform(self, inp):
        """Basic MD5 step transforming the digest based on the input.
        Note that if the Mysterious Constants are arranged backwards
        in little-endian order and decrypted with the DES they produce
        OCCULT MESSAGES!

        Processes one 64-byte block (16 little-endian words) through the
        four 16-step rounds of RFC 1321 and folds the result into the
        A..D state words.  All arithmetic wraps mod 2**32 via r_uint.
        """
        # 'inp' is a list of 16 r_uint values.
        a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D

        # Round 1.
        S11, S12, S13, S14 = 7, 12, 17, 22

        a = XX(F, a, b, c, d, inp[ 0], S11, r_uint(0xD76AA478L)) # 1
        d = XX(F, d, a, b, c, inp[ 1], S12, r_uint(0xE8C7B756L)) # 2
        c = XX(F, c, d, a, b, inp[ 2], S13, r_uint(0x242070DBL)) # 3
        b = XX(F, b, c, d, a, inp[ 3], S14, r_uint(0xC1BDCEEEL)) # 4
        a = XX(F, a, b, c, d, inp[ 4], S11, r_uint(0xF57C0FAFL)) # 5
        d = XX(F, d, a, b, c, inp[ 5], S12, r_uint(0x4787C62AL)) # 6
        c = XX(F, c, d, a, b, inp[ 6], S13, r_uint(0xA8304613L)) # 7
        b = XX(F, b, c, d, a, inp[ 7], S14, r_uint(0xFD469501L)) # 8
        a = XX(F, a, b, c, d, inp[ 8], S11, r_uint(0x698098D8L)) # 9
        d = XX(F, d, a, b, c, inp[ 9], S12, r_uint(0x8B44F7AFL)) # 10
        c = XX(F, c, d, a, b, inp[10], S13, r_uint(0xFFFF5BB1L)) # 11
        b = XX(F, b, c, d, a, inp[11], S14, r_uint(0x895CD7BEL)) # 12
        a = XX(F, a, b, c, d, inp[12], S11, r_uint(0x6B901122L)) # 13
        d = XX(F, d, a, b, c, inp[13], S12, r_uint(0xFD987193L)) # 14
        c = XX(F, c, d, a, b, inp[14], S13, r_uint(0xA679438EL)) # 15
        b = XX(F, b, c, d, a, inp[15], S14, r_uint(0x49B40821L)) # 16

        # Round 2.
        S21, S22, S23, S24 = 5, 9, 14, 20

        a = XX(G, a, b, c, d, inp[ 1], S21, r_uint(0xF61E2562L)) # 17
        d = XX(G, d, a, b, c, inp[ 6], S22, r_uint(0xC040B340L)) # 18
        c = XX(G, c, d, a, b, inp[11], S23, r_uint(0x265E5A51L)) # 19
        b = XX(G, b, c, d, a, inp[ 0], S24, r_uint(0xE9B6C7AAL)) # 20
        a = XX(G, a, b, c, d, inp[ 5], S21, r_uint(0xD62F105DL)) # 21
        d = XX(G, d, a, b, c, inp[10], S22, r_uint(0x02441453L)) # 22
        c = XX(G, c, d, a, b, inp[15], S23, r_uint(0xD8A1E681L)) # 23
        b = XX(G, b, c, d, a, inp[ 4], S24, r_uint(0xE7D3FBC8L)) # 24
        a = XX(G, a, b, c, d, inp[ 9], S21, r_uint(0x21E1CDE6L)) # 25
        d = XX(G, d, a, b, c, inp[14], S22, r_uint(0xC33707D6L)) # 26
        c = XX(G, c, d, a, b, inp[ 3], S23, r_uint(0xF4D50D87L)) # 27
        b = XX(G, b, c, d, a, inp[ 8], S24, r_uint(0x455A14EDL)) # 28
        a = XX(G, a, b, c, d, inp[13], S21, r_uint(0xA9E3E905L)) # 29
        d = XX(G, d, a, b, c, inp[ 2], S22, r_uint(0xFCEFA3F8L)) # 30
        c = XX(G, c, d, a, b, inp[ 7], S23, r_uint(0x676F02D9L)) # 31
        b = XX(G, b, c, d, a, inp[12], S24, r_uint(0x8D2A4C8AL)) # 32

        # Round 3.
        S31, S32, S33, S34 = 4, 11, 16, 23

        a = XX(H, a, b, c, d, inp[ 5], S31, r_uint(0xFFFA3942L)) # 33
        d = XX(H, d, a, b, c, inp[ 8], S32, r_uint(0x8771F681L)) # 34
        c = XX(H, c, d, a, b, inp[11], S33, r_uint(0x6D9D6122L)) # 35
        b = XX(H, b, c, d, a, inp[14], S34, r_uint(0xFDE5380CL)) # 36
        a = XX(H, a, b, c, d, inp[ 1], S31, r_uint(0xA4BEEA44L)) # 37
        d = XX(H, d, a, b, c, inp[ 4], S32, r_uint(0x4BDECFA9L)) # 38
        c = XX(H, c, d, a, b, inp[ 7], S33, r_uint(0xF6BB4B60L)) # 39
        b = XX(H, b, c, d, a, inp[10], S34, r_uint(0xBEBFBC70L)) # 40
        a = XX(H, a, b, c, d, inp[13], S31, r_uint(0x289B7EC6L)) # 41
        d = XX(H, d, a, b, c, inp[ 0], S32, r_uint(0xEAA127FAL)) # 42
        c = XX(H, c, d, a, b, inp[ 3], S33, r_uint(0xD4EF3085L)) # 43
        b = XX(H, b, c, d, a, inp[ 6], S34, r_uint(0x04881D05L)) # 44
        a = XX(H, a, b, c, d, inp[ 9], S31, r_uint(0xD9D4D039L)) # 45
        d = XX(H, d, a, b, c, inp[12], S32, r_uint(0xE6DB99E5L)) # 46
        c = XX(H, c, d, a, b, inp[15], S33, r_uint(0x1FA27CF8L)) # 47
        b = XX(H, b, c, d, a, inp[ 2], S34, r_uint(0xC4AC5665L)) # 48

        # Round 4.
        S41, S42, S43, S44 = 6, 10, 15, 21

        a = XX(I, a, b, c, d, inp[ 0], S41, r_uint(0xF4292244L)) # 49
        d = XX(I, d, a, b, c, inp[ 7], S42, r_uint(0x432AFF97L)) # 50
        c = XX(I, c, d, a, b, inp[14], S43, r_uint(0xAB9423A7L)) # 51
        b = XX(I, b, c, d, a, inp[ 5], S44, r_uint(0xFC93A039L)) # 52
        a = XX(I, a, b, c, d, inp[12], S41, r_uint(0x655B59C3L)) # 53
        d = XX(I, d, a, b, c, inp[ 3], S42, r_uint(0x8F0CCC92L)) # 54
        c = XX(I, c, d, a, b, inp[10], S43, r_uint(0xFFEFF47DL)) # 55
        b = XX(I, b, c, d, a, inp[ 1], S44, r_uint(0x85845DD1L)) # 56
        a = XX(I, a, b, c, d, inp[ 8], S41, r_uint(0x6FA87E4FL)) # 57
        d = XX(I, d, a, b, c, inp[15], S42, r_uint(0xFE2CE6E0L)) # 58
        c = XX(I, c, d, a, b, inp[ 6], S43, r_uint(0xA3014314L)) # 59
        b = XX(I, b, c, d, a, inp[13], S44, r_uint(0x4E0811A1L)) # 60
        a = XX(I, a, b, c, d, inp[ 4], S41, r_uint(0xF7537E82L)) # 61
        d = XX(I, d, a, b, c, inp[11], S42, r_uint(0xBD3AF235L)) # 62
        c = XX(I, c, d, a, b, inp[ 2], S43, r_uint(0x2AD7D2BBL)) # 63
        b = XX(I, b, c, d, a, inp[ 9], S44, r_uint(0xEB86D391L)) # 64

        # Davies-Meyer style feed-forward into the running state.
        A += a
        B += b
        C += c
        D += d

        self.A, self.B, self.C, self.D = A, B, C, D
    def _finalize(self, digestfunc):
        """Logic to add the final padding and extract the digest.

        MD5 pads the message with a 0x80 byte and zero bytes so that the
        buffered data is exactly 56 bytes (mod 64), then appends the
        original message length in bits as two 32-bit words before one
        last transform.  The pre-padding state is saved and restored so
        that update() may still be called on this instance afterwards.

        :param digestfunc: callable mapping the four state words
                           (A, B, C, D) to the final digest value.
        :return: whatever *digestfunc* produces (raw or hex string).
        """
        # Save the state before adding the padding
        count = self.count
        input = self.input
        A = self.A
        B = self.B
        C = self.C
        D = self.D

        index = len(input)
        if index < 56:
            padLen = 56 - index
        else:
            # Not enough room left in this 64-byte block: pad through the
            # end of it and into the next one (64 - index + 56 bytes).
            padLen = 120 - index
        if padLen:
            self.update('\200' + '\000' * (padLen-1))

        # Append length (before padding).
        assert len(self.input) == 56
        W = self.uintbuffer
        _string2uintlist(self.input, 0, 14, W)
        length_in_bits = count << 3
        W[14] = r_uint(length_in_bits)
        W[15] = r_uint(length_in_bits >> 32)
        self._transform(W)

        # Store state in digest.
        digest = digestfunc(self.A, self.B, self.C, self.D)

        # Restore the saved state in case this instance is still used
        self.count = count
        self.input = input
        self.A = A
        self.B = B
        self.C = C
        self.D = D
        return digest
# Down from here all methods follow the Python Standard Library
# API of the md5 module.
    def update(self, inBuf):
        """Add to the current message.

        Update the md5 object with the string arg. Repeated calls
        are equivalent to a single call with the concatenation of all
        the arguments, i.e. m.update(a); m.update(b) is equivalent
        to m.update(a+b).

        The hash is immediately calculated for all full blocks. The final
        calculation is made in digest(). This allows us to keep an
        intermediate value for the hash, so that we only need to make
        minimal recalculation if we call update() to add more data to
        the hashed string.
        """
        leninBuf = len(inBuf)
        self.count += leninBuf
        index = len(self.input)
        # partLen is how many bytes are still missing from the buffered
        # (partial) 64-byte block.
        partLen = 64 - index
        assert partLen > 0

        if leninBuf >= partLen:
            # Complete the pending block first, then hash every further
            # full 64-byte block straight out of inBuf.
            W = self.uintbuffer
            self.input = self.input + inBuf[:partLen]
            _string2uintlist(self.input, 0, 16, W)
            self._transform(W)
            i = partLen
            while i + 64 <= leninBuf:
                _string2uintlist(inBuf, i, 16, W)
                self._transform(W)
                i = i + 64
            else:
                # NOTE: while/else with no break in the loop body -- this
                # always runs after the loop; the remaining tail bytes
                # become the new partial-block buffer.
                self.input = inBuf[i:leninBuf]
        else:
            self.input = self.input + inBuf
    def digest(self):
        """Terminate the message-digest computation and return digest.

        Return the digest of the strings passed to the update()
        method so far. This is a 16-byte string which may contain
        non-ASCII characters, including null bytes.
        """
        return self._finalize(_state2string)
    def hexdigest(self):
        """Terminate and return digest in HEX form.

        Like digest() except the digest is returned as a string of
        length 32, containing only hexadecimal digits. This may be
        used to exchange the value safely in email or other non-
        binary environments.
        """
        return self._finalize(_state2hexstring)
    def copy(self):
        """Return a clone object.

        Return a copy ('clone') of the md5 object. This can be used
        to efficiently compute the digests of strings that share
        a common initial substring.
        """
        clone = RMD5()
        clone._copyfrom(self)
        return clone
def _copyfrom(self, other):
"""Copy all state from 'other' into 'self'.
"""
self.count = other.count
self.input = other.input
self.A = other.A
self.B = other.B
self.C = other.C
self.D = other.D
# synonyms to build new RMD5 objects, for compatibility with the
# CPython md5 module interface.
md5 = RMD5
new = RMD5
digest_size = 16    # length of the binary digest, in bytes
| mit |
neteler/QGIS | python/plugins/processing/algs/grass/ext/r_sum.py | 7 | 1211 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_sum.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import HtmlReportPostProcessor
def postProcessResults(alg):
    """Delegate post-processing of the algorithm's results to the shared
    HTML report post-processor (imported above)."""
    HtmlReportPostProcessor.postProcessResults(alg)
| gpl-2.0 |
alex/boto | boto/ec2/bundleinstance.py | 152 | 2754 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Bundle Task
"""
from boto.ec2.ec2object import EC2Object
class BundleInstanceTask(EC2Object):
    """Holds the state of one EC2 instance-bundling task as parsed from
    the API's XML response."""

    # XML element name -> attribute name.  Elements not listed here are
    # stored verbatim under their original name (see endElement).
    _XML_TO_ATTR = {
        'bundleId': 'id',
        'instanceId': 'instance_id',
        'progress': 'progress',
        'startTime': 'start_time',
        'state': 'state',
        'bucket': 'bucket',
        'prefix': 'prefix',
        'uploadPolicy': 'upload_policy',
        'uploadPolicySignature': 'upload_policy_signature',
        'updateTime': 'update_time',
        'code': 'code',
        'message': 'message',
    }

    def __init__(self, connection=None):
        super(BundleInstanceTask, self).__init__(connection)
        # Initialise every known field to None; they are filled in as the
        # XML response is parsed.
        for attr in self._XML_TO_ATTR.values():
            setattr(self, attr, None)

    def __repr__(self):
        return 'BundleInstanceTask:%s' % self.id

    def startElement(self, name, attrs, connection):
        # No nested elements need special handling.
        return None

    def endElement(self, name, value, connection):
        # Map the element to its attribute name, falling back to the raw
        # element name (same behaviour as the original if/elif chain's
        # final setattr branch).
        setattr(self, self._XML_TO_ATTR.get(name, name), value)
| mit |
wenxinguo/xisip | tests/pjsua/scripts-call/301_ice_public_a.py | 59 | 1052 | # $Id: 301_ice_public_a.py 2392 2008-12-22 18:54:58Z bennylp $
#
from inc_cfg import *

# Note:
# - need --dis-codec to make INVITE packet less than typical MTU
# Both endpoints register against sip.pjsip.org and negotiate media via
# ICE with a public STUN server.  The --dis-codec=i/s/g flags disable
# codec groups by first letter -- presumably iLBC/speex/GSM; confirm
# against pjsua's --dis-codec matching rules.
uas_args = "--null-audio --id=\"<sip:test1@pjsip.org>\" --registrar=sip:sip.pjsip.org --username=test1 --password=test1 --realm=pjsip.org --proxy=\"sip:sip.pjsip.org;lr\" --rtp-port 0 --stun-srv stun.pjsip.org --use-ice --use-compact-form --max-calls 1 --dis-codec=i --dis-codec=s --dis-codec=g"

uac_args = "--null-audio --id=\"<sip:test2@pjsip.org>\" --registrar=sip:sip.pjsip.org --username=test2 --password=test2 --realm=pjsip.org --proxy=\"sip:sip.pjsip.org;lr\" --rtp-port 0 --stun-srv stun.pjsip.org --use-ice --use-compact-form --max-calls 1 --dis-codec=i --dis-codec=s --dis-codec=g"

# Two pjsua instances: "callee" (UAS) answers, "caller" (UAC) originates.
# Both must complete SIP registration before the call is attempted.
test_param = TestParam(
    "ICE via public internet",
    [
        InstanceParam("callee", uas_args,
                      uri="<sip:test1@pjsip.org>",
                      have_reg=True, have_publish=False),
        InstanceParam("caller", uac_args,
                      uri="<sip:test2@pjsip.org>",
                      have_reg=True, have_publish=False),
    ]
)
| gpl-2.0 |
cloudbau/cinder | cinder/volume/drivers/nexenta/nfs.py | 1 | 16316 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.nfs` -- Driver to store volumes on NexentaStor Appliance.
=======================================================================
.. automodule:: nexenta.nfs
.. moduleauthor:: Mikhail Khodos <hodosmb@gmail.com>
.. moduleauthor:: Victor Rodionov <victor.rodionov@nexenta.com>
"""
import hashlib
import os
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import units
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
from cinder.volume.drivers import nfs
# Driver version string (see the class docstring for the history).
VERSION = '1.1.2'
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
# Register the Nexenta NFS options with the global config object.
CONF.register_opts(options.NEXENTA_NFS_OPTIONS)
class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
    """Executes volume driver commands on Nexenta Appliance.

    Version history:
        1.0.0 - Initial driver version.
        1.1.0 - Auto sharing for enclosing folder.
        1.1.1 - Added caching for NexentaStor appliance 'volroot' value.
        1.1.2 - Ignore "folder does not exist" error in delete_volume and
                delete_snapshot method.
    """

    VERSION = VERSION

    # Prefix used to look up driver-specific configuration options
    # (e.g. nexenta_shares_config, nexenta_sparsed_volumes).
    driver_prefix = 'nexenta'

    def __init__(self, *args, **kwargs):
        super(NexentaNfsDriver, self).__init__(*args, **kwargs)
        if self.configuration:
            self.configuration.append_config_values(
                options.NEXENTA_NFS_OPTIONS)
        conf = self.configuration
        self.nms_cache_volroot = conf.nexenta_nms_cache_volroot
        # Cache of NMS proxy -> 'volroot' property (see _get_volroot).
        self._nms2volroot = {}

    def do_setup(self, context):
        super(NexentaNfsDriver, self).do_setup(context)
        # Populate self.shares and self.share2nms from the shares config
        # file named by <driver_prefix>_shares_config.
        self._load_shares_config(getattr(self.configuration,
                                         self.driver_prefix +
                                         '_shares_config'))

    def check_for_setup_error(self):
        """Verify that the volume for our folder exists.

        :raise: :py:exc:`LookupError`
        """
        if self.share2nms:
            for nfs_share in self.share2nms:
                nms = self.share2nms[nfs_share]
                volume_name, dataset = self._get_share_datasets(nfs_share)
                if not nms.volume.object_exists(volume_name):
                    raise LookupError(_("Volume %s does not exist in Nexenta "
                                        "Store appliance"), volume_name)
                folder = '%s/%s' % (volume_name, dataset)
                if not nms.folder.object_exists(folder):
                    raise LookupError(_("Folder %s does not exist in Nexenta "
                                        "Store appliance"), folder)
                # Make sure the enclosing folder is NFS-shared up front.
                self._share_folder(nms, volume_name, dataset)

    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info.

        :param volume: volume reference
        :param connector: connector reference
        """
        export = '%s/%s' % (volume['provider_location'], volume['name'])
        data = {'export': export, 'name': 'volume'}
        if volume['provider_location'] in self.shares:
            data['options'] = self.shares[volume['provider_location']]
        return {
            'driver_volume_type': self.driver_volume_type,
            'data': data
        }

    def _do_create_volume(self, volume):
        # Create the backing folder on the appliance, share it, then lay
        # down the volume file (sparse or fully allocated).  On any NMS
        # failure the folder is destroyed again before re-raising.
        nfs_share = volume['provider_location']
        nms = self.share2nms[nfs_share]

        vol, dataset = self._get_share_datasets(nfs_share)
        folder = '%s/%s' % (dataset, volume['name'])
        LOG.debug(_('Creating folder on Nexenta Store %s'), folder)
        nms.folder.create_with_props(
            vol, folder,
            {'compression': self.configuration.nexenta_volume_compression}
        )

        volume_path = self.remote_path(volume)
        volume_size = volume['size']
        try:
            self._share_folder(nms, vol, folder)

            if getattr(self.configuration,
                       self.driver_prefix + '_sparsed_volumes'):
                self._create_sparsed_file(nms, volume_path, volume_size)
            else:
                compression = nms.folder.get('compression')
                if compression != 'off':
                    # Disable compression, because otherwise will not use space
                    # on disk.
                    nms.folder.set('compression', 'off')
                try:
                    self._create_regular_file(nms, volume_path, volume_size)
                finally:
                    if compression != 'off':
                        # Backup default compression value if it was changed.
                        nms.folder.set('compression', compression)

            self._set_rw_permissions_for_all(nms, volume_path)
        except nexenta.NexentaException as exc:
            try:
                nms.folder.destroy('%s/%s' % (vol, folder))
            except nexenta.NexentaException:
                LOG.warning(_("Cannot destroy created folder: "
                              "%(vol)s/%(folder)s"),
                            {'vol': vol, 'folder': folder})
            raise exc

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create new volume from other's snapshot on appliance.

        :param volume: reference of volume to be created
        :param snapshot: reference of source snapshot
        """
        self._ensure_shares_mounted()

        snapshot_vol = self._get_snapshot_volume(snapshot)
        nfs_share = snapshot_vol['provider_location']
        volume['provider_location'] = nfs_share
        nms = self.share2nms[nfs_share]

        vol, dataset = self._get_share_datasets(nfs_share)
        snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'],
                                         snapshot['name'])
        folder = '%s/%s' % (dataset, volume['name'])
        # ZFS clone of the snapshot becomes the new volume's folder.
        nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder))

        try:
            self._share_folder(nms, vol, folder)
        except nexenta.NexentaException:
            try:
                nms.folder.destroy('%s/%s' % (vol, folder), '')
            except nexenta.NexentaException:
                LOG.warning(_("Cannot destroy cloned folder: "
                              "%(vol)s/%(folder)s"),
                            {'vol': vol, 'folder': folder})
            raise

        return {'provider_location': volume['provider_location']}

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume.

        :param volume: new volume reference
        :param src_vref: source volume reference
        """
        LOG.info(_('Creating clone of volume: %s'), src_vref['id'])
        snapshot = {'volume_name': src_vref['name'],
                    'name': 'cinder-clone-snap-%(id)s' % volume}
        # We don't delete this snapshot, because this snapshot will be origin
        # of new volume. This snapshot will be automatically promoted by NMS
        # when user will delete its origin.
        self.create_snapshot(snapshot)
        try:
            self.create_volume_from_snapshot(volume, snapshot)
        except nexenta.NexentaException:
            LOG.error(_('Volume creation failed, deleting created snapshot '
                        '%(volume_name)s@%(name)s'), snapshot)
            try:
                self.delete_snapshot(snapshot)
            except (nexenta.NexentaException, exception.SnapshotIsBusy):
                LOG.warning(_('Failed to delete zfs snapshot '
                              '%(volume_name)s@%(name)s'), snapshot)
            raise

    def delete_volume(self, volume):
        """Deletes a logical volume.

        :param volume: volume reference
        """
        super(NexentaNfsDriver, self).delete_volume(volume)

        nfs_share = volume['provider_location']
        if nfs_share:
            nms = self.share2nms[nfs_share]
            vol, parent_folder = self._get_share_datasets(nfs_share)
            folder = '%s/%s/%s' % (vol, parent_folder, volume['name'])
            try:
                nms.folder.destroy(folder, '')
            except nexenta.NexentaException as exc:
                # Deleting an already-deleted folder is not an error
                # (see version 1.1.2 in the class docstring).
                if 'does not exist' in exc.args[0]:
                    LOG.info(_('Folder %s does not exist, it seems it was '
                               'already deleted.'), folder)
                    return
                raise

    def create_snapshot(self, snapshot):
        """Creates a snapshot.

        :param snapshot: snapshot reference
        """
        volume = self._get_snapshot_volume(snapshot)
        nfs_share = volume['provider_location']
        nms = self.share2nms[nfs_share]
        vol, dataset = self._get_share_datasets(nfs_share)
        folder = '%s/%s/%s' % (vol, dataset, volume['name'])
        nms.folder.create_snapshot(folder, snapshot['name'], '-r')

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot.

        :param snapshot: snapshot reference
        """
        volume = self._get_snapshot_volume(snapshot)
        nfs_share = volume['provider_location']
        nms = self.share2nms[nfs_share]
        vol, dataset = self._get_share_datasets(nfs_share)
        folder = '%s/%s/%s' % (vol, dataset, volume['name'])
        try:
            nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '')
        except nexenta.NexentaException as exc:
            if 'does not exist' in exc.args[0]:
                # NOTE(review): this logs the whole snapshot dict, not
                # snapshot['name'] as create does -- probably unintended,
                # but only affects the log message.
                LOG.info(_('Snapshot %s does not exist, it seems it was '
                           'already deleted.'), '%s@%s' % (folder, snapshot))
                return
            raise

    def _create_sparsed_file(self, nms, path, size):
        """Creates file with 0 disk usage.

        :param nms: nms object
        :param path: path to new file
        :param size: size of file
        """
        block_size_mb = 1
        block_count = size * units.GiB / (block_size_mb * units.MiB)

        # dd with count=0 and seek=<blocks> extends the file without
        # writing data, producing a sparse file.
        nms.appliance.execute(
            'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=0 seek=%(count)d' % {
                'path': path,
                'bs': block_size_mb,
                'count': block_count
            }
        )

    def _create_regular_file(self, nms, path, size):
        """Creates regular file of given size.
        Takes a lot of time for large files.

        :param nms: nms object
        :param path: path to new file
        :param size: size of file
        """
        block_size_mb = 1
        block_count = size * units.GiB / (block_size_mb * units.MiB)

        LOG.info(_('Creating regular file: %s.'
                   'This may take some time.') % path)

        nms.appliance.execute(
            'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {
                'path': path,
                'bs': block_size_mb,
                'count': block_count
            }
        )

        LOG.info(_('Regular file: %s created.') % path)

    def _set_rw_permissions_for_all(self, nms, path):
        """Sets 666 permissions for the path.

        :param nms: nms object
        :param path: path to file
        """
        nms.appliance.execute('chmod ugo+rw %s' % path)

    def local_path(self, volume):
        """Get volume path (mounted locally fs path) for given volume.

        :param volume: volume reference
        """
        nfs_share = volume['provider_location']
        return os.path.join(self._get_mount_point_for_share(nfs_share),
                            volume['name'], 'volume')

    def _get_mount_point_for_share(self, nfs_share):
        """Returns path to mount point NFS share.

        The mount point directory name is the md5 hex digest of the share
        string.  (Python 2 code: nfs_share is a byte string here.)

        :param nfs_share: example 172.18.194.100:/var/nfs
        """
        return os.path.join(self.configuration.nexenta_mount_point_base,
                            hashlib.md5(nfs_share).hexdigest())

    def remote_path(self, volume):
        """Get volume path (mounted remotely fs path) for given volume.

        :param volume: volume reference
        """
        nfs_share = volume['provider_location']
        share = nfs_share.split(':')[1].rstrip('/')
        return '%s/%s/volume' % (share, volume['name'])

    def _share_folder(self, nms, volume, folder):
        """Share NFS folder on NexentaStor Appliance.

        :param nms: nms object
        :param volume: volume name
        :param folder: folder name
        """
        path = '%s/%s' % (volume, folder.lstrip('/'))
        share_opts = {
            'read_write': '*',
            'read_only': '',
            'root': 'nobody',
            'extra_options': 'anon=0',
            'recursive': 'true',
            'anonymous_rw': 'true',
        }
        LOG.debug(_('Sharing folder %s on Nexenta Store'), folder)
        nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,
                                    share_opts)

    def _load_shares_config(self, share_file):
        # Parse the shares config file into self.shares (share -> mount
        # options) and self.share2nms (share -> NMS proxy).
        self.shares = {}
        self.share2nms = {}

        for share in self._read_config_file(share_file):
            # A configuration line may be either:
            #  host:/share_name  http://user:pass@host:[port]/
            # or
            #  host:/share_name  http://user:pass@host:[port]/
            #  -o options=123,rw --other
            if not share.strip():
                continue
            if share.startswith('#'):
                continue

            share_info = share.split(' ', 2)

            share_address = share_info[0].strip().decode('unicode_escape')
            nms_url = share_info[1].strip()
            share_opts = share_info[2].strip() if len(share_info) > 2 else None

            self.shares[share_address] = share_opts
            self.share2nms[share_address] = self._get_nms_for_url(nms_url)

        LOG.debug(_('Shares loaded: %s') % self.shares)

    def _get_capacity_info(self, nfs_share):
        """Calculate available space on the NFS share.

        :param nfs_share: example 172.18.194.100:/var/nfs
        :return: tuple (total, free, allocated) in bytes
        """
        nms = self.share2nms[nfs_share]
        ns_volume, ns_folder = self._get_share_datasets(nfs_share)
        folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,
                                                             ns_folder), '')
        free = utils.str2size(folder_props['available'])
        allocated = utils.str2size(folder_props['used'])
        return free + allocated, free, allocated

    def _get_nms_for_url(self, url):
        """Returns initialized nms object for url."""
        auto, scheme, user, password, host, port, path =\
            utils.parse_nms_url(url)
        return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
                                        password, auto=auto)

    def _get_snapshot_volume(self, snapshot):
        # Look up the volume the snapshot belongs to via the DB layer.
        ctxt = context.get_admin_context()
        return db.volume_get(ctxt, snapshot['volume_id'])

    def _get_volroot(self, nms):
        """Returns volroot property value from NexentaStor appliance."""
        if not self.nms_cache_volroot:
            return nms.server.get_prop('volroot')
        # Cache the value per NMS proxy to avoid repeated appliance calls.
        if nms not in self._nms2volroot:
            self._nms2volroot[nms] = nms.server.get_prop('volroot')
        return self._nms2volroot[nms]

    def _get_share_datasets(self, nfs_share):
        # Strip the appliance's volroot prefix from the share path and
        # split the remainder into (volume, folder) dataset names.
        nms = self.share2nms[nfs_share]
        volroot = self._get_volroot(nms)
        path = nfs_share.split(':')[1][len(volroot):].strip('/')
        volume_name = path.split('/')[0]
        folder_name = '/'.join(path.split('/')[1:])
        return volume_name, folder_name
| apache-2.0 |
graphite/TeX4Web-INVENIO | modules/miscutil/lib/plotextractor_getter.py | 4 | 23765 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import urllib, time, os, sys, re
from invenio.config import CFG_TMPDIR, \
CFG_PLOTEXTRACTOR_SOURCE_BASE_URL, \
CFG_PLOTEXTRACTOR_SOURCE_TARBALL_FOLDER, \
CFG_PLOTEXTRACTOR_SOURCE_PDF_FOLDER, \
CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT
from invenio.plotextractor_config import CFG_PLOTEXTRACTOR_DESY_BASE, \
CFG_PLOTEXTRACTOR_DESY_PIECE
from invenio.search_engine import get_record
from invenio.bibrecord import record_get_field_instances, \
field_get_subfield_values
from invenio.shellutils import run_shell_command
from invenio.plotextractor_output_utils import write_message
PDF_EXTENSION = '.pdf'
ARXIV_HEADER = 'arXiv:'

# Each HEP area is a list of [url fragment, first available YYMM,
# filename prefix]; see the index constants below.
HEP_EX = ['hep-ex/', 9405, ARXIV_HEADER + 'hep-ex_'] # experimental
# a note about hep-ex: the hep-ex papers from 9403 and 9404 are stored
# in arXiv's servers as hep-ph
HEP_LAT = ['hep-lat/', 9107, ARXIV_HEADER + 'hep-lat_'] # lattice
HEP_PH = ['hep-ph/', 9203, ARXIV_HEADER + 'hep-ph_'] # phenomenology
HEP_TH = ['hep-th/', 9108, ARXIV_HEADER + 'hep-th_'] # theory

HEP_AREAS = [HEP_EX, HEP_LAT, HEP_PH, HEP_TH]

# Indices into the HEP_* area lists above.
URL = 0
BEGIN_YEAR_MONTH_INDEX = 1
AREA_STRING_INDEX = 2

# Milestone dates, all as YYMM integers.
URL_MOVE = int('0704')
CENTURY_END = int('9912')
CENTURY_BEGIN = int('0001')
ARBITRARY_FROM_DATE = int('9101')
FIX_FOR_YEAR_END = 88

# Current date as a YYMM integer (last two digits of the year).
current_yearmonth = int(('%02d%02d' % (time.localtime().tm_year, \
                         time.localtime().tm_mon))[2:])

"""
each of the areas of hep began in a different year and month.
beginning in 0704, i.e. April 2007, arXiv moved its URLS from
ARXIV_BASE + E_PRINT + HEP_AREA + <<numbernodot>>
to
ARXIV_BASE + E_PRINT + <<number.with.dot>>
the papers for a given month are numbered between yymm.0001 and yymm.9999
after the URL move, and before that they are between yymm001 and yymm999
"""

# Command-line option names and the corresponding getopt specs.
help_param = 'help'
dir_param = 'dir'
from_param = 'from'
from_index_param = 'fromindex'
ref_file_param = 'reffile'
single_param = 'single'

param_abbrs = 'hd:f:i:r:s:'
params = [help_param, dir_param + '=', from_param + '=', from_index_param + '=',
          ref_file_param + '=', single_param + '=']
def harvest(to_dir, from_date, from_index):
    """
    Calls upon arXiv using URLS as described above in order to grab
    all the tarballs from HEP areas.

    @param: dir (string): the directory where everything that gets
        downloaded will sit
    @param: from_date (int): the date from which we would like to harvest,
        in YYMM format
    @param: from_index (int): the index where we want to begin our harvest
        in YYMM. i.e. we want to start with the 345th record in 1002.

    @output: TONS OF .tar.gz FILES FROM ARXIV
    @return: (none)
    """
    global current_yearmonth
    # Sanity checks: a date after today (but in the 20xx range) or with a
    # month component above 12 is rejected outright.
    if from_date > current_yearmonth and from_date < ARBITRARY_FROM_DATE:
        write_message('Please choose a from date that is not in the future!')
        sys.exit(1)
    if from_date % 100 > 12:
        write_message('Please choose a from date in the form YYMM')
        sys.exit(1)

    # Old-style (pre-0704) URLs are harvested per HEP area.
    if from_date >= ARBITRARY_FROM_DATE or from_date < URL_MOVE:
        for area in HEP_AREAS:
            yearmonthindex = area[BEGIN_YEAR_MONTH_INDEX]

            # nasty casing!
            # I find this particularly horrid because we have to wrap dates..
            # i.e. although 9901 is more than 0001, we might want things in
            # 0001 and not from 9901
            if from_date < current_yearmonth:
                # we want to start in the new century; skip the while below
                yearmonthindex = CENTURY_END
            elif from_date < CENTURY_END:
                yearmonthindex = from_date

            # grab stuff from between 92 and 99
            old_URL_harvest(yearmonthindex, CENTURY_END, to_dir, area)

            yearmonthindex = CENTURY_BEGIN
            # more nasty casing
            if from_date < URL_MOVE:
                # that means we want to start sometime before the weird
                # url change
                yearmonthindex = from_date
            elif from_date > URL_MOVE and from_date < ARBITRARY_FROM_DATE:
                # we don't want to start yet
                yearmonthindex = URL_MOVE

            # grab stuff from between 00 and 07
            old_URL_harvest(yearmonthindex, URL_MOVE, to_dir, area)

    # also after the URL move, there was no distinction between
    # papers from different areas. hence, outside the for loop
    # even more nasty casing!
    if from_date < current_yearmonth and from_date > URL_MOVE:
        # we want to start someplace after the URL move and before now
        yearmonthindex = from_date
    else:
        yearmonthindex = URL_MOVE

    # grab stuff from between 07 and today
    new_URL_harvest(yearmonthindex, from_index, to_dir)
def make_single_directory(to_dir, dirname):
    """Create (if necessary) a per-record subdirectory and return its path.

    Tries to create ``to_dir/dirname``.  If the directory cannot be
    created, a message is logged and ``to_dir`` itself is returned so
    that the caller still has somewhere to write files.

    @param: to_dir (string): directory in which to create the subdirectory
    @param: dirname (string): name of the subdirectory to create
    @return: absolute path of the (possibly pre-existing) subdirectory,
        or to_dir on failure
    """
    target = os.path.join(to_dir, dirname)
    if os.path.isdir(target):
        return target
    try:
        os.mkdir(target)
    except OSError:
        write_message('Failed to make new dir...')
        return to_dir
    return target
def make_useful_directories(yearmonthindex, to_dir):
    """
    Builds up the hierarchical filestructure for saving these things
    in a useful way.

    @param: yearmonthindex (int): YYMM
    @param: to_dir (string): where we want to build the directories from

    @return month_dir (string): the new directory we are going to put
        stuff in
    """
    # Split YYMM into year and month with explicit floor division.  This
    # code was written for Python 2, where '/' on ints already floors;
    # '//' keeps the result an int under Python 3's true division too.
    year = yearmonthindex // 100
    if year >= (ARBITRARY_FROM_DATE // 100):
        # 91..99 -> 19xx; everything below the arbitrary cutoff -> 20xx.
        year = '19%02d' % year
    else:
        year = '20%02d' % year
    month = '%02d' % (yearmonthindex % 100)

    year_dir = os.path.join(to_dir, year)
    if not os.path.isdir(year_dir):
        os.mkdir(year_dir)
    month_dir = os.path.join(year_dir, month)
    if not os.path.isdir(month_dir):
        os.mkdir(month_dir)
    return month_dir
def get_list_of_all_matching_files(basedir, filetypes):
    """
    This function uses the os module in order to crawl
    through the directory tree rooted at basedir and find all the files
    therein that include filetype in their 'file' output. Returns a list
    of absolute paths to all files.

    @param: basedir (string): the directory where we want to start crawling
    @param: filetypes ([string, string]): something that will be contained in
        the output of running 'file' on the types of files we're looking for
    @return: file_paths ([string, string, ...]): a list of full paths to
        the files that we discovered
    """
    file_paths = []
    for dirpath, dummy0, filenames in os.walk(basedir):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            dummy1, cmd_out, dummy2 = run_shell_command('file %s', (full_path,))
            # Append each file at most once: the original appended one
            # copy per matching filetype string, producing duplicates
            # when several patterns matched the same 'file' output.
            for filetype in filetypes:
                if cmd_out.find(filetype) > -1:
                    file_paths.append(full_path)
                    break
    return file_paths
def tarballs_by_recids(recids, sdir):
    """
    Take a string representing one recid or several and get the associated
    tarballs for those ids.

    Accepted forms, which may be mixed: a single id ('12'), a
    comma-separated list ('12,15,17') and inclusive ranges ('12-17').

    @param: recids (string): the record id or ids
    @param: sdir (string): where the tarballs should live
    @return: tarballs ([string, string, ...]): locations of tarballs
    """
    # Parse every comma-separated token uniformly.  The original code
    # duplicated this logic for the no-comma case and was broken there:
    # it referenced an undefined name ('recid') and assigned a bare int
    # to list_of_ids, which the loop below cannot iterate.
    list_of_ids = []
    for token in recids.split(','):
        token = token.strip()
        if not token:
            continue
        if '-' in token:
            low, high = token.split('-')
            # Ranges include both endpoints: '12-17' means 12..17.
            list_of_ids.extend(range(int(low), int(high) + 1))
        else:
            list_of_ids.append(int(token))

    # Resolve each recid to its arXiv identifier (MARC tag 037 with
    # source subfield $9 == 'arXiv').
    arXiv_ids = []
    for recid in list_of_ids:
        rec = get_record(recid)
        for afieldinstance in record_get_field_instances(rec, tag='037'):
            if 'arXiv' == field_get_subfield_values(afieldinstance, '9')[0]:
                arXiv_id = field_get_subfield_values(afieldinstance, 'a')[0]
                arXiv_ids.append(arXiv_id)
    return tarballs_by_arXiv_id(arXiv_ids, sdir)
def tarballs_by_arXiv_id(arXiv_ids, sdir):
    """Download the source tarball for each given arXiv id.

    @param: arXiv_ids ([string, string, ...]): the arXiv ids you
        would like to have tarballs for
    @param: sdir (string): the place to download these tarballs to
    @return: tarballs ([string, ...]): a list of the tarballs downloaded
    """
    downloaded = []
    for identifier in arXiv_ids:
        if 'arXiv' not in identifier:
            identifier = 'arXiv:' + identifier
        tarball, dummy_pdf = harvest_single(identifier, sdir, ("tarball",))
        if tarball is not None:
            downloaded.append(tarball)
        # Be polite to the arXiv servers between requests.
        time.sleep(CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT)
    return downloaded
def parse_and_download(infile, sdir):
    """
    Read the information in the input file and download the corresponding
    tarballs from arxiv.

    Each non-empty line is either a direct 'http://...' URL to fetch, or
    an 'arXiv...' identifier resolved via tarballs_by_arXiv_id; anything
    else is ignored.

    @param: infile (string): the name of the file to parse
    @param: sdir (string): where to put the downloaded tarballs
    @return: tarfiles ([string, ...]): paths of the downloaded files
    """
    tarfiles = []
    tardir = os.path.join(sdir, 'tarballs')
    if not os.path.isdir(tardir):
        try:
            os.makedirs(tardir)
        except OSError:
            # Best effort: fall back to dumping files loose in sdir.
            write_message(sys.exc_info()[0])
            write_message('files will be loose, not in ' + tardir)
            tardir = sdir

    # Use a context manager so the listing file is always closed (the
    # original opened it and never closed it).
    with open(infile) as listing:
        for line in listing:
            line = line.strip()
            if line.startswith('http://'):
                url = line
                filename = os.path.join(tardir, url.split('/')[-1])
                try:
                    urllib.urlretrieve(url, filename)
                    tarfiles.append(filename)
                    write_message('Downloaded to ' + filename)
                    time.sleep(CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT)  # be nice!
                except Exception:
                    # Keep the original best-effort behaviour, but avoid a
                    # bare except that would also swallow SystemExit etc.
                    write_message(filename + ' may already exist')
                    write_message(sys.exc_info()[0])
            elif line.startswith('arXiv'):
                tarfiles.extend(tarballs_by_arXiv_id([line], sdir))
    return tarfiles
def harvest_single(single, to_dir, selection=("tarball", "pdf")):
    """
    If we only want to harvest one id (arXiv or DESY), we can use this.

    @param: single (string): an id from arXiv or DESY
    @param: to_dir (string): where the output should be saved
    @param: selection (tuple): which artifacts to fetch ("tarball", "pdf")

    @output: the PDF and source tarball (if applicable) of this single record
    @return: (tarball, pdf): the location of the source tarball and PDF, None
        if not found
    """
    # Case 1: arXiv id, harvesting straight from arxiv.org
    if single.find('arXiv') > -1 and \
           CFG_PLOTEXTRACTOR_SOURCE_BASE_URL == 'http://arxiv.org/':
        # matches old-style ids ("hep-ph/9901001") and new-style ("0704.0001")
        id_str = re.findall('[a-zA-Z\\-]+/\\d+|\\d+\\.\\d+', single)[0]
        idno = id_str.split('/')
        if len(idno) > 0:
            idno = idno[-1]
        # first four digits of the numeric part are YYMM
        yymm = int(idno[:4])
        yymm_dir = make_useful_directories(yymm, to_dir)
        url_for_file = CFG_PLOTEXTRACTOR_SOURCE_BASE_URL + CFG_PLOTEXTRACTOR_SOURCE_TARBALL_FOLDER + \
                       id_str
        url_for_pdf = CFG_PLOTEXTRACTOR_SOURCE_BASE_URL + CFG_PLOTEXTRACTOR_SOURCE_PDF_FOLDER + \
                      id_str
        # '/' is not usable in a filename; keep the id readable with '_'
        individual_file = 'arXiv:' + id_str.replace('/', '_')
        individual_dir = make_single_directory(yymm_dir, individual_file)
        abs_path = os.path.join(individual_dir, individual_file)
        tarball = abs_path
        pdf = abs_path + '.pdf'
        write_message('download ' + url_for_file + ' to ' + abs_path)
        if "tarball" in selection and not download(url_for_file, individual_file, individual_dir):
            write_message('download of tarball failed/skipped')
            tarball = None
        if "pdf" in selection and not download(url_for_pdf, individual_file + '.pdf', individual_dir):
            write_message('download of pdf failed/skipped')
            pdf = None
        return (tarball, pdf)
    # Case 2: arXiv id, but a non-arxiv.org source is configured
    elif single.find('arXiv') > -1 and CFG_PLOTEXTRACTOR_SOURCE_BASE_URL != '':
        # hmm... is it a filesystem?
        if CFG_PLOTEXTRACTOR_SOURCE_BASE_URL.startswith('/'):
            if not os.path.exists(CFG_PLOTEXTRACTOR_SOURCE_BASE_URL):
                write_message('PROBLEM WITH CFG_PLOTEXTRACTOR_SOURCE_BASE_URL: we cannot ' + \
                              'find this folder!')
                return (None, None)
            # NOTE(review): os.walk yields (dirpath, dirnames, filenames), so
            # 'files' here actually binds the *directory* names -- verify that
            # this is intended before relying on it.
            for root, files, dummy in os.walk(CFG_PLOTEXTRACTOR_SOURCE_BASE_URL):
                for file_name in files:
                    id_no = single.replace('arXiv', '')
                    # try every plausible spelling of the id in the file name
                    if file_name.find(id_no) > -1 or\
                       file_name.find(id_no.replace('/', '_')) > -1 or\
                       file_name.find(id_no.replace('_', '/')) > -1 or\
                       file_name.find(id_no.replace(':', '')) > -1:
                        # that's our file! probably.
                        return (os.path.join(root, file_name), None)
            # well, no luck there
            return (None, None)
        # okay... is it... a website?
        elif CFG_PLOTEXTRACTOR_SOURCE_BASE_URL.startswith('http') and "tarball" in selection:
            url_for_file = CFG_PLOTEXTRACTOR_SOURCE_BASE_URL + single
            individual_file = os.path.join(to_dir, single)
            download(url_for_file, individual_file, to_dir)
            return (individual_file, None)
        # well, I don't know what to do with it
        else:
            write_message('unsure how to handle CFG_PLOTEXTRACTOR_SOURCE_BASE_URL. ' + \
                          'please fix the harvest_single function in ' + \
                          'miscutil/lib/plotextractor_getter.py')
            return (None, None)
    # Case 3: DESY id -- only a PDF is available
    elif single.find('DESY') > -1 and "pdf" in selection:
        # also okay!
        idno = re.findall('\\d{2,4}-\\d{3}', single)[0]
        year, number = idno.split('-')
        if len(year) < 4:
            # expand two-digit years; the century pivot is 92
            if int(year) > 92:
                year = '19' + year
            else:
                year = '20' + year
        year_dir = make_single_directory(to_dir, year)
        desy_dir = make_single_directory(year_dir, 'DESY')
        individual_dir = make_single_directory(desy_dir, number)
        id_no = year[2:] + '-' + number + '.pdf'
        url_for_file = CFG_PLOTEXTRACTOR_DESY_BASE + year + \
                       CFG_PLOTEXTRACTOR_DESY_PIECE + id_no
        individual_file = id_no
        write_message('download ' + url_for_file + ' to ' + \
                      os.path.join(individual_dir, individual_file))
        download(url_for_file, individual_file, individual_dir)
        return (None, individual_file)
    # nothing matched: neither an arXiv nor a DESY id we can handle
    write_message('END')
    return (None, None)
def src_pdf_from_marc(marc_file):
    """
    Given a marc file, this function attempts to determine where to find
    a pdf for that record and downloads it.

    @param: marc_file (string): the location of a marc file we can look at
    @return: pdfloc (string): the location of the downloaded PDF source file,
        None if no pdf was downloaded
    """
    if not os.path.exists(marc_file):
        return None
    # context manager guarantees the marc file is closed
    with open(marc_file) as marc:
        marc_text = marc.read()
    arXiv_match = '(([a-zA-Z\\-]+/\\d{7})|(\\d{4}\\.\\d{4}))'
    DESY_match = 'DESY-\\d{2,4}-\\d{3}'
    pdf_loc = None
    to_dir = os.path.join(CFG_TMPDIR, 'plotdata')
    possible_match = re.search(arXiv_match, marc_text)
    if possible_match is not None:
        # it's listed on arXiv, hooray!
        arXiv_id = possible_match.group(0)
        dummy1, pdf_loc = harvest_single(arXiv_id, to_dir, ("pdf",))
    possible_match = re.search(DESY_match, marc_text)
    if possible_match is not None:
        # it's listed on DESY, hooray! (a DESY hit overrides any arXiv PDF)
        desy_id = possible_match.group(0)
        dummy1, pdf_loc = harvest_single(desy_id, to_dir, ("pdf",))
    return pdf_loc
def harvest_from_file(filename, to_dir):
    """
    Harvest from the file Tibor made.

    Format of a single entry:
    oai:arXiv.org:area/YYMMIII
    or
    oai:arXiv.org:YYMM.IIII

    @param: filename (string): the file listing the records to harvest
    @param: to_dir (string): where downloaded material should be saved
    """
    ok_format = '^oai:arXiv.org:(([a-zA-Z\\-]+/\\d+)|(\\d+\\.\\d+))$'
    try:
        # context manager guarantees the listing file is closed
        with open(filename) as names_file:
            for arXiv_name in names_file:
                # strip the trailing newline so harvest_single gets a
                # clean id (the regex's $ tolerated it, downloads may not)
                arXiv_name = arXiv_name.strip()
                if re.match(ok_format, arXiv_name) is None:
                    write_message('error on ' + arXiv_name + '. continuing.')
                    continue
                harvest_single(arXiv_name, to_dir)
                # be nice to the remote server between downloads
                time.sleep(CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT)
    except IOError:
        write_message('Something is wrong with the file!')
def old_URL_harvest(from_date, to_date, to_dir, area):
    """
    Grab all the PDFs and tarballs off arXiv between from_date and to_date,
    where from_date and to_date are in YYMM form, and put them in their own
    separate folders inside of to_dir.  Folder hierarchy will be
    to_dir/YYYY/MM/arXiv_id/stuff_downloaded_from_arXiv

    This obeys the old (pre-0704) arXiv URL format.

    @param: from_date (int): YYMM form of the date where we want to start
        harvesting
    @param: to_date (int): YYMM form of the date where we want to stop
        harvesting
    @param: to_dir (string): the base directory to put all these subdirs in
    @param: area (int): the index in the HEP_AREAS array of the area we are
        currently working on downloading

    @output: PDFs and tarballs from arXiv in a hierarchy rooted at to_dir
    @return: None
    """
    yearmonthindex = from_date
    while yearmonthindex < to_date:
        sub_dir = make_useful_directories(yearmonthindex, to_dir)
        for paperindex in range(1, 1000):
            # for whatever reason, we can't count on these things to
            # start at 1 (in HEP_PH from 9403 to CENTURY_END only).
            # they start at frickin 202.
            #if area == HEP_PH and yearmonthindex < ARBITRARY_FROM_INDEX:
            #    paperindex = paperindex + 201

            # of note: before the URL change happened in 0704, it was
            # also the case that the paper numbers only had 3 digits
            next_to_harvest = '%04d%03d' % (yearmonthindex, paperindex)
            arXiv_id = area[AREA_STRING_INDEX] + next_to_harvest
            individual_dir = make_single_directory(sub_dir, arXiv_id)
            full_url = CFG_PLOTEXTRACTOR_SOURCE_BASE_URL + CFG_PLOTEXTRACTOR_SOURCE_TARBALL_FOLDER + \
                       area[URL] + next_to_harvest
            # a failed tarball download means we ran off the end of this
            # month's papers: move on to the next month
            if not download(full_url, \
                            area[AREA_STRING_INDEX] + next_to_harvest, individual_dir):
                break
            full_pdf_url = CFG_PLOTEXTRACTOR_SOURCE_BASE_URL + CFG_PLOTEXTRACTOR_SOURCE_PDF_FOLDER + \
                           area[URL] + next_to_harvest
            download(full_pdf_url, \
                     area[AREA_STRING_INDEX] + next_to_harvest + PDF_EXTENSION, \
                     individual_dir)
            # be nice to the remote server between downloads
            time.sleep(CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT)
        if yearmonthindex % 100 == 12:
            # we reached the end of the year!
            yearmonthindex = yearmonthindex + FIX_FOR_YEAR_END
        yearmonthindex = yearmonthindex + 1
def new_URL_harvest(from_date, from_index, to_dir):
    """
    Grab all the PDFs and tarballs off arXiv from from_date up to the
    current month (module global current_yearmonth), where from_date is in
    YYMM form, and put them in their own separate folders inside of to_dir.
    Folder hierarchy will be
    to_dir/YYYY/MM/arXiv_id/stuff_downloaded_from_arXiv

    This obeys the new (0704 and later) arXiv URL format.

    @param: from_date (int): YYMM form of the date where we want to start
        harvesting
    @param: from_index (int): paper index to start from within the first
        month; later months always start at 1
    @param: to_dir (string): the base directory to put all these subdirs in

    @output: PDFs and tarballs from arXiv in a hierarchy rooted at to_dir
    @return: None
    """
    global current_yearmonth
    yearmonthindex = from_date
    while yearmonthindex < current_yearmonth:
        # only the very first month respects from_index
        if yearmonthindex == from_date:
            fro = from_index
        else:
            fro = 1
        sub_dir = make_useful_directories(yearmonthindex, to_dir)
        for paperindex in range(fro, 10000):
            # of note: after the URL change happened in 0704, it was
            # the case that paper numbers had 4 digits
            next_to_harvest = '%04d.%04d' % (yearmonthindex, paperindex)
            arXiv_id = ARXIV_HEADER + next_to_harvest
            individual_dir = make_single_directory(sub_dir, arXiv_id)
            full_url = CFG_PLOTEXTRACTOR_SOURCE_BASE_URL + CFG_PLOTEXTRACTOR_SOURCE_TARBALL_FOLDER + \
                       next_to_harvest
            # a failed tarball download means we ran off the end of this
            # month's papers: move on to the next month
            if not download(full_url, ARXIV_HEADER + next_to_harvest, \
                            individual_dir):
                break
            full_pdf_url = CFG_PLOTEXTRACTOR_SOURCE_BASE_URL + CFG_PLOTEXTRACTOR_SOURCE_PDF_FOLDER + \
                           next_to_harvest
            download(full_pdf_url, \
                     ARXIV_HEADER + next_to_harvest + PDF_EXTENSION, \
                     individual_dir)
            time.sleep(CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT) # be nice to remote server
        if yearmonthindex % 100 == 12:
            # we reached the end of the year!
            yearmonthindex = yearmonthindex + FIX_FOR_YEAR_END
        yearmonthindex = yearmonthindex + 1
def download(url, filename, to_dir):
    """
    Fetch a single remote file and store it locally.

    @param: url (string): where the file lives on the interwebs
    @param: filename (string): where the file should live after download
    @param: to_dir (string): the dir where our new files will live
    @output: a file in to_dir
    @return: True on success, False on failure
    """
    destination = os.path.join(to_dir, filename)
    try:
        urllib.urlretrieve(url, destination)
    except IOError:
        # this could be a permissions error, but it probably means that
        # there's nothing left in that section YYMM
        write_message('Nothing at ' + destination)
        return False
    write_message('Downloaded to ' + destination)
    return True
| gpl-2.0 |
jaruba/chromium.src | tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/top_20.py | 21 | 3397 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class Top20Page(page_module.Page):
  """A hand-picked page for Chrome Proxy tests, backed by the shared archive."""

  def __init__(self, url, page_set, name=''):
    super(Top20Page, self).__init__(url=url, page_set=page_set, name=name)
    # All pages in this set replay from one shared Web Page Replay archive.
    self.archive_data_file = '../data/chrome_proxy_top_20.json'
class Top20PageSet(page_set_module.PageSet):

  """ Pages hand-picked for Chrome Proxy tests. """

  def __init__(self):
    super(Top20PageSet, self).__init__(
      archive_data_file='../data/chrome_proxy_top_20.json')

    # (url, name) pairs, in the order the stories should be added.
    # An empty name lets the page's URL act as its display name.
    stories = [
        # Why: top google property; a google tab is often open
        ('https://www.google.com/#hl=en&q=barack+obama', ''),
        # Why: #3 (Alexa global)
        ('http://www.youtube.com', ''),
        # Why: #18 (Alexa global), Picked an interesting post
        # pylint: disable=C0301
        ('http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
         'Wordpress'),
        # Why: top social,Public profile
        ('http://www.facebook.com/barackobama', 'Facebook'),
        # Why: #12 (Alexa global),Public profile
        ('http://www.linkedin.com/in/linustorvalds', 'LinkedIn'),
        # Why: #6 (Alexa) most visited worldwide,Picked an interesting page
        ('http://en.wikipedia.org/wiki/Wikipedia', 'Wikipedia (1 tab)'),
        # Why: #8 (Alexa global),Picked an interesting page
        ('https://twitter.com/katyperry', 'Twitter'),
        # Why: #37 (Alexa global)
        ('http://pinterest.com', 'Pinterest'),
        # Why: #1 sports
        ('http://espn.go.com', 'ESPN'),
        # Why: #1 news worldwide (Alexa global)
        ('http://news.yahoo.com', ''),
        # Why: #2 news worldwide
        ('http://www.cnn.com', ''),
        # Why: #7 (Alexa news); #27 total time spent,Picked interesting page
        ('http://www.weather.com/weather/right-now/Mountain+View+CA+94043',
         'Weather.com'),
        # Why: #1 world commerce website by visits; #3 commerce in the US by
        # time spent
        ('http://www.amazon.com', ''),
        # Why: #1 commerce website by time spent by users in US
        ('http://www.ebay.com', ''),
        # Why: #1 games according to Alexa (with actual games in it)
        ('http://games.yahoo.com', ''),
        # Why: #1 Alexa recreation
        ('http://booking.com', ''),
        # Why: #1 Alexa reference
        ('http://answers.yahoo.com', ''),
        # Why: #1 Alexa sports
        ('http://sports.yahoo.com/', ''),
        # Why: top tech blog
        ('http://techcrunch.com', ''),
        ('http://www.nytimes.com', ''),
    ]
    for url, name in stories:
      self.AddUserStory(Top20Page(url, self, name))
| bsd-3-clause |
wooga/airflow | airflow/contrib/sensors/__init__.py | 15 | 1059 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This package is deprecated. Please use `airflow.sensors` or `airflow.providers.*.sensors`."""
import warnings
# Warn at import time so any use of the deprecated package is flagged;
# stacklevel=2 attributes the warning to the importing module.
warnings.warn(
    "This package is deprecated. Please use `airflow.sensors` or `airflow.providers.*.sensors`.",
    DeprecationWarning,
    stacklevel=2,
)
| apache-2.0 |
TobyRoseman/SFrame | cxxtest/build_tools/SCons/test/eprouvette.py | 51 | 6472 | #!/usr/bin/env python
# vim: fileencoding=utf-8
#-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
from __future__ import print_function
import os, sys
from os.path import isdir, isfile, islink, join
from optparse import OptionParser
from subprocess import check_call, CalledProcessError, PIPE
options = None
args = []
available_types = set(['scons'])
tool_stdout = PIPE
def main():
    """Parse the options and execute the program."""
    # NOTE: the docstring used to sit *after* the global statements, so it
    # never became main.__doc__; it must be the first statement.
    global options
    global args
    global tool_stdout

    usage = \
"""Usage: %prog [options] [test1 [test2 [...]]]
If you provide one or more tests, this will run the provided tests.
Otherwise, it will look for tests in the current directory and run them all.
"""
    # option parsing
    parser = OptionParser(usage)
    parser.set_defaults(
            action='run',
            verbose=True)
    parser.add_option("-c", "--clean",
            action='store_const', const='clean', dest='action',
            help="deletes any generated files in the tests")
    parser.add_option("--run",
            action='store_const', const='run', dest='action',
            help="sets up the environment, compiles and runs the tests")
    parser.add_option("-v", "--verbose",
            action='store_true', dest='verbose',
            help="spew out more details")
    parser.add_option("-q", "--quiet",
            action='store_false', dest='verbose',
            help="spew out only success/failure of tests")
    parser.add_option("--target-dir",
            dest='target_dir', action='store', default='./',
            help='target directory to look for tests in. default: %default')
    parser.add_option("--debug",
            dest='debug', action='store_true', default=False,
            help='turn on debug output.')
    (options, args) = parser.parse_args()

    # in debug/verbose mode, let subprocess output pass straight through
    if options.debug or options.verbose:
        tool_stdout = None

    # gather the tests: either those named on the command line, or
    # everything that looks like a test under the target directory
    if len(args) == 0:
        tests = crawl_tests(options.target_dir)
    else:
        tests = args
    tests = purge_tests(tests)

    # run (or clean) the tests
    if options.action == 'run':
        for t in tests:
            run_test(t)
    elif options.action == 'clean':
        for t in tests:
            clean_test(t)
def crawl_tests(target):
    """Return the non-hidden subdirectories of *target* (test candidates)."""
    files = os.listdir(target)
    # join() is required: a bare isdir(f) would test the name relative to
    # the *current working directory*, not the target directory, so any
    # --target-dir other than './' silently found no tests.
    return [f for f in files if isdir(join(target, f)) and f[0] != '.']
def purge_tests(dirs):
    """Drop candidates lacking a TestDef.py file, warning about each reject."""
    kept = []
    for candidate in dirs:
        if not isfile(join(candidate, 'TestDef.py')):
            warn("{0} is not a test (missing TestDef.py file).".format(candidate))
            continue
        kept.append(candidate)
    return kept
def warn(msg):
    """Print a warning to stderr (suppressed when --quiet is in effect)."""
    if options.verbose:
        print('[Warn]: ' + msg, file=sys.stderr)

def notice(msg):
    """Print an informational message (suppressed when --quiet is in effect)."""
    if options.verbose:
        print(msg)

def debug(msg):
    """Print a debugging message (shown only when --debug is given)."""
    if options.debug:
        print(msg)
def run_test(t):
    """Set up, build and run the test in directory t, reporting the result."""
    opts = read_opts(t)
    notice("-----------------------------------------------------")
    notice("running test '{0}':\n".format(t))

    # show the test's README, if it has one
    readme = join(t, 'README')
    if isfile(readme):
        notice(open(readme).read())
        notice("")

    if opts['type'] not in available_types:
        warn('{0} is not a recognised test type in {1}'.format(opts['type'], t))
        return
    if not opts['expect_success']:
        warn("tests that fail intentionally are not yet supported.")
        return

    # set up the environment
    setup_env(t, opts)
    # run the test
    try:
        if opts['type'] == 'scons':
            run_scons(t, opts)
    except RuntimeError as e:
        # NOTE(review): run_scons propagates CalledProcessError, which is
        # not a RuntimeError subclass -- confirm this handler can fire.
        print("Test {0} failed.".format(t))
        return

    if not options.verbose:
        # quiet mode: a single progress dot per successful test
        print('.', end='')
        sys.stdout.flush()
    else:
        print("test '{0}' successful.".format(t))
def read_opts(t):
    """Read the test options from <t>/TestDef.py and return them as a dict.

    Keys not assigned by the TestDef.py keep the defaults below.
    """
    opts = {
        'expect_success' : True,
        'type' : 'scons',
        'links' : {}
    }
    # NOTE(security): TestDef.py is executed as arbitrary Python; only run
    # test definitions you trust.
    with open(join(t, "TestDef.py")) as f:
        exec(f.read(), opts)
    return opts
def setup_env(t, opts):
    """Create (or refresh) the symlinks a test needs inside its directory."""
    for link_name, target in opts['links'].items():
        dest = join(t, link_name)
        debug("Symlinking {0} to {1}".format(target, dest))
        # replace any stale link from a previous run
        if islink(dest):
            os.unlink(dest)
        os.symlink(target, dest)
def teardown_env(t, opts):
    """Remove the symlinks that setup_env created for this test."""
    for link_name in opts['links']:
        dest = join(t, link_name)
        debug('removing link {0}'.format(dest))
        os.unlink(dest)
def clean_test(t):
    """Remove all files generated for the test in directory t."""
    opts = read_opts(t)
    notice("cleaning test {0}".format(t))
    if opts['type'] == 'scons':
        setup_env(t, opts) # scons needs the environment links to work
        clean_scons(t, opts)
    # drop the symlinks created above (and by previous runs)
    teardown_env(t, opts)
def clean_scons(t, opts):
    """Run `scons --clean` in test dir t and delete its .sconsign database."""
    cwd = os.getcwd()
    os.chdir(t)
    try:
        check_call(['scons', '--clean'], stdout=tool_stdout, stderr=None)
    except CalledProcessError as e:
        warn("SCons failed with error {0}".format(e.returncode))
    finally:
        # always restore the working directory, even if scons is missing
        # (OSError) or some other unexpected error occurs
        os.chdir(cwd)
    sconsign = join(t, '.sconsign.dblite')
    if isfile(sconsign):
        os.unlink(sconsign)
def run_scons(t, opts):
    """Run the scons test in directory t: clean, build everything, check.

    Raises CalledProcessError (propagated to the caller) if any step fails.
    """
    cwd = os.getcwd()
    os.chdir(t)
    try:
        check_call(['scons', '--clean'], stdout=tool_stdout)
        check_call(['scons', '.'], stdout=tool_stdout)
        check_call(['scons', 'check'], stdout=tool_stdout)
    finally:
        # restore the working directory on success *and* failure; the
        # exception (if any) propagates with its original traceback,
        # unlike the old `raise e` pattern
        os.chdir(cwd)
if __name__ == "__main__":
    main()
    # quiet mode prints bare progress dots; finish the line before exiting
    if not options.verbose:
        print() # quiet doesn't output newlines.
| bsd-3-clause |
alexanderturner/ansible | lib/ansible/modules/system/user.py | 14 | 76140 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: user
author: "Stephen Fromm (@sfromm)"
version_added: "0.2"
short_description: Manage user accounts
notes:
- There are specific requirements per platform on user management utilities. However
they generally come pre-installed with the system and Ansible will require they
are present at runtime. If they are not, a descriptive error message will be shown.
description:
- Manage user accounts and user attributes.
options:
name:
required: true
aliases: [ "user" ]
description:
- Name of the user to create, remove or modify.
comment:
required: false
description:
- Optionally sets the description (aka I(GECOS)) of user account.
uid:
required: false
description:
- Optionally sets the I(UID) of the user.
non_unique:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Optionally when used with the -u option, this option allows to
change the user ID to a non-unique value.
version_added: "1.1"
seuser:
required: false
description:
- Optionally sets the seuser type (user_u) on selinux enabled systems.
version_added: "2.1"
group:
required: false
description:
- Optionally sets the user's primary group (takes a group name).
groups:
required: false
description:
- Puts the user in list of groups. When set to the empty string ('groups='),
the user is removed from all groups except the primary group.
- Before version 2.3, the only input format allowed was a 'comma separated string',
now it should be able to accept YAML lists also.
append:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If C(yes), will only add groups, not set them to just the list
in I(groups).
shell:
required: false
description:
- Optionally set the user's shell.
home:
required: false
description:
- Optionally set the user's home directory.
skeleton:
required: false
description:
- Optionally set a home skeleton directory. Requires createhome option!
version_added: "2.0"
password:
required: false
description:
- Optionally set the user's password to this crypted value. See
the user example in the github examples directory for what this looks
like in a playbook. See U(http://docs.ansible.com/ansible/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module)
for details on various ways to generate these password values.
Note on Darwin system, this value has to be cleartext.
Beware of security issues.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the account should exist or not, taking action if the state is different from what is stated.
createhome:
required: false
default: "yes"
choices: [ "yes", "no" ]
description:
- Unless set to C(no), a home directory will be made for the user
when the account is created or if the home directory does not
exist.
move_home:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If set to C(yes) when used with C(home=), attempt to move the
user's home directory to the specified directory if it isn't there
already.
system:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When creating an account, setting this to C(yes) makes the user a
system account. This setting cannot be changed on existing users.
force:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When used with C(state=absent), behavior is as with
C(userdel --force).
login_class:
required: false
description:
- Optionally sets the user's login class for FreeBSD, OpenBSD and NetBSD systems.
remove:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When used with C(state=absent), behavior is as with
C(userdel --remove).
generate_ssh_key:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.9"
description:
- Whether to generate a SSH key for the user in question.
This will B(not) overwrite an existing SSH key.
ssh_key_bits:
required: false
default: default set by ssh-keygen
version_added: "0.9"
description:
- Optionally specify number of bits in SSH key to create.
ssh_key_type:
required: false
default: rsa
version_added: "0.9"
description:
- Optionally specify the type of SSH key to generate.
Available SSH key types will depend on implementation
present on target host.
ssh_key_file:
required: false
default: .ssh/id_rsa
version_added: "0.9"
description:
- Optionally specify the SSH key filename. If this is a relative
filename then it will be relative to the user's home directory.
ssh_key_comment:
required: false
default: ansible-generated on $HOSTNAME
version_added: "0.9"
description:
- Optionally define the comment for the SSH key.
ssh_key_passphrase:
required: false
version_added: "0.9"
description:
- Set a passphrase for the SSH key. If no
passphrase is provided, the SSH key will default to
having no passphrase.
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "1.3"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
expires:
version_added: "1.9"
required: false
default: "None"
description:
- An expiry time for the user in epoch, it will be ignored on platforms that do not support this.
Currently supported on Linux and FreeBSD.
'''
EXAMPLES = '''
# Add the user 'johnd' with a specific uid and a primary group of 'admin'
- user:
name: johnd
comment: "John Doe"
uid: 1040
group: admin
# Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
- user:
name: james
shell: /bin/bash
groups: admins,developers
append: yes
# Remove the user 'johnd'
- user:
name: johnd
state: absent
remove: yes
# Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
- user:
name: jsmith
generate_ssh_key: yes
ssh_key_bits: 2048
ssh_key_file: .ssh/id_rsa
# added a consultant whose account you want to expire
- user:
name: james18
shell: /bin/zsh
groups: developers
expires: 1422403387
'''
import os
import pwd
import grp
import platform
import socket
import time
import shutil
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import load_platform_subclass, AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
try:
import spwd
HAVE_SPWD=True
except:
HAVE_SPWD=False
class User(object):
    """
    This is a generic User manipulation class that is subclassed
    based on platform.

    A subclass may wish to override the following action methods:-
      - create_user()
      - remove_user()
      - modify_user()
      - ssh_key_gen()
      - ssh_key_fingerprint()
      - user_exists()

    All subclasses MUST define platform and distribution (which may be None).
    """

    platform = 'Generic'        # overridden by platform-specific subclasses
    distribution = None         # finer-grained match within a platform
    SHADOWFILE = '/etc/shadow'  # default shadow password file location
    DATE_FORMAT = '%Y-%m-%d'    # format passed to useradd/usermod --expiredate

    def __new__(cls, *args, **kwargs):
        # Instantiate the subclass whose platform/distribution matches the
        # running system (falls back to this generic implementation).
        return load_platform_subclass(User, args, kwargs)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.uid = module.params['uid']
self.non_unique = module.params['non_unique']
self.seuser = module.params['seuser']
self.group = module.params['group']
self.comment = module.params['comment']
self.shell = module.params['shell']
self.password = module.params['password']
self.force = module.params['force']
self.remove = module.params['remove']
self.createhome = module.params['createhome']
self.move_home = module.params['move_home']
self.skeleton = module.params['skeleton']
self.system = module.params['system']
self.login_class = module.params['login_class']
self.append = module.params['append']
self.sshkeygen = module.params['generate_ssh_key']
self.ssh_bits = module.params['ssh_key_bits']
self.ssh_type = module.params['ssh_key_type']
self.ssh_comment = module.params['ssh_key_comment']
self.ssh_passphrase = module.params['ssh_key_passphrase']
self.update_password = module.params['update_password']
self.home = module.params['home']
self.expires = None
self.groups = None
if module.params['groups'] is not None:
self.groups = ','.join(module.params['groups'])
if module.params['expires']:
try:
self.expires = time.gmtime(module.params['expires'])
except Exception:
e = get_exception()
module.fail_json("Invalid expires time %s: %s" %(self.expires, str(e)))
if module.params['ssh_key_file'] is not None:
self.ssh_file = module.params['ssh_key_file']
else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode=True):
if self.module.check_mode and obey_checkmode:
self.module.debug('In check mode, would have run: "%s"' % cmd)
return (0, '','')
else:
# cast all args to strings ansible-modules-core/issues/4397
cmd = [str(x) for x in cmd]
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.force:
cmd.append('-f')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
    def create_user_useradd(self, command_name='useradd'):
        """Build and run a useradd command line from the module parameters.

        :param command_name: binary to invoke (subclasses may pass a variant)
        :return: (rc, out, err) from running the command
        """
        cmd = [self.module.get_bin_path(command_name, True)]

        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)

            if self.non_unique:
                # allow a duplicate (non-unique) UID
                cmd.append('-o')

        if self.seuser is not None:
            cmd.append('-Z')
            cmd.append(self.seuser)

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)
        elif self.group_exists(self.name):
            # use the -N option (no user group) if a group already
            # exists with the same name as the user to prevent
            # errors from useradd trying to create a group when
            # USERGROUPS_ENAB is set in /etc/login.defs.
            if os.path.exists('/etc/redhat-release'):
                dist = platform.dist()
                major_release = int(dist[1].split('.')[0])
                # RHEL <= 5 useradd only understands the old -n spelling
                if major_release <= 5:
                    cmd.append('-n')
                else:
                    cmd.append('-N')
            else:
                cmd.append('-N')

        if self.groups is not None and len(self.groups):
            groups = self.get_groups_set()
            cmd.append('-G')
            cmd.append(','.join(groups))

        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.expires:
            cmd.append('--expiredate')
            cmd.append(time.strftime(self.DATE_FORMAT, self.expires))

        if self.password is not None:
            # the value is expected to be an already-crypted hash
            cmd.append('-p')
            cmd.append(self.password)

        if self.createhome:
            cmd.append('-m')

            if self.skeleton is not None:
                cmd.append('-k')
                cmd.append(self.skeleton)
        else:
            cmd.append('-M')

        if self.system:
            cmd.append('-r')

        cmd.append(self.name)
        return self.execute_command(cmd)
def _check_usermod_append(self):
# check if this version of usermod can append groups
usermod_path = self.module.get_bin_path('usermod', True)
# for some reason, usermod --help cannot be used by non root
# on RH/Fedora, due to lack of execute bit for others
if not os.access(usermod_path, os.X_OK):
return False
cmd = [usermod_path, '--help']
(rc, data1, data2) = self.execute_command(cmd, obey_checkmode=False)
helpout = data1 + data2
# check if --append exists
lines = to_native(helpout).split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
return False
    def modify_user_usermod(self):
        """Build and run a usermod command containing only the options whose
        values differ from the account's current state.

        Returns the (rc, out, err) tuple from execute_command, or
        (None, '', '') when no change is required.
        """
        cmd = [self.module.get_bin_path('usermod', True)]
        info = self.user_info()
        has_append = self._check_usermod_append()
        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)
            # -o (non-unique uid) only makes sense together with -u.
            if self.non_unique:
                cmd.append('-o')
        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            # Only pass -g when the primary gid actually changes.
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)
        if self.groups is not None:
            # get a list of all groups for the user, including the primary
            current_groups = self.user_group_membership(exclude_primary=False)
            groups_need_mod = False
            groups = []
            if self.groups == '':
                # Empty string means "remove all supplementary groups"
                # (unless append is set, which makes it a no-op).
                if current_groups and not self.append:
                    groups_need_mod = True
            else:
                groups = self.get_groups_set(remove_existing=False)
                group_diff = set(current_groups).symmetric_difference(groups)
                if group_diff:
                    if self.append:
                        # Only requested-but-missing groups matter in
                        # append mode; use -a when usermod supports it.
                        for g in groups:
                            if g in group_diff:
                                if has_append:
                                    cmd.append('-a')
                                groups_need_mod = True
                                break
                    else:
                        groups_need_mod = True
            if groups_need_mod:
                if self.append and not has_append:
                    # Fallback for old usermod without --append: -A adds
                    # the listed groups without replacing the rest.
                    cmd.append('-A')
                    cmd.append(','.join(group_diff))
                else:
                    cmd.append('-G')
                    cmd.append(','.join(groups))
        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)
        if self.home is not None and info[5] != self.home:
            cmd.append('-d')
            cmd.append(self.home)
            # -m (move contents) is only valid together with -d.
            if self.move_home:
                cmd.append('-m')
        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)
        if self.expires:
            cmd.append('--expiredate')
            cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            cmd.append('-p')
            cmd.append(self.password)
        # skip if no changes to be made
        if len(cmd) == 1:
            return (None, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
def group_exists(self,group):
try:
# Try group as a gid first
grp.getgrgid(int(group))
return True
except (ValueError, KeyError):
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def group_info(self, group):
if not self.group_exists(group):
return False
try:
# Try group as a gid first
return list(grp.getgrgid(int(group)))
except (ValueError, KeyError):
return list(grp.getgrnam(group))
def get_groups_set(self, remove_existing=True):
if self.groups is None:
return None
info = self.user_info()
groups = set(filter(None, self.groups.split(',')))
for g in set(groups):
if not self.group_exists(g):
self.module.fail_json(msg="Group %s does not exist" % (g))
if info and remove_existing and self.group_info(g)[2] == info[3]:
groups.remove(g)
return groups
def user_group_membership(self, exclude_primary=True):
''' Return a list of groups the user belongs to '''
groups = []
info = self.get_pwd_info()
for group in grp.getgrall():
if self.name in group.gr_mem:
# Exclude the user's primary group by default
if not exclude_primary:
groups.append(group[0])
else:
if info[3] != group.gr_gid:
groups.append(group[0])
return groups
def user_exists(self):
try:
if pwd.getpwnam(self.name):
return True
except KeyError:
return False
def get_pwd_info(self):
if not self.user_exists():
return False
return list(pwd.getpwnam(self.name))
def user_info(self):
if not self.user_exists():
return False
info = self.get_pwd_info()
if len(info[1]) == 1 or len(info[1]) == 0:
info[1] = self.user_password()
return info
    def user_password(self):
        """Return the user's encrypted password hash, or '' if unavailable.

        Prefers the spwd module when the interpreter provides it; otherwise
        falls back to scanning the platform's shadow file directly
        (requires read access, i.e. typically root).
        """
        passwd = ''
        if HAVE_SPWD:
            try:
                passwd = spwd.getspnam(self.name)[1]
            except KeyError:
                # No shadow entry for this name: nothing more to find.
                return passwd
        if not self.user_exists():
            return passwd
        elif self.SHADOWFILE:
            # Read shadow file for user's encrypted password string
            if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
                for line in open(self.SHADOWFILE).readlines():
                    if line.startswith('%s:' % self.name):
                        passwd = line.split(':')[1]
        return passwd
def get_ssh_key_path(self):
info = self.user_info()
if os.path.isabs(self.ssh_file):
ssh_key_file = self.ssh_file
else:
ssh_key_file = os.path.join(info[5], self.ssh_file)
return ssh_key_file
    def ssh_key_gen(self):
        """Generate an SSH keypair for the user with ssh-keygen.

        Creates the key directory (0700, owned by the user) if needed.
        Returns (rc, out, err); rc is None when the key already exists,
        and 1 with a message when the home directory is missing or the
        key directory cannot be created.
        """
        info = self.user_info()
        if not os.path.exists(info[5]) and not self.module.check_mode:
            return (1, '', 'User %s home directory does not exist' % self.name)
        ssh_key_file = self.get_ssh_key_path()
        ssh_dir = os.path.dirname(ssh_key_file)
        if not os.path.exists(ssh_dir):
            if self.module.check_mode:
                # Would create the directory; report success without acting.
                return (0, '', '')
            try:
                os.mkdir(ssh_dir, int('0700', 8))
                os.chown(ssh_dir, info[2], info[3])
            except OSError:
                e = get_exception()
                return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e)))
        if os.path.exists(ssh_key_file):
            return (None, 'Key already exists', '')
        cmd = [self.module.get_bin_path('ssh-keygen', True)]
        cmd.append('-t')
        cmd.append(self.ssh_type)
        if self.ssh_bits > 0:
            cmd.append('-b')
            cmd.append(self.ssh_bits)
        cmd.append('-C')
        cmd.append(self.ssh_comment)
        cmd.append('-f')
        cmd.append(ssh_key_file)
        cmd.append('-N')
        if self.ssh_passphrase is not None:
            cmd.append(self.ssh_passphrase)
        else:
            # ssh-keygen -N requires an explicit (possibly empty) passphrase.
            cmd.append('')
        (rc, out, err) = self.execute_command(cmd)
        if rc == 0 and not self.module.check_mode:
            # If the keys were successfully created, we should be able
            # to tweak ownership.
            os.chown(ssh_key_file, info[2], info[3])
            os.chown('%s.pub' % ssh_key_file, info[2], info[3])
        return (rc, out, err)
def ssh_key_fingerprint(self):
ssh_key_file = self.get_ssh_key_path()
if not os.path.exists(ssh_key_file):
return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
cmd = [ self.module.get_bin_path('ssh-keygen', True) ]
cmd.append('-l')
cmd.append('-f')
cmd.append(ssh_key_file)
return self.execute_command(cmd, obey_checkmode=False)
def get_ssh_public_key(self):
ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
try:
f = open(ssh_public_key_file)
ssh_public_key = f.read().strip()
f.close()
except IOError:
return None
return ssh_public_key
    def create_user(self):
        """Create the account; generic platforms delegate to useradd."""
        # by default we use the create_user_useradd method
        return self.create_user_useradd()
    def remove_user(self):
        """Remove the account; generic platforms delegate to userdel."""
        # by default we use the remove_user_userdel method
        return self.remove_user_userdel()
    def modify_user(self):
        """Modify the account; generic platforms delegate to usermod."""
        # by default we use the modify_user_usermod method
        return self.modify_user_usermod()
def create_homedir(self, path):
if not os.path.exists(path):
if self.skeleton is not None:
skeleton = self.skeleton
else:
skeleton = '/etc/skel'
if os.path.exists(skeleton):
try:
shutil.copytree(skeleton, path, symlinks=True)
except OSError:
e = get_exception()
self.module.exit_json(failed=True, msg="%s" % e)
else:
try:
os.makedirs(path)
except OSError:
e = get_exception()
self.module.exit_json(failed=True, msg="%s" % e)
def chown_homedir(self, uid, gid, path):
try:
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(path, uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
except OSError:
e = get_exception()
self.module.exit_json(failed=True, msg="%s" % e)
# ===========================================
class FreeBsdUser(User):
    """
    This is a FreeBSD User manipulation class - it uses the pw command
    to manipulate the user database, followed by the chpass command
    to change the password.
    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """

    platform = 'FreeBSD'
    distribution = None
    SHADOWFILE = '/etc/master.passwd'

    def remove_user(self):
        """Remove the account with 'pw userdel'; -r also removes the home
        directory when self.remove is set."""
        cmd = [
            self.module.get_bin_path('pw', True),
            'userdel',
            '-n',
            self.name
        ]
        if self.remove:
            cmd.append('-r')

        return self.execute_command(cmd)

    def create_user(self):
        """Create the account with 'pw useradd'; the password (if any) has
        to be set afterwards with a separate chpass invocation."""
        cmd = [
            self.module.get_bin_path('pw', True),
            'useradd',
            '-n',
            self.name,
        ]

        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)
            # -o (non-unique uid) only makes sense together with -u.
            if self.non_unique:
                cmd.append('-o')

        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)

        if self.groups is not None:
            groups = self.get_groups_set()
            cmd.append('-G')
            cmd.append(','.join(groups))

        if self.createhome:
            cmd.append('-m')
            # -k (skeleton dir) is only meaningful when creating the home.
            if self.skeleton is not None:
                cmd.append('-k')
                cmd.append(self.skeleton)

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            cmd.append('-L')
            cmd.append(self.login_class)

        if self.expires:
            # pw -e takes the expiry as a day count from now, not a date.
            days =( time.mktime(self.expires) - time.time() ) / 86400
            cmd.append('-e')
            cmd.append(str(int(days)))

        # system cannot be handled currently - should we error if its requested?
        # create the user
        (rc, out, err) = self.execute_command(cmd)
        if rc is not None and rc != 0:
            self.module.fail_json(name=self.name, msg=err, rc=rc)

        # we have to set the password in a second command
        if self.password is not None:
            cmd = [
                self.module.get_bin_path('chpass', True),
                '-p',
                self.password,
                self.name
            ]
            return self.execute_command(cmd)

        return (rc, out, err)

    def modify_user(self):
        """Apply only the changed attributes with 'pw usermod'; password
        changes go through a separate chpass invocation afterwards."""
        cmd = [
            self.module.get_bin_path('pw', True),
            'usermod',
            '-n',
            self.name
        ]
        # Remember the base length so we can tell later whether any option
        # was actually added (i.e. whether running pw is worthwhile).
        cmd_len = len(cmd)
        info = self.user_info()

        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)
            if self.non_unique:
                cmd.append('-o')

        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None and info[5] != self.home:
            if self.move_home:
                cmd.append('-m')
            cmd.append('-d')
            cmd.append(self.home)

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)

        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            # find current login class
            user_login_class = None
            if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
                for line in open(self.SHADOWFILE).readlines():
                    if line.startswith('%s:' % self.name):
                        user_login_class = line.split(':')[4]

            # act only if login_class change
            if self.login_class != user_login_class:
                cmd.append('-L')
                cmd.append(self.login_class)

        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups = self.get_groups_set()

            group_diff = set(current_groups).symmetric_difference(groups)
            groups_need_mod = False

            if group_diff:
                if self.append:
                    for g in groups:
                        if g in group_diff:
                            groups_need_mod = True
                            break
                else:
                    groups_need_mod = True

            if groups_need_mod:
                cmd.append('-G')
                new_groups = groups
                if self.append:
                    # pw -G replaces the whole set, so append means union.
                    new_groups = groups | set(current_groups)
                cmd.append(','.join(new_groups))

        if self.expires:
            # pw -e takes the expiry as a day count from now, not a date.
            days = ( time.mktime(self.expires) - time.time() ) / 86400
            cmd.append('-e')
            cmd.append(str(int(days)))

        # modify the user if cmd will do anything
        if cmd_len != len(cmd):
            (rc, out, err) = self.execute_command(cmd)
            if rc is not None and rc != 0:
                self.module.fail_json(name=self.name, msg=err, rc=rc)
        else:
            (rc, out, err) = (None, '', '')

        # we have to set the password in a second command
        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            cmd = [
                self.module.get_bin_path('chpass', True),
                '-p',
                self.password,
                self.name
            ]
            return self.execute_command(cmd)

        return (rc, out, err)
# ===========================================
class OpenBSDUser(User):
    """
    This is a OpenBSD User manipulation class.
    Main differences are that OpenBSD:-
     - has no concept of "system" account.
     - has no force delete user
    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """

    platform = 'OpenBSD'
    distribution = None
    SHADOWFILE = '/etc/master.passwd'

    def create_user(self):
        """Create the account with OpenBSD's useradd, passing the password
        hash directly via -p."""
        cmd = [self.module.get_bin_path('useradd', True)]

        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)
            # -o (non-unique uid) only makes sense together with -u.
            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)

        if self.groups is not None:
            groups = self.get_groups_set()
            cmd.append('-G')
            cmd.append(','.join(groups))

        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            cmd.append('-L')
            cmd.append(self.login_class)

        # '*' is the locked-account placeholder; never pass it as a hash.
        if self.password is not None and self.password != '*':
            cmd.append('-p')
            cmd.append(self.password)

        if self.createhome:
            cmd.append('-m')
            # -k (skeleton dir) is only meaningful when creating the home.
            if self.skeleton is not None:
                cmd.append('-k')
                cmd.append(self.skeleton)

        cmd.append(self.name)
        return self.execute_command(cmd)

    def remove_user_userdel(self):
        """Remove the account with userdel; -r also removes the home dir."""
        cmd = [self.module.get_bin_path('userdel', True)]
        if self.remove:
            cmd.append('-r')
        cmd.append(self.name)
        return self.execute_command(cmd)

    def modify_user(self):
        """Apply only the changed attributes with OpenBSD's usermod.

        Returns (rc, out, err), or (None, '', '') when nothing changed.
        """
        cmd = [self.module.get_bin_path('usermod', True)]
        info = self.user_info()

        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)
            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)

        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups_need_mod = False
            # -G replaces the supplementary set; -S is used when appending.
            groups_option = '-G'
            groups = []

            if self.groups == '':
                # Empty string means "remove all supplementary groups"
                # (a no-op when append is set).
                if current_groups and not self.append:
                    groups_need_mod = True
            else:
                groups = self.get_groups_set()
                group_diff = set(current_groups).symmetric_difference(groups)

                if group_diff:
                    if self.append:
                        for g in groups:
                            if g in group_diff:
                                groups_option = '-S'
                                groups_need_mod = True
                                break
                    else:
                        groups_need_mod = True

            if groups_need_mod:
                cmd.append(groups_option)
                cmd.append(','.join(groups))

        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None and info[5] != self.home:
            if self.move_home:
                cmd.append('-m')
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            # find current login class
            user_login_class = None
            userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
            (rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False)

            for line in out.splitlines():
                tokens = line.split()

                if tokens[0] == 'class' and len(tokens) == 2:
                    user_login_class = tokens[1]

            # act only if login_class change
            if self.login_class != user_login_class:
                cmd.append('-L')
                cmd.append(self.login_class)

        # '*' is the locked-account placeholder; never pass it as a hash.
        if self.update_password == 'always' and self.password is not None \
                and self.password != '*' and info[1] != self.password:
            cmd.append('-p')
            cmd.append(self.password)

        # skip if no changes to be made
        if len(cmd) == 1:
            return (None, '', '')

        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class NetBSDUser(User):
    """
    This is a NetBSD User manipulation class.
    Main differences are that NetBSD:-
     - has no concept of "system" account.
     - has no force delete user
    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """

    platform = 'NetBSD'
    distribution = None
    SHADOWFILE = '/etc/master.passwd'

    def create_user(self):
        """Create the account with NetBSD's useradd. NetBSD caps the
        supplementary group list at 16 entries."""
        cmd = [self.module.get_bin_path('useradd', True)]

        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)
            # -o (non-unique uid) only makes sense together with -u.
            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)

        if self.groups is not None:
            groups = self.get_groups_set()
            if len(groups) > 16:
                self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
            cmd.append('-G')
            cmd.append(','.join(groups))

        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            cmd.append('-L')
            cmd.append(self.login_class)

        if self.password is not None:
            cmd.append('-p')
            cmd.append(self.password)

        if self.createhome:
            cmd.append('-m')
            # -k (skeleton dir) is only meaningful when creating the home.
            if self.skeleton is not None:
                cmd.append('-k')
                cmd.append(self.skeleton)

        cmd.append(self.name)
        return self.execute_command(cmd)

    def remove_user_userdel(self):
        """Remove the account with userdel; -r also removes the home dir."""
        cmd = [self.module.get_bin_path('userdel', True)]
        if self.remove:
            cmd.append('-r')
        cmd.append(self.name)
        return self.execute_command(cmd)

    def modify_user(self):
        """Apply only the changed attributes with NetBSD's usermod.

        Returns (rc, out, err), or (None, '', '') when nothing changed.
        """
        cmd = [self.module.get_bin_path('usermod', True)]
        info = self.user_info()

        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)
            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)

        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups_need_mod = False
            groups = []

            if self.groups == '':
                # Empty string means "remove all supplementary groups"
                # (a no-op when append is set).
                if current_groups and not self.append:
                    groups_need_mod = True
            else:
                groups = self.get_groups_set()
                group_diff = set(current_groups).symmetric_difference(groups)

                if group_diff:
                    if self.append:
                        for g in groups:
                            if g in group_diff:
                                # usermod -G replaces the set, so append
                                # means passing the union.
                                groups = set(current_groups).union(groups)
                                groups_need_mod = True
                                break
                    else:
                        groups_need_mod = True

            if groups_need_mod:
                if len(groups) > 16:
                    self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
                cmd.append('-G')
                cmd.append(','.join(groups))

        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None and info[5] != self.home:
            if self.move_home:
                cmd.append('-m')
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            cmd.append('-L')
            cmd.append(self.login_class)

        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            cmd.append('-p')
            cmd.append(self.password)

        # skip if no changes to be made
        if len(cmd) == 1:
            return (None, '', '')

        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class SunOS(User):
    """
    This is a SunOS User manipulation class - The main difference between
    this class and the generic user class is that Solaris-type distros
    don't support the concept of a "system" account and we need to
    edit the /etc/shadow file manually to set a password. (Ugh)
    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """

    platform = 'SunOS'
    distribution = None
    SHADOWFILE = '/etc/shadow'

    def get_password_defaults(self):
        """Return (minweeks, maxweeks, warnweeks) password-aging defaults
        read from /etc/default/passwd; unset values come back as ''."""
        # Read password aging defaults
        try:
            minweeks = ''
            maxweeks = ''
            warnweeks = ''
            for line in open("/etc/default/passwd", 'r'):
                line = line.strip()
                # Skip comments and blank lines.
                if (line.startswith('#') or line == ''):
                    continue
                key, value = line.split('=')
                if key == "MINWEEKS":
                    minweeks = value.rstrip('\n')
                elif key == "MAXWEEKS":
                    maxweeks = value.rstrip('\n')
                elif key == "WARNWEEKS":
                    warnweeks = value.rstrip('\n')
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % str(err))

        return (minweeks, maxweeks, warnweeks)

    def remove_user(self):
        """Remove the account with userdel; -r also removes the home dir."""
        cmd = [self.module.get_bin_path('userdel', True)]
        if self.remove:
            cmd.append('-r')
        cmd.append(self.name)

        return self.execute_command(cmd)

    def create_user(self):
        """Create the account with useradd, then write the password hash
        directly into /etc/shadow (Solaris useradd cannot set it)."""
        cmd = [self.module.get_bin_path('useradd', True)]

        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)
            # -o (non-unique uid) only makes sense together with -u.
            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)

        if self.groups is not None:
            groups = self.get_groups_set()
            cmd.append('-G')
            cmd.append(','.join(groups))

        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.createhome:
            cmd.append('-m')
            if self.skeleton is not None:
                cmd.append('-k')
                cmd.append(self.skeleton)

        cmd.append(self.name)

        (rc, out, err) = self.execute_command(cmd)
        if rc is not None and rc != 0:
            self.module.fail_json(name=self.name, msg=err, rc=rc)

        if not self.module.check_mode:
            # we have to set the password by editing the /etc/shadow file
            if self.password is not None:
                minweeks, maxweeks, warnweeks = self.get_password_defaults()
                try:
                    lines = []
                    # Fix: open in text mode ('r', not 'rb') so the fields
                    # are str and can be joined with the str password on
                    # Python 3; 'rb' made this a bytes/str TypeError.
                    for line in open(self.SHADOWFILE, 'r').readlines():
                        fields = line.strip().split(':')
                        if not fields[0] == self.name:
                            lines.append(line)
                            continue
                        fields[1] = self.password
                        # Shadow field 3 is "last changed", in days since epoch.
                        fields[2] = str(int(time.time() / 86400))
                        if minweeks:
                            fields[3] = str(int(minweeks) * 7)
                        if maxweeks:
                            fields[4] = str(int(maxweeks) * 7)
                        if warnweeks:
                            fields[5] = str(int(warnweeks) * 7)
                        line = ':'.join(fields)
                        lines.append('%s\n' % line)
                    open(self.SHADOWFILE, 'w+').writelines(lines)
                except Exception:
                    err = get_exception()
                    self.module.fail_json(msg="failed to update users password: %s" % str(err))

        return (rc, out, err)

    def modify_user_usermod(self):
        """Apply changed attributes with usermod; password updates are
        written straight into /etc/shadow, honoring aging defaults."""
        cmd = [self.module.get_bin_path('usermod', True)]
        cmd_len = len(cmd)
        info = self.user_info()

        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)
            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)

        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups = self.get_groups_set()
            group_diff = set(current_groups).symmetric_difference(groups)
            groups_need_mod = False

            if group_diff:
                if self.append:
                    for g in groups:
                        if g in group_diff:
                            groups_need_mod = True
                            break
                else:
                    groups_need_mod = True

            if groups_need_mod:
                cmd.append('-G')
                new_groups = groups
                if self.append:
                    # usermod -G replaces the set, so append means union.
                    new_groups.update(current_groups)
                cmd.append(','.join(new_groups))

        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None and info[5] != self.home:
            if self.move_home:
                cmd.append('-m')
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)

        # modify the user if cmd will do anything
        if cmd_len != len(cmd):
            cmd.append(self.name)
            (rc, out, err) = self.execute_command(cmd)
            if rc is not None and rc != 0:
                self.module.fail_json(name=self.name, msg=err, rc=rc)
        else:
            (rc, out, err) = (None, '', '')

        # we have to set the password by editing the /etc/shadow file
        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            (rc, out, err) = (0, '', '')
            if not self.module.check_mode:
                minweeks, maxweeks, warnweeks = self.get_password_defaults()
                try:
                    lines = []
                    # Fix: text mode ('r'), same bytes/str issue as in
                    # create_user() above.
                    for line in open(self.SHADOWFILE, 'r').readlines():
                        fields = line.strip().split(':')
                        if not fields[0] == self.name:
                            lines.append(line)
                            continue
                        fields[1] = self.password
                        fields[2] = str(int(time.time() / 86400))
                        if minweeks:
                            fields[3] = str(int(minweeks) * 7)
                        if maxweeks:
                            fields[4] = str(int(maxweeks) * 7)
                        if warnweeks:
                            fields[5] = str(int(warnweeks) * 7)
                        line = ':'.join(fields)
                        lines.append('%s\n' % line)
                    open(self.SHADOWFILE, 'w+').writelines(lines)
                    rc = 0
                except Exception:
                    err = get_exception()
                    self.module.fail_json(msg="failed to update users password: %s" % str(err))

        return (rc, out, err)
# ===========================================
class DarwinUser(User):
"""
This is a Darwin Mac OS X User manipulation class.
Main differences are that Darwin:-
- Handles accounts in a database managed by dscl(1)
- Has no useradd/groupadd
- Does not create home directories
- User password must be cleartext
- UID must be given
- System users must ben under 500
This overrides the following methods from the generic class:-
- user_exists()
- create_user()
- remove_user()
- modify_user()
"""
platform = 'Darwin'
distribution = None
SHADOWFILE = None
dscl_directory = '.'
fields = [
('comment', 'RealName'),
('home', 'NFSHomeDirectory'),
('shell', 'UserShell'),
('uid', 'UniqueID'),
('group', 'PrimaryGroupID'),
]
def _get_dscl(self):
return [ self.module.get_bin_path('dscl', True), self.dscl_directory ]
def _list_user_groups(self):
cmd = self._get_dscl()
cmd += [ '-search', '/Groups', 'GroupMembership', self.name ]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
groups = []
for line in out.splitlines():
if line.startswith(' ') or line.startswith(')'):
continue
groups.append(line.split()[0])
return groups
def _get_user_property(self, property):
'''Return user PROPERTY as given my dscl(1) read or None if not found.'''
cmd = self._get_dscl()
cmd += [ '-read', '/Users/%s' % self.name, property ]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
return None
# from dscl(1)
# if property contains embedded spaces, the list will instead be
# displayed one entry per line, starting on the line after the key.
lines = out.splitlines()
#sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
if len(lines) == 1:
return lines[0].split(': ')[1]
else:
if len(lines) > 2:
return '\n'.join([ lines[1].strip() ] + lines[2:])
else:
if len(lines) == 2:
return lines[1].strip()
else:
return None
def _get_next_uid(self, system=None):
'''
Return the next available uid. If system=True, then
uid should be below of 500, if possible.
'''
cmd = self._get_dscl()
cmd += ['-list', '/Users', 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
self.module.fail_json(
msg="Unable to get the next available uid",
rc=rc,
out=out,
err=err
)
max_uid = 0
max_system_uid = 0
for line in out.splitlines():
current_uid = int(line.split(' ')[-1])
if max_uid < current_uid:
max_uid = current_uid
if max_system_uid < current_uid and current_uid < 500:
max_system_uid = current_uid
if system and (0 < max_system_uid < 499):
return max_system_uid + 1
return max_uid + 1
def _change_user_password(self):
'''Change password for SELF.NAME against SELF.PASSWORD.
Please note that password must be cleartext.
'''
# some documentation on how is stored passwords on OSX:
# http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
# http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
# http://pastebin.com/RYqxi7Ca
# on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
# https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
# https://gist.github.com/nueh/8252572
cmd = self._get_dscl()
if self.password:
cmd += [ '-passwd', '/Users/%s' % self.name, self.password]
else:
cmd += [ '-create', '/Users/%s' % self.name, 'Password', '*']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
return (rc, out, err)
def _make_group_numerical(self):
'''Convert SELF.GROUP to is stringed numerical value suitable for dscl.'''
if self.group is None:
self.group = 'nogroup'
try:
self.group = grp.getgrnam(self.group).gr_gid
except KeyError:
self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
# We need to pass a string to dscl
self.group = str(self.group)
def __modify_group(self, group, action):
'''Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
if action == 'add':
option = '-a'
else:
option = '-d'
cmd = [ 'dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group ]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
% (action, self.name, group), err=err, out=out, rc=rc)
return (rc, out, err)
def _modify_group(self):
'''Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
rc = 0
out = ''
err = ''
changed = False
current = set(self._list_user_groups())
if self.groups is not None:
target = set(self.groups.split(','))
else:
target = set([])
if self.append is False:
for remove in current - target:
(_rc, _err, _out) = self.__modify_group(remove, 'delete')
rc += rc
out += _out
err += _err
changed = True
for add in target - current:
(_rc, _err, _out) = self.__modify_group(add, 'add')
rc += _rc
out += _out
err += _err
changed = True
return (rc, err, out, changed)
def _update_system_user(self):
'''Hide or show user on login window according SELF.SYSTEM.
Returns 0 if a change has been made, None otherwhise.'''
plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
# http://support.apple.com/kb/HT5017?viewlocale=en_US
cmd = [ 'defaults', 'read', plist_file, 'HiddenUsersList' ]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
# returned value is
# (
# "_userA",
# "_UserB",
# userc
# )
hidden_users = []
for x in out.splitlines()[1:-1]:
try:
x = x.split('"')[1]
except IndexError:
x = x.strip()
hidden_users.append(x)
if self.system:
if not self.name in hidden_users:
cmd = [ 'defaults', 'write', plist_file,
'HiddenUsersList', '-array-add', self.name ]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json( msg='Cannot user "%s" to hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
else:
if self.name in hidden_users:
del(hidden_users[hidden_users.index(self.name)])
cmd = [ 'defaults', 'write', plist_file, 'HiddenUsersList', '-array' ] + hidden_users
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json( msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
def user_exists(self):
'''Check is SELF.NAME is a known user on the system.'''
cmd = self._get_dscl()
cmd += [ '-list', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
return rc == 0
def remove_user(self):
'''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
info = self.user_info()
cmd = self._get_dscl()
cmd += [ '-delete', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json( msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
if self.force:
if os.path.exists(info[5]):
shutil.rmtree(info[5])
out += "Removed %s" % info[5]
return (rc, out, err)
def create_user(self, command_name='dscl'):
cmd = self._get_dscl()
cmd += [ '-create', '/Users/%s' % self.name]
(rc, err, out) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json( msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
self._make_group_numerical()
if self.uid is None:
self.uid = str(self._get_next_uid(self.system))
# Homedir is not created by default
if self.createhome:
if self.home is None:
self.home = '/Users/%s' % self.name
if not self.module.check_mode:
if not os.path.exists(self.home):
os.makedirs(self.home)
self.chown_homedir(int(self.uid), int(self.group), self.home)
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += [ '-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
(rc, _err, _out) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json( msg='Cannot add property "%s" to user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
out += _out
err += _err
if rc != 0:
return (rc, _err, _out)
(rc, _err, _out) = self._change_user_password()
out += _out
err += _err
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
if self.groups:
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
return (rc, err, out)
def modify_user(self):
    '''Update record properties, password and groups of SELF.NAME as needed.

    Returns ``(changed, out, err)``; ``changed`` stays None when nothing
    was modified.
    '''
    changed = None
    out = ''
    err = ''
    if self.group:
        self._make_group_numerical()
    for field in self.fields:
        if field[0] in self.__dict__ and self.__dict__[field[0]]:
            current = self._get_user_property(field[1])
            if current is None or current != self.__dict__[field[0]]:
                cmd = self._get_dscl() + ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
                # execute_command() returns (rc, out, err); unpack in that
                # order so stdout/stderr are accumulated correctly.
                (rc, _out, _err) = self.execute_command(cmd)
                if rc != 0:
                    self.module.fail_json(
                        msg='Cannot update property "%s" for user "%s".'
                            % (field[0], self.name), err=err, out=out, rc=rc)
                changed = rc
                out += _out
                err += _err
    if self.update_password == 'always' and self.password is not None:
        (rc, _out, _err) = self._change_user_password()
        out += _out
        err += _err
        changed = rc
    if self.groups:
        (rc, _out, _err, _changed) = self._modify_group()
        out += _out
        err += _err
        if _changed is True:
            changed = rc
    rc = self._update_system_user()
    # A zero rc marks a successful system-user update as a change; the
    # caller only tests `changed` for None-ness.
    if rc == 0:
        changed = rc
    return (changed, out, err)
# ===========================================
class AIX(User):
    """
    This is a AIX User manipulation class.

    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """
    platform = 'AIX'
    distribution = None
    # On AIX, password hashes live in /etc/security/passwd, not /etc/shadow.
    SHADOWFILE = '/etc/security/passwd'
    def remove_user(self):
        # Delete the account; -r additionally removes the home directory.
        cmd = [self.module.get_bin_path('userdel', True)]
        if self.remove:
            cmd.append('-r')
        cmd.append(self.name)
        return self.execute_command(cmd)
    def create_user_useradd(self, command_name='useradd'):
        # Assemble the useradd invocation from every requested attribute.
        cmd = [self.module.get_bin_path(command_name, True)]
        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)
        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)
        if self.groups is not None and len(self.groups):
            groups = self.get_groups_set()
            cmd.append('-G')
            cmd.append(','.join(groups))
        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)
        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)
        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)
        if self.createhome:
            cmd.append('-m')
            if self.skeleton is not None:
                cmd.append('-k')
                cmd.append(self.skeleton)
        cmd.append(self.name)
        (rc, out, err) = self.execute_command(cmd)
        # set password with chpasswd; -e means the value is pre-encrypted,
        # -c clears password-rule flags so the user is not forced to change it.
        if self.password is not None:
            cmd = []
            cmd.append(self.module.get_bin_path('chpasswd', True))
            cmd.append('-e')
            cmd.append('-c')
            self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))
        return (rc, out, err)
    def modify_user_usermod(self):
        cmd = [self.module.get_bin_path('usermod', True)]
        info = self.user_info()
        # Only append a flag when the current value differs, so that an
        # unchanged account produces an empty command (see len(cmd) == 1 below).
        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)
        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)
        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups_need_mod = False
            groups = []
            if self.groups == '':
                # Empty string means "no supplementary groups": only act when
                # the user has some and we are not merely appending.
                if current_groups and not self.append:
                    groups_need_mod = True
            else:
                groups = self.get_groups_set()
                group_diff = set(current_groups).symmetric_difference(groups)
                if group_diff:
                    if self.append:
                        # In append mode only a missing requested group counts.
                        for g in groups:
                            if g in group_diff:
                                groups_need_mod = True
                                break
                    else:
                        groups_need_mod = True
            if groups_need_mod:
                cmd.append('-G')
                cmd.append(','.join(groups))
        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)
        if self.home is not None and info[5] != self.home:
            if self.move_home:
                cmd.append('-m')
            cmd.append('-d')
            cmd.append(self.home)
        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)
        # skip if no changes to be made
        if len(cmd) == 1:
            (rc, out, err) = (None, '', '')
        else:
            cmd.append(self.name)
            (rc, out, err) = self.execute_command(cmd)
        # set password with chpasswd (only when it actually differs)
        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            cmd = []
            cmd.append(self.module.get_bin_path('chpasswd', True))
            cmd.append('-e')
            cmd.append('-c')
            (rc2, out2, err2) = self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))
        else:
            (rc2, out2, err2) = (None, '', '')
        # Prefer the usermod rc; fall back to the chpasswd rc when usermod
        # was skipped. Output/stderr of both steps are concatenated.
        if rc is not None:
            return (rc, out+out2, err+err2)
        else:
            return (rc2, out+out2, err+err2)
# ===========================================
class HPUX(User):
    """
    This is a HP-UX User manipulation class.

    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """
    platform = 'HP-UX'
    distribution = None
    SHADOWFILE = '/etc/shadow'
    def create_user(self):
        # HP-UX ships SAM wrappers of the user tools under /usr/sam/lbin.
        cmd = ['/usr/sam/lbin/useradd.sam']
        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)
            # -o allows a duplicate (non-unique) uid.
            if self.non_unique:
                cmd.append('-o')
        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)
        if self.groups is not None and len(self.groups):
            groups = self.get_groups_set()
            cmd.append('-G')
            cmd.append(','.join(groups))
        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)
        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)
        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)
        if self.password is not None:
            cmd.append('-p')
            cmd.append(self.password)
        # -m creates the home directory, -M suppresses it.
        if self.createhome:
            cmd.append('-m')
        else:
            cmd.append('-M')
        if self.system:
            cmd.append('-r')
        cmd.append(self.name)
        return self.execute_command(cmd)
    def remove_user(self):
        cmd = ['/usr/sam/lbin/userdel.sam']
        # -F forces removal even when the user is logged in.
        if self.force:
            cmd.append('-F')
        if self.remove:
            cmd.append('-r')
        cmd.append(self.name)
        return self.execute_command(cmd)
    def modify_user(self):
        cmd = ['/usr/sam/lbin/usermod.sam']
        info = self.user_info()
        # Whether this usermod supports -a (append to groups) — checked once.
        has_append = self._check_usermod_append()
        # Only append a flag when the current value differs, so an unchanged
        # account yields an empty command (len(cmd) == 1 below).
        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)
            if self.non_unique:
                cmd.append('-o')
        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)
        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups_need_mod = False
            groups = []
            if self.groups == '':
                if current_groups and not self.append:
                    groups_need_mod = True
            else:
                groups = self.get_groups_set(remove_existing=False)
                group_diff = set(current_groups).symmetric_difference(groups)
                if group_diff:
                    if self.append:
                        for g in groups:
                            if g in group_diff:
                                if has_append:
                                    cmd.append('-a')
                                groups_need_mod = True
                                break
                    else:
                        groups_need_mod = True
            if groups_need_mod:
                if self.append and not has_append:
                    # No -a support: fall back to -A with only the new groups.
                    cmd.append('-A')
                    cmd.append(','.join(group_diff))
                else:
                    cmd.append('-G')
                    cmd.append(','.join(groups))
        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)
        if self.home is not None and info[5] != self.home:
            cmd.append('-d')
            cmd.append(self.home)
            if self.move_home:
                cmd.append('-m')
        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)
        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            cmd.append('-p')
            cmd.append(self.password)
        # skip if no changes to be made
        if len(cmd) == 1:
            return (None, '', '')
        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
def main():
    '''Entry point of the Ansible user module: dispatch on the requested
    state, apply changes through the platform-specific User subclass and
    exit with a JSON result.'''
    # Defaults used when generating an SSH key for the managed user.
    ssh_defaults = {
        'bits': 0,
        'type': 'rsa',
        'passphrase': None,
        'comment': 'ansible-generated on %s' % socket.gethostname()
    }
    module = AnsibleModule(
        argument_spec = dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, aliases=['user'], type='str'),
            uid=dict(default=None, type='str'),
            non_unique=dict(default='no', type='bool'),
            group=dict(default=None, type='str'),
            groups=dict(default=None, type='list'),
            comment=dict(default=None, type='str'),
            home=dict(default=None, type='path'),
            shell=dict(default=None, type='str'),
            password=dict(default=None, type='str', no_log=True),
            login_class=dict(default=None, type='str'),
            # following options are specific to selinux
            seuser=dict(default=None, type='str'),
            # following options are specific to userdel
            force=dict(default='no', type='bool'),
            remove=dict(default='no', type='bool'),
            # following options are specific to useradd
            createhome=dict(default='yes', type='bool'),
            skeleton=dict(default=None, type='str'),
            system=dict(default='no', type='bool'),
            # following options are specific to usermod
            move_home=dict(default='no', type='bool'),
            append=dict(default='no', type='bool'),
            # following are specific to ssh key generation
            generate_ssh_key=dict(type='bool'),
            ssh_key_bits=dict(default=ssh_defaults['bits'], type='int'),
            ssh_key_type=dict(default=ssh_defaults['type'], type='str'),
            ssh_key_file=dict(default=None, type='path'),
            ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'),
            ssh_key_passphrase=dict(default=None, type='str', no_log=True),
            update_password=dict(default='always', choices=['always', 'on_create'], type='str'),
            expires=dict(default=None, type='float'),
        ),
        supports_check_mode=True
    )
    # User() dispatches to the platform/distribution-specific subclass.
    user = User(module)
    module.debug('User instantiated - platform %s' % user.platform)
    if user.distribution:
        module.debug('User instantiated - distribution %s' % user.distribution)
    # rc stays None as long as no change-making command has run.
    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = user.name
    result['state'] = user.state
    if user.state == 'absent':
        if user.user_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = user.remove_user()
            if rc != 0:
                module.fail_json(name=user.name, msg=err, rc=rc)
            result['force'] = user.force
            result['remove'] = user.remove
    elif user.state == 'present':
        if not user.user_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = user.create_user()
            if module.check_mode:
                # NOTE(review): 'system' is set to the user *name* in check
                # mode — looks suspicious but preserved for compatibility.
                result['system'] = user.name
            else:
                result['system'] = user.system
                result['createhome'] = user.createhome
        else:
            # modify user (note: this function is check mode aware)
            (rc, out, err) = user.modify_user()
            result['append'] = user.append
            result['move_home'] = user.move_home
        if rc is not None and rc != 0:
            module.fail_json(name=user.name, msg=err, rc=rc)
        if user.password is not None:
            result['password'] = 'NOT_LOGGING_PASSWORD'
    # rc is None exactly when nothing was executed, i.e. no change.
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err
    if user.user_exists():
        info = user.user_info()
        if info is False:
            # Fail fast: continuing would raise a TypeError on info[2] below.
            result['msg'] = "failed to look up user name: %s" % user.name
            result['failed'] = True
            module.fail_json(**result)
        result['uid'] = info[2]
        result['group'] = info[3]
        result['comment'] = info[4]
        result['home'] = info[5]
        result['shell'] = info[6]
        if user.groups is not None:
            result['groups'] = user.groups
        # handle missing homedirs (info was fetched above; nothing in between
        # can have changed the passwd record)
        if user.home is None:
            user.home = info[5]
        if not os.path.exists(user.home) and user.createhome:
            if not module.check_mode:
                user.create_homedir(user.home)
                user.chown_homedir(info[2], info[3], user.home)
            result['changed'] = True
        # deal with ssh key
        if user.sshkeygen:
            # generate ssh key (note: this function is check mode aware)
            (rc, out, err) = user.ssh_key_gen()
            if rc is not None and rc != 0:
                module.fail_json(name=user.name, msg=err, rc=rc)
            if rc == 0:
                result['changed'] = True
            (rc, out, err) = user.ssh_key_fingerprint()
            if rc == 0:
                result['ssh_fingerprint'] = out.strip()
            else:
                result['ssh_fingerprint'] = err.strip()
            result['ssh_key_file'] = user.get_ssh_key_path()
            result['ssh_public_key'] = user.get_ssh_public_key()
    module.exit_json(**result)
# import module snippets
# NOTE(review): this marker is where the Ansible build normally injects the
# module_utils boilerplate imports before execution.
if __name__ == '__main__':
    main()
| gpl-3.0 |
bright-sparks/chromium-spacewalk | third_party/tlslite/tlslite/checker.py | 118 | 2756 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Class for post-handshake certificate checking."""
from .x509 import X509
from .x509certchain import X509CertChain
from .errors import *
class Checker(object):
    """This class is passed to a handshake function to check the other
    party's certificate chain.

    If a handshake function completes successfully, but the Checker
    judges the other party's certificate chain to be missing or
    inadequate, a subclass of
    L{tlslite.errors.TLSAuthenticationError} will be raised.

    Currently, the Checker can check an X.509 chain.
    """

    def __init__(self,
                 x509Fingerprint=None,
                 checkResumedSession=False):
        """Create a new Checker instance.

        You must pass in one of these argument combinations:
          - x509Fingerprint

        @type x509Fingerprint: str
        @param x509Fingerprint: A hex-encoded X.509 end-entity
        fingerprint which the other party's end-entity certificate must
        match.

        @type checkResumedSession: bool
        @param checkResumedSession: If resumed sessions should be
        checked. This defaults to False, on the theory that if the
        session was checked once, we don't need to bother
        re-checking it.
        """
        self.x509Fingerprint = x509Fingerprint
        self.checkResumedSession = checkResumedSession

    def __call__(self, connection):
        """Check a TLSConnection.

        When a Checker is passed to a handshake function, this will
        be called at the end of the function.

        @type connection: L{tlslite.tlsconnection.TLSConnection}
        @param connection: The TLSConnection to examine.

        @raise tlslite.errors.TLSAuthenticationError: If the other
        party's certificate chain is missing or bad.
        """
        if not self.checkResumedSession and connection.resumed:
            return
        # Without a configured fingerprint there is nothing to verify.
        if not self.x509Fingerprint:
            return
        # The peer's chain: server chain when we are the client, else the
        # client chain.
        if connection._client:
            chain = connection.session.serverCertChain
        else:
            chain = connection.session.clientCertChain
        if isinstance(chain, X509CertChain):
            fingerprint = chain.getFingerprint()
            if fingerprint != self.x509Fingerprint:
                raise TLSFingerprintError(
                    "X.509 fingerprint mismatch: %s, %s" %
                    (fingerprint, self.x509Fingerprint))
        elif chain:
            # A chain of an unexpected type cannot be fingerprint-checked.
            raise TLSAuthenticationTypeError()
        else:
            raise TLSNoAuthenticationError()
EMSTrack/WebServerAndClient | emstrack/models.py | 1 | 3486 | from django.contrib.auth.models import User
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.template.defaulttags import register
from django.utils.translation import ugettext_lazy as _
# filters
from django.utils import timezone
from django.utils.safestring import mark_safe
@register.filter(is_safe=True)
def get_check(key):
    """Template filter: render a check-mark icon for truthy values."""
    # The markup is a fixed literal, so it is safe to emit unescaped.
    return mark_safe('<span class="fas fa-check"></span>') if key else ''
@register.filter(is_safe=True)
def get_times(key):
    """Template filter: render a times (cross) icon for falsy values."""
    return '' if key else mark_safe('<span class="fas fa-times"></span>')
@register.filter(is_safe=True)
def get_check_or_times(key):
    """Template filter: check-mark for truthy values, times icon otherwise."""
    icon = 'fa-check' if key else 'fa-times'
    return mark_safe('<span class="fas %s"></span>' % icon)
# Fallback address values used as field defaults by the models below
# (Tijuana, Baja California, Mexico).
defaults = {
    'location': Point(-117.0382, 32.5149, srid=4326),  # longitude, latitude (WGS84)
    'state': 'BCN',
    'city': 'Tijuana',
    'country': 'MX',
}
class AddressModel(models.Model):
    """
    An abstract base class model that provides address fields.
    """
    number = models.CharField(_('number'), max_length=30, blank=True)
    street = models.CharField(_('street'), max_length=254, blank=True)
    unit = models.CharField(_('unit'), max_length=30, blank=True)
    neighborhood = models.CharField(_('neighborhood'), max_length=100, blank=True)
    # City/state/country/location fall back to the module-level `defaults`.
    city = models.CharField(_('city'), max_length=100, default=defaults['city'])
    state = models.CharField(_('state'), max_length=3, default=defaults['state'])
    zipcode = models.CharField(_('zipcode'), max_length=12, blank=True)
    country = models.CharField(_('country'), max_length=2, default=defaults['country'])
    location = models.PointField(_('location'), srid=4326, default=defaults['location'])
    class Meta:
        abstract = True
    def get_address(self):
        """Build a single human-readable address string from the fields,
        skipping whichever components are blank."""
        # Street-level part: "number street unit".
        address_str = ' '.join((self.number, self.street, self.unit)).strip()
        if address_str:
            if self.neighborhood:
                address_str = ', '.join((address_str, self.neighborhood)).strip()
        else:
            # No street info: start from the neighborhood alone.
            address_str = self.neighborhood.strip()
        if address_str:
            address_str = ', '.join((address_str, self.city, self.state)).strip()
        else:
            address_str = ', '.join((self.city, self.state)).strip()
        # Zipcode is space-joined, country comma-joined, mirroring the
        # "city, state zipcode, country" convention.
        address_str = ' '.join((address_str, self.zipcode)).strip()
        address_str = ', '.join((address_str, self.country)).strip()
        return address_str
class UpdatedByModel(models.Model):
    """
    An abstract base class model that provides comments and update fields.
    """
    # Free-form note attached to the change.
    comment = models.CharField(_('comment'), max_length=254, blank=True)
    updated_by = models.ForeignKey(User,
                                   on_delete=models.CASCADE,
                                   verbose_name=_('updated_by'))
    # auto_now=True: refreshed automatically on every save().
    updated_on = models.DateTimeField(_('updated_on'), auto_now=True)
    class Meta:
        abstract = True
class UpdatedByHistoryModel(models.Model):
    """
    An abstract base class model that provides comments and update fields.

    Unlike UpdatedByModel, the timestamp is only a creation-time default
    (it may be set explicitly and is not refreshed on save), which suits
    history/audit rows.
    """
    comment = models.CharField(_('comment'), max_length=254, blank=True)
    updated_by = models.ForeignKey(User,
                                   on_delete=models.CASCADE,
                                   verbose_name=_('updated_by'))
    # default=timezone.now (no auto_now): value is settable and immutable
    # across subsequent saves.
    updated_on = models.DateTimeField(_('updated_on'), default=timezone.now)
    class Meta:
        abstract = True
| bsd-3-clause |
cwisecarver/osf.io | osf_tests/test_analytics.py | 1 | 7668 | # -*- coding: utf-8 -*-
"""
Unit tests for analytics logic in framework/analytics/__init__.py
"""
import unittest
import pytest
from django.utils import timezone
from nose.tools import * # flake8: noqa (PEP8 asserts)
from flask import Flask
from datetime import datetime
from framework import analytics, sessions
from framework.sessions import session
from osf.models import PageCounter, Session
from tests.base import OsfTestCase
from osf_tests.factories import UserFactory, ProjectFactory
pytestmark = pytest.mark.django_db
class TestAnalytics(OsfTestCase):
    """Unit tests for the user-activity counters in framework.analytics."""
    def test_get_total_activity_count(self):
        # A fresh user starts at zero activity points, and the two APIs
        # (total count vs. user points) must agree before and after an event.
        user = UserFactory()
        date = timezone.now()
        assert_equal(analytics.get_total_activity_count(user._id), 0)
        assert_equal(analytics.get_total_activity_count(user._id), user.get_activity_points(db=None))
        analytics.increment_user_activity_counters(user._id, 'project_created', date.isoformat(), db=None)
        assert_equal(analytics.get_total_activity_count(user._id, db=None), 1)
        assert_equal(analytics.get_total_activity_count(user._id, db=None), user.get_activity_points(db=None))
    def test_increment_user_activity_counters(self):
        # Incrementing a counter awards exactly one activity point.
        user = UserFactory()
        date = timezone.now()
        assert_equal(user.get_activity_points(db=None), 0)
        analytics.increment_user_activity_counters(user._id, 'project_created', date.isoformat(), db=None)
        assert_equal(user.get_activity_points(db=None), 1)
class UpdateCountersTestCase(OsfTestCase):
    """Base class providing a Flask request context plus a fresh session,
    which the update_counters decorator requires."""
    def setUp(self):
        # A throwaway Flask app supplies the request context the session
        # machinery expects.
        decoratorapp = Flask('decorators')
        self.ctx = decoratorapp.test_request_context()
        self.ctx.push()
        # TODO: Think of something better @sloria @jmcarp
        sessions.set_session(Session())
    def tearDown(self):
        self.ctx.pop()
class TestUpdateCounters(UpdateCountersTestCase):
    """Tests for the update_counters decorator and get_basic_counters:
    counters are (unique, total) pairs keyed by a page string."""
    def setUp(self):
        super(TestUpdateCounters, self).setUp()
        self.node = ProjectFactory()
        self.user = self.node.creator
        self.user2 = UserFactory()
        self.node.add_contributor(self.user2, save=True)
        self.fid = 'foo'
        self.vid = 1
        self.userid = self.user._id
        # node_info lets the decorator suppress counts from contributors.
        self.node_info = {
            'contributors': self.node.contributors
        }
    def test_update_counters_file(self):
        # A repeat visit (page already in session 'visited') bumps the total
        # but not the unique count: (1, 1) -> (1, 2).
        @analytics.update_counters('download:{target_id}:{fid}', db=None)
        def download_file_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (None, None))
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 1))
        page = 'download:{0}:{1}'.format(self.node._id, self.fid)
        session.data['visited'].append(page)
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 2))
    def test_update_counters_file_user_is_contributor(self):
        # Downloads by a contributor must not increment the counters:
        # the count stays (1, 1) after the second call.
        @analytics.update_counters('download:{target_id}:{fid}', db=None, node_info=self.node_info)
        def download_file_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (None, None))
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 1))
        page = 'download:{0}:{1}'.format(self.node._id, self.fid)
        session.data['visited'].append(page)
        session.data['auth_user_id'] = self.userid
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 1))
    def test_update_counters_file_user_is_not_contributor(self):
        # A logged-in non-contributor counts like an anonymous visitor.
        @analytics.update_counters('download:{target_id}:{fid}', db=None, node_info=self.node_info)
        def download_file_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (None, None))
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 1))
        page = 'download:{0}:{1}'.format(self.node._id, self.fid)
        session.data['visited'].append(page)
        session.data['auth_user_id'] = "asv12uey821vavshl"
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 2))
    def test_update_counters_file_version(self):
        # Same behaviour when the counter key also includes a file version.
        @analytics.update_counters('download:{target_id}:{fid}:{vid}', db=None)
        def download_file_version_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        count = analytics.get_basic_counters('download:{0}:{1}:{2}'.format(self.node._id, self.fid, self.vid), db=None)
        assert_equal(count, (None, None))
        download_file_version_(node=self.node, fid=self.fid, vid=self.vid)
        count = analytics.get_basic_counters('download:{0}:{1}:{2}'.format(self.node._id, self.fid, self.vid), db=None)
        assert_equal(count, (1, 1))
        page = 'download:{0}:{1}:{2}'.format(self.node._id, self.fid, self.vid)
        session.data['visited'].append(page)
        download_file_version_(node=self.node, fid=self.fid, vid=self.vid)
        count = analytics.get_basic_counters('download:{0}:{1}:{2}'.format(self.node._id, self.fid, self.vid), db=None)
        assert_equal(count, (1, 2))
    def test_get_basic_counters(self):
        # get_basic_counters returns (unique, total) from the stored record.
        page = 'node:' + str(self.node._id)
        PageCounter.objects.create(_id=page, total=5, unique=3)
        count = analytics.get_basic_counters(page, db=None)
        assert_equal(count, (3, 5))
    @unittest.skip('Reverted the fix for #2281. Unskip this once we use GUIDs for keys in the download counts collection')
    def test_update_counters_different_files(self):
        # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2281
        @analytics.update_counters('download:{target_id}:{fid}', db=None)
        def download_file_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        # fid1/fid2 differ only by '.' vs '_' — they must get distinct keys.
        fid1 = 'test.analytics.py'
        fid2 = 'test_analytics.py'
        download_file_(node=self.node, fid=fid1)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, fid1), db=None)
        assert_equal(count, (1, 1))
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, fid2), db=None)
        assert_equal(count, (None, None))
        page = 'download:{0}:{1}'.format(self.node._id, fid1)
        session.data['visited'].append(page)
        download_file_(node=self.node, fid=fid1)
        download_file_(node=self.node, fid=fid2)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, fid1), db=None)
        assert_equal(count, (1, 2))
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, fid2), db=None)
        assert_equal(count, (1, 1))
| apache-2.0 |
marcocaccin/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
    """Utility class to parse a SGML file and yield documents one at a time."""
    def __init__(self, encoding='latin-1'):
        html_parser.HTMLParser.__init__(self)
        self._reset()
        self.encoding = encoding
    def handle_starttag(self, tag, attrs):
        # Dispatch <TAG ...> to start_tag(attrs); unknown tags are ignored.
        method = 'start_' + tag
        getattr(self, method, lambda x: None)(attrs)
    def handle_endtag(self, tag):
        # Dispatch </TAG> to end_tag(); unknown tags are ignored.
        method = 'end_' + tag
        getattr(self, method, lambda: None)()
    def _reset(self):
        # Clear per-document state flags and accumulators.
        self.in_title = 0
        self.in_body = 0
        self.in_topics = 0
        self.in_topic_d = 0
        self.title = ""
        self.body = ""
        self.topics = []
        self.topic_d = ""
    def parse(self, fd):
        """Feed the (binary) file object chunk by chunk, yielding each
        completed document dict as soon as it is parsed."""
        self.docs = []
        for chunk in fd:
            self.feed(chunk.decode(self.encoding))
            for doc in self.docs:
                yield doc
            self.docs = []
        self.close()
    def handle_data(self, data):
        # Route character data to whichever element we are currently inside.
        if self.in_body:
            self.body += data
        elif self.in_title:
            self.title += data
        elif self.in_topic_d:
            self.topic_d += data
    def start_reuters(self, attributes):
        pass
    def end_reuters(self):
        # Normalize whitespace and emit the finished document.
        self.body = re.sub(r'\s+', r' ', self.body)
        self.docs.append({'title': self.title,
                          'body': self.body,
                          'topics': self.topics})
        self._reset()
    def start_title(self, attributes):
        self.in_title = 1
    def end_title(self):
        self.in_title = 0
    def start_body(self, attributes):
        self.in_body = 1
    def end_body(self):
        self.in_body = 0
    def start_topics(self, attributes):
        self.in_topics = 1
    def end_topics(self):
        self.in_topics = 0
    def start_d(self, attributes):
        self.in_topic_d = 1
    def end_d(self):
        # A </D> closes one topic label; collect it and reset the buffer.
        self.in_topic_d = 0
        self.topics.append(self.topic_d)
        self.topic_d = ""
def stream_reuters_documents(data_path=None):
    """Iterate over documents of the Reuters dataset.

    The Reuters archive will automatically be downloaded and uncompressed if
    the `data_path` directory does not exist.

    Documents are represented as dictionaries with 'body' (str),
    'title' (str), 'topics' (list(str)) keys.
    """
    DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                    'reuters21578-mld/reuters21578.tar.gz')
    ARCHIVE_FILENAME = 'reuters21578.tar.gz'
    if data_path is None:
        data_path = os.path.join(get_data_home(), "reuters")
    if not os.path.exists(data_path):
        """Download the dataset."""
        print("downloading dataset (once and for all) into %s" %
              data_path)
        os.mkdir(data_path)
        def progress(blocknum, bs, size):
            # urlretrieve reporthook: print an in-place progress line
            # (skipped when running under the sphinx builder).
            total_sz_mb = '%.2f MB' % (size / 1e6)
            current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
            if _not_in_sphinx():
                print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
                      end='')
        archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
        urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
                                   reporthook=progress)
        if _not_in_sphinx():
            print('\r', end='')
        print("untarring Reuters dataset...")
        tarfile.open(archive_path, 'r:gz').extractall(data_path)
        print("done.")
    # Stream-parse every SGML shard; documents are yielded lazily.
    parser = ReutersParser()
    for filename in glob(os.path.join(data_path, "*.sgm")):
        for doc in parser.parse(open(filename, 'rb')):
            yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
# NOTE(review): `non_negative=True` was deprecated/removed in later
# scikit-learn releases; kept to match the version this example targets.
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                               non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
    'SGD': SGDClassifier(),
    'Perceptron': Perceptron(),
    'NB Multinomial': MultinomialNB(alpha=0.01),
    'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
    """Extract a minibatch of examples, return a tuple X_text, y.

    Note: size is before excluding invalid docs with no topics assigned.
    """
    texts = []
    labels = []
    # Consume at most `size` documents, keeping only those with topics.
    for doc in itertools.islice(doc_iter, size):
        if not doc['topics']:
            continue
        texts.append(u'{title}\n\n{body}'.format(**doc))
        labels.append(pos_class in doc['topics'])
    if not texts:
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    return tuple(texts), np.asarray(labels, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
    """Generator of minibatches."""
    while True:
        X_text, y = get_minibatch(doc_iter, minibatch_size)
        # An empty batch signals the underlying stream is exhausted.
        if not len(X_text):
            break
        yield X_text, y
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
# NOTE(review): the literal 1000 below duplicates n_test_documents — keep
# the two in sync (or use the variable) if the hold-out size changes.
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
    """Report progress information, return a string."""
    duration = time.time() - stats['t0']
    parts = [
        "%20s classifier : \t" % cls_name,
        "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats,
        "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats,
        "accuracy: %(accuracy).3f " % stats,
        "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration),
    ]
    return "".join(parts)
cls_stats = {}
for cls_name in partial_fit_classifiers:
    # Per-classifier accounting: counts, accuracy history and timings.
    stats = {'n_train': 0, 'n_train_pos': 0,
             'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
             'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
    cls_stats[cls_name] = stats

# Discard test set
# NOTE(review): the held-out test set was already consumed from
# `data_stream` above; this call discards a further `n_test_documents`
# documents -- confirm this is intended and not a duplicated step.
get_minibatch(data_stream, n_test_documents)

# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000

# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0

# Main loop : iterate on mini-batchs of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):

    # Vectorization time is shared by all classifiers, so it is
    # accumulated globally rather than per classifier.
    tick = time.time()
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick

    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        # update estimator with examples in the current mini-batch
        cls.partial_fit(X_train, y_train, classes=all_classes)

        # accumulate test accuracy stats
        cls_stats[cls_name]['total_fit_time'] += time.time() - tick
        cls_stats[cls_name]['n_train'] += X_train.shape[0]
        cls_stats[cls_name]['n_train_pos'] += sum(y_train)
        tick = time.time()
        cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
        cls_stats[cls_name]['prediction_time'] = time.time() - tick
        # History pairs feed the accuracy-vs-examples plot ...
        acc_history = (cls_stats[cls_name]['accuracy'],
                       cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['accuracy_history'].append(acc_history)
        # ... and the accuracy-vs-runtime plot.
        run_history = (cls_stats[cls_name]['accuracy'],
                       total_vect_time + cls_stats[cls_name]['total_fit_time'])
        cls_stats[cls_name]['runtime_history'].append(run_history)

        # Report progress every third mini-batch.
        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    if i % 3 == 0:
        print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
    """Plot accuracy as a function of x."""
    plt.title('Classification accuracy as a function of %s' % x_legend)
    plt.xlabel('%s' % x_legend)
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.plot(np.array(x), np.array(y))
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))

# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with #examples
    accuracy, n_examples = zip(*stats['accuracy_history'])
    plot_accuracy(n_examples, accuracy, "training examples (#)")
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')

plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with runtime
    accuracy, runtime = zip(*stats['runtime_history'])
    plot_accuracy(runtime, accuracy, 'runtime (s)')
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')

# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats['total_fit_time'])
# The shared vectorization time gets its own bar for comparison.
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
# NOTE(review): `axes.color_cycle` was removed in modern matplotlib
# (replaced by `axes.prop_cycle`) -- confirm the pinned version.
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
    """Attach each bar's height as a text label just above it."""
    for rect in rectangles:
        height = rect.get_height()
        center = rect.get_x() + rect.get_width() / 2.
        ax.text(center, 1.05 * height, '%.4f' % height,
                ha='center', va='bottom')
autolabel(rectangles)
plt.show()

# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats['prediction_time'])
# Add the one-off test-set parsing and vectorizing times for comparison.
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
                     color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
cdrage/atomic | Atomic/diff.py | 3 | 12983 | import os
import sys
import rpm
from filecmp import dircmp
from . import util
from . import mount
from . import Atomic
class Diff(Atomic):
    def diff(self):
        '''
        Allows you to 'diff' the RPMs between two different docker images|containers.
        :return: None
        '''
        helpers = DiffHelpers(self.args)
        images = self.args.compares
        # Check to make sure each input is valid
        for image in images:
            self.get_input_id(image)
        # Mount both docker objects; they must be unmounted again via
        # helpers._cleanup() on every exit path below.
        image_list = helpers.create_image_list(images)
        try:
            # Set up RPM classes and make sure each docker object
            # is RPM-based
            rpm_image_list = []
            if self.args.rpms:
                for image in image_list:
                    rpmimage = RpmDiff(image.chroot, image.name, self.args.names_only)
                    if not rpmimage.is_rpm:
                        # Unmount before bailing out with an error.
                        helpers._cleanup(image_list)
                        raise ValueError("{0} is not RPM based.".format(rpmimage.name))
                    rpmimage._get_rpm_content()
                    rpm_image_list.append(rpmimage)
            if not self.args.no_files:
                helpers.output_files(images, image_list)
            if self.args.rpms:
                helpers.output_rpms(rpm_image_list)
            # Clean up
            helpers._cleanup(image_list)
            if self.args.json:
                util.output_json(helpers.json_out)
        except KeyboardInterrupt:
            # Ctrl-C: still unmount everything before quitting.
            util.writeOut("Quitting...")
            helpers._cleanup(image_list)
class DiffHelpers(object):
    """
    Helper class for the diff function
    """
    def __init__(self, args):
        self.args = args
        # Accumulates the JSON document emitted when --json is requested.
        self.json_out = {}

    @staticmethod
    def _cleanup(image_list):
        """
        Unmount and clean up every mounted image.
        :param image_list: list of DiffObj instances
        :return: None
        """
        for image in image_list:
            image._remove()

    @staticmethod
    def create_image_list(images):
        """
        Instantiate each image into a class and then into
        image_list
        :param images: list of docker image/container names
        :return: list of image class instantiations
        """
        image_list = []
        for image in images:
            image_list.append(DiffObj(image))
        return image_list

    def output_files(self, images, image_list):
        """
        Prints out the file differences when applicable
        :param images: original image/container names (used as labels)
        :param image_list: list of mounted DiffObj instances
        :return: None
        """
        file_diff = DiffFS(image_list[0].chroot, image_list[1].chroot)
        # Record the one-sided diffs under '<name>_only' keys.
        for image in image_list:
            self.json_out[image.name] = {'{}_only'.format(image.name): file_diff._get_only(image.chroot)}
        self.json_out['files_differ'] = file_diff.common_diff
        if not self.args.json:
            file_diff.print_results(images[0], images[1])
            util.writeOut("\n")

    def output_rpms(self, rpm_image_list):
        """
        Prints out the differences in RPMs when applicable
        :param rpm_image_list: list of RpmDiff instances
        :return: None
        """
        ip = RpmPrint(rpm_image_list)
        if not self.args.json:
            if ip.has_diff:
                ip._print_diff(self.args.verbose)
            else:
                util.writeOut("\n{} and {} have no different RPMs".format(ip.i1.name, ip.i2.name))
        # Output JSON content
        else:
            rpm_json = ip._rpm_json()
            for image in rpm_json.keys():
                if image not in self.json_out:
                    self.json_out[image] = rpm_json[image]
                else:
                    # Merge with any file-diff data already recorded for
                    # this image in output_files().
                    _tmp = self.json_out[image]
                    _tmp.update(rpm_json[image])
                    self.json_out[image] = _tmp
class DiffObj(object):
    """Mounts a docker image/container so its filesystem can be inspected."""
    def __init__(self, docker_name):
        # Mount under /tmp in a freshly created directory.
        self.dm = mount.DockerMount("/tmp", mnt_mkdir=True)
        self.name = docker_name
        self.root_path = self.dm.mount(self.name)
        # The usable filesystem root lives in the 'rootfs' subdirectory
        # of the mount point.
        self.chroot = os.path.join(self.root_path, "rootfs")

    def _remove(self):
        """
        Stub to unmount, remove the devmapper device (if needed), and
        remove any temporary containers used
        :return: None
        """
        self.dm.unmount()
class RpmDiff(object):
    """
    Class for handing the parsing of images during an
    atomic diff
    """
    def __init__(self, chroot, name, names_only):
        self.chroot = chroot
        self.name = name
        self.is_rpm = self._is_rpm_based()
        # Filled in lazily by _get_rpm_content().
        self.rpms = None
        self.release = None
        # When True only package names (not versions) are compared.
        self.names_only = names_only

    def _get_rpm_content(self):
        """
        Populates the release and RPM information
        :return: None
        """
        self.rpms = self._get_rpms(self.chroot)
        self.release = self._populate_rpm_content(self.chroot)

    def _is_rpm_based(self):
        """
        Determines if the image is based on RPM
        :return: bool True or False
        """
        if os.path.exists(os.path.join(self.chroot, 'usr/bin/rpm')):
            return True
        else:
            return False

    def _get_rpms(self, chroot_os):
        """
        Pulls the NVRs of the RPMs in the image
        :param chroot_os: path to the mounted image root
        :return: sorted list of RPM NVRs
        """
        # Open the image's RPM database rooted at chroot_os; skip signature
        # and digest verification since only the package list is needed.
        ts = rpm.TransactionSet(chroot_os)
        ts.setVSFlags((rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS))
        image_rpms = []
        enc=sys.getdefaultencoding()
        for hdr in ts.dbMatch():  # No sorting # pylint: disable=no-member
            name = hdr['name'].decode(enc)
            if name == 'gpg-pubkey':
                # Imported GPG keys show up as pseudo-packages; skip them.
                continue
            else:
                if not self.names_only:
                    foo = "{0}-{1}-{2}".format(name,
                                               hdr['epochnum'],
                                               hdr['version'].decode(enc))
                else:
                    foo = "{0}".format(name)
                image_rpms.append(foo)
        return sorted(image_rpms)

    @staticmethod
    def _populate_rpm_content(chroot_os):
        """
        Get the release string of the image.
        :param chroot_os: path to the mounted image root
        :return: string release name
        """
        etc_release_path = os.path.join(chroot_os,
                                        "etc/redhat-release")
        os_release = open(etc_release_path).read()
        return os_release
class RpmPrint(object):
    """
    Class to handle the output of atomic diff
    """
    def __init__(self, image_list):
        def _max_rpm_name_length(all_rpms):
            # Column width: at least 30 chars, wider if any NVR is longer.
            _max = max([len(x) for x in all_rpms])
            return _max if _max >= 30 else 30
        self.image_list = image_list
        self.i1, self.i2 = self.image_list
        # Union of both images' package lists drives the two-column output.
        self.all_rpms = sorted(list(set(self.i1.rpms) | set(self.i2.rpms)))
        self._max = _max_rpm_name_length(self.all_rpms)
        # Two-column format string sized to the widest package name.
        self.two_col = "{0:" + str(self._max) + "} | {1:" \
                       + str(self._max) + "}"
        self.has_diff = False if set(self.i1.rpms) == set(self.i2.rpms) \
            else True

    def _print_diff(self, be_verbose):
        """
        Outputs the diff information in columns
        :param be_verbose: when True, also print packages common to both
        :return: None
        """
        util.writeOut("")
        util.writeOut(self.two_col.format(self.i1.name, self.i2.name))
        util.writeOut(self.two_col.format("-"*self._max, "-"*self._max))
        self._print_release()
        util.writeOut(self.two_col.format("-"*self._max, "-"*self._max))
        for rpm in self.all_rpms:
            if (rpm in self.i1.rpms) and (rpm in self.i2.rpms):
                if be_verbose:
                    util.writeOut(self.two_col.format(rpm, rpm))
            elif (rpm in self.i1.rpms) and not (rpm in self.i2.rpms):
                util.writeOut(self.two_col.format(rpm, ""))
            elif not (rpm in self.i1.rpms) and (rpm in self.i2.rpms):
                util.writeOut(self.two_col.format("", rpm))

    def _print_release(self):
        """
        Prints the release information and splits based on the column length
        :return: None
        """
        # Wrap each release string to the column width (minus padding) and
        # print the chunks side by side, padding the shorter side.
        step = self._max - 2
        r1_split = [self.i1.release.strip()[i:i+step] for i in range(0, len(self.i1.release.rstrip()), step)]
        r2_split = [self.i2.release.strip()[i:i+step] for i in range(0, len(self.i2.release.rstrip()), step)]
        for n in list(range(max(len(r1_split), len(r2_split)))):
            col1 = r1_split[n] if 0 <= n < len(r1_split) else ""
            col2 = r2_split[n] if 0 <= n < len(r2_split) else ""
            util.writeOut(self.two_col.format(col1, col2))

    def _rpm_json(self):
        """
        Builds the diff as a JSON-serializable dict keyed by image name.
        :return: dict with release/all/exclusive/common RPMs per image
        """
        def _form_image_json(image, exclusive, common):
            return {
                "release": image.release,
                "all_rpms": image.rpms,
                "exclusive_rpms": exclusive,
                "common_rpms": common
            }
        l1_diff = sorted(list(set(self.i1.rpms) - set(self.i2.rpms)))
        l2_diff = sorted(list(set(self.i2.rpms) - set(self.i1.rpms)))
        common = sorted(list(set(self.i1.rpms).intersection(self.i2.rpms)))
        json_out = {}
        json_out[self.i1.name] = _form_image_json(self.i1, l1_diff, common)
        json_out[self.i2.name] = _form_image_json(self.i2, l2_diff, common)
        return json_out
class DiffFS(object):
    """
    Recursively compares two mounted docker filesystems and records which
    paths exist only on one side and which common files differ.
    """
    def __init__(self, chroot_left, chroot_right):
        # filecmp.dircmp does the heavy lifting; delta() walks its tree.
        self.compare = dircmp(chroot_left, chroot_right)
        self.left = []
        self.right = []
        self.common_diff = []
        self.chroot_left = chroot_left
        self.chroot_right = chroot_right
        self.delta(self.compare)

    def _get_only(self, _chroot):
        """
        Return the one-sided diff list keyed by the chroot path.
        :param _chroot: one of the two chroot paths given to __init__
        :return: list of paths present only under that chroot
        """
        if _chroot == self.chroot_left:
            return self.left
        return self.right

    @staticmethod
    def _walk(walkdir):
        """
        Collect the leaf files (and empty leaf directories) below walkdir.
        :param walkdir: directory to walk
        :return: list of utf-8 encoded paths
        """
        found = []
        for _dir, dir_names, files in os.walk(walkdir):
            # Only record entries at the leaves of the tree.
            if dir_names:
                continue
            if files:
                found.extend(os.path.join(_dir, _file).encode('utf-8')
                             for _file in files)
            else:
                found.append(_dir.encode('utf-8'))
        return found

    def delta(self, compare_obj):
        """
        Recursively record diffs from a dircmp object.
        :param compare_obj: a dircmp instance
        :return: None
        """
        # Strip the mount prefix (/tmp/<docker_obj>/rootfs) from both sides.
        rel_left = compare_obj.left.replace(self.chroot_left, '')
        rel_right = compare_obj.right.replace(self.chroot_right, '')
        # Files present on both sides whose contents differ.
        self.common_diff.extend(os.path.join(rel_left, name)
                                for name in compare_obj.diff_files)
        # Entries found only on the left side.
        for name in compare_obj.left_only:
            path = os.path.join(rel_left, name)
            self.left.append(path)
            if os.path.isdir(path):
                self.left += self._walk(path)
        # Entries found only on the right side.
        for name in compare_obj.right_only:
            path = os.path.join(rel_right, name)
            self.right.append(path)
            if os.path.isdir(path):
                self.right += self._walk(path)
        # Recurse into directories common to both sides.
        for sub in compare_obj.subdirs.values():
            self.delta(sub)

    def print_results(self, left_docker_obj, right_docker_obj):
        """
        Pretty output for the results of the filesystem diff.
        :param left_docker_obj: label for the left image/container
        :param right_docker_obj: label for the right image/container
        :return: None
        """
        def _show(file_list):
            for _file in file_list:
                util.writeOut("{0}{1}".format(5*" ", _file))
        if not (self.left or self.right or self.common_diff):
            util.writeOut("\nThere are no file differences between {0} "
                          "and {1}".format(left_docker_obj, right_docker_obj))
        if self.left:
            util.writeOut("\nFiles only in {}:".format(left_docker_obj))
            _show(self.left)
        if self.right:
            util.writeOut("\nFiles only in {}:".format(right_docker_obj))
            _show(self.right)
        if self.common_diff:
            util.writeOut("\nCommon files that are different:")
            _show(self.common_diff)
| lgpl-2.1 |
HyperBaton/ansible | lib/ansible/module_utils/network/netvisor/netvisor.py | 38 | 1971 | # Copyright: (c) 2018, Pluribus Networks
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.connection import exec_command
def get_connection(module):
    """Return a cliconf Connection for *module*, cached on the module so
    repeated calls reuse the same connection."""
    if hasattr(module, '_nvos_connection'):
        return module._nvos_connection
    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module._nvos_connection = Connection(module._socket_path)
    else:
        # fail_json normally terminates the module run, so the trailing
        # return below is only a safety net.
        module.fail_json(msg='Invalid connection type %s' % network_api)
    return module._nvos_connection
def get_capabilities(module):
    """Return (and cache on the module) the capabilities dict reported by
    the device connection."""
    if hasattr(module, '_nvos_capabilities'):
        return module._nvos_capabilities
    try:
        capabilities = Connection(module._socket_path).get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    # get_capabilities() returns a JSON string; cache the decoded dict.
    module._nvos_capabilities = json.loads(capabilities)
    return module._nvos_capabilities
def to_commands(module, commands):
    """Normalize *commands* into the dict form expected by exec_command."""
    command_spec = dict(
        command=dict(key=True),
        prompt=dict(),
        answer=dict(),
    )
    return ComplexList(command_spec, module)(commands)
def run_commands(module, commands, check_rc=True):
    """Execute each command against the device via exec_command.

    :param module: AnsibleModule with an open device connection
    :param commands: a command (or list of commands) accepted by to_commands
    :param check_rc: when True, fail the module on any non-zero return code
    :return: (rc, out, err) of the *last* command executed
    """
    commands = to_commands(module, to_list(commands))
    # NOTE(review): assumes at least one command; rc/out/err are taken
    # from the final iteration.
    for cmd in commands:
        cmd = module.jsonify(cmd)
        rc, out, err = exec_command(module, cmd)
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
    # BUG FIX: removed a dead `responses = (to_text(out, ...))` assignment
    # that was computed on every call but never used or returned.
    return rc, out, err
| gpl-3.0 |
BassantMorsi/finderApp | lib/python2.7/site-packages/numpy/compat/_inspect.py | 86 | 7554 | """Subset of inspect module from upstream python
We use this instead of upstream because upstream inspect is slow to import, and
significantly contributes to numpy import times. Importing this copy has almost
no overhead.
"""
from __future__ import division, absolute_import, print_function
import types
__all__ = ['getargspec', 'formatargspec']
# ----------------------------------------------------------- type-checking
def ismethod(object):
    """Return true if the object is an instance method.

    Instance method objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this method was defined
        im_class        class object in which this method belongs
        im_func         function object containing implementation of method
        im_self         instance to which this method is bound, or None

    """
    return isinstance(object, types.MethodType)
def isfunction(object):
    """Return true if the object is a user-defined function.

    Function objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this function was defined
        func_code       code object containing compiled function bytecode
        func_defaults   tuple of any default values for arguments
        func_doc        (same as __doc__)
        func_globals    global namespace in which this function was defined
        func_name       (same as __name__)

    """
    return isinstance(object, types.FunctionType)
def iscode(object):
    """Return true if the object is a code object.

    Code objects provide these attributes:
        co_argcount     number of arguments (not including * or ** args)
        co_code         string of raw compiled bytecode
        co_consts       tuple of constants used in the bytecode
        co_filename     name of file in which this code object was created
        co_firstlineno  number of first line in Python source code
        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
        co_lnotab       encoded mapping of line numbers to bytecode indices
        co_name         name with which this code object was defined
        co_names        tuple of names of local variables
        co_nlocals      number of local variables
        co_stacksize    virtual machine stack space required
        co_varnames     tuple of names of arguments and local variables

    """
    return isinstance(object, types.CodeType)
# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h.
# Bit flags from CPython's compile.h describing a code object's signature:
# CO_VARARGS marks a *args parameter, CO_VARKEYWORDS a **kwargs parameter.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
def getargs(co):
    """Get information about the arguments accepted by a code object.

    Three things are returned: (args, varargs, varkw), where 'args' is
    a list of argument names (possibly containing nested lists), and
    'varargs' and 'varkw' are the names of the * and ** arguments or None.

    """
    if not iscode(co):
        raise TypeError('arg is not a code object')

    nargs = co.co_argcount
    names = co.co_varnames
    args = list(names[:nargs])

    # Anonymous (tuple) arguments from Python 2 would show up with empty
    # or dotted names; they are deliberately unsupported here so the dis
    # module does not have to be imported.
    for arg in args:
        if arg[:1] in ['', '.']:
            raise TypeError("tuple function arguments are not supported")

    varargs = None
    varkw = None
    next_slot = nargs
    if co.co_flags & CO_VARARGS:
        varargs = names[next_slot]
        next_slot += 1
    if co.co_flags & CO_VARKEYWORDS:
        varkw = names[next_slot]
    return args, varargs, varkw
def getargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.

    """
    # Unwrap bound methods down to the underlying function object.
    target = func.__func__ if ismethod(func) else func
    if not isfunction(target):
        raise TypeError('arg is not a Python function')
    args, varargs, varkw = getargs(target.__code__)
    return args, varargs, varkw, target.__defaults__
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame.

    """
    arg_names, star_args, star_kw = getargs(frame.f_code)
    return arg_names, star_args, star_kw, frame.f_locals
def joinseq(seq):
    """Render a sequence of strings as a parenthesised tuple literal,
    keeping the trailing comma for one-element sequences."""
    inner = ', '.join(seq)
    if len(seq) == 1:
        inner = seq[0] + ','
    return '(' + inner + ')'
def strseq(object, convert, join=joinseq):
    """Recursively walk a sequence, stringifying each element.

    """
    # Exact type check (not isinstance) so subclasses are passed to
    # `convert` rather than recursed into, matching historic behavior.
    if type(object) in (list, tuple):
        parts = [strseq(element, convert, join) for element in object]
        return join(parts)
    return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  join=joinseq):
    """Format an argument spec from the 4 values returned by getargspec.

    The first four arguments are (args, varargs, varkw, defaults).  The
    other four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings.  The ninth
    argument is an optional function to format the sequence of arguments.

    """
    # Defaults align with the *last* len(defaults) positional arguments.
    first_default = len(args) - len(defaults) if defaults else None
    specs = []
    for i, arg in enumerate(args):
        spec = strseq(arg, formatarg, join)
        if first_default is not None and i >= first_default:
            spec += formatvalue(defaults[i - first_default])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(varargs))
    if varkw is not None:
        specs.append(formatvarkw(varkw))
    return '(' + ', '.join(specs) + ')'
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value),
                    join=joinseq):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings.  The ninth
    argument is an optional function to format the sequence of arguments.

    """
    # Bind the outer locals/formatters as defaults so `convert` does not
    # rely on closure lookups.
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        return formatarg(name) + formatvalue(locals[name])
    specs = [strseq(arg, convert, join) for arg in args]
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(specs) + ')'
| mit |
katstalk/android_external_chromium_org | build/android/gyp/create_standalone_apk.py | 27 | 1898 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Combines stripped libraries and incomplete APK into single standalone APK.
"""
import optparse
import os
import shutil
import sys
import tempfile
from util import build_utils
from util import md5_check
def CreateStandaloneApk(options):
    """Zip the stripped native libraries into the incomplete APK.

    The whole zip step is skipped (via md5_check) when neither the input
    APK nor the libraries directory changed since the last recorded run.
    """
    def DoZip():
        # Work on a temp copy so a failed zip never corrupts the input APK.
        with tempfile.NamedTemporaryFile(suffix='.zip') as intermediate_file:
            intermediate_path = intermediate_file.name
            shutil.copy(options.input_apk_path, intermediate_path)
            apk_path_abs = os.path.abspath(intermediate_path)
            # cwd is the libraries dir so entries are archived as 'lib/...'.
            build_utils.CheckOutput(
                ['zip', '-r', '-1', apk_path_abs, 'lib'],
                cwd=options.libraries_top_dir)
            shutil.copy(intermediate_path, options.output_apk_path)

    input_paths = [options.input_apk_path, options.libraries_top_dir]
    record_path = '%s.standalone.stamp' % options.input_apk_path
    md5_check.CallAndRecordIfStale(
        DoZip,
        record_path=record_path,
        input_paths=input_paths)
def main(argv):
    """Parse command-line options and build the standalone APK."""
    parser = optparse.OptionParser()
    parser.add_option('--libraries-top-dir',
                      help='Top directory that contains libraries '
                           '(i.e. library paths are like '
                           'libraries_top_dir/lib/android_app_abi/foo.so).')
    parser.add_option('--input-apk-path', help='Path to incomplete APK.')
    parser.add_option('--output-apk-path', help='Path for standalone APK.')
    parser.add_option('--stamp', help='Path to touch on success.')
    options, _ = parser.parse_args()

    build_utils.CheckOptions(options, parser,
                             required=['libraries_top_dir', 'input_apk_path',
                                       'output_apk_path'])

    CreateStandaloneApk(options)

    if options.stamp:
        build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
sean-/ansible | lib/ansible/galaxy/role.py | 39 | 11286 | ########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
import datetime
import os
import subprocess
import tarfile
import tempfile
import yaml
from shutil import rmtree
from urllib2 import urlopen
from ansible import constants as C
from ansible.errors import AnsibleError
class GalaxyRole(object):
    """Represents a role fetched (or to be fetched) from Ansible Galaxy
    or directly from an SCM URL."""

    SUPPORTED_SCMS = set(['git', 'hg'])
    # Relative paths (under the role directory) of the metadata files.
    META_MAIN = os.path.join('meta', 'main.yml')
    META_INSTALL = os.path.join('meta', '.galaxy_install_info')
    # Standard directory layout created for a role.
    ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
    def __init__(self, galaxy, name, src=None, version=None, scm=None):
        # Lazily-loaded caches for meta/main.yml and the install-info file.
        self._metadata = None
        self._install_info = None

        self.options = galaxy.options
        self.display = galaxy.display

        self.name = name
        self.version = version
        # src is either a Galaxy role name, an archive URL or an SCM URL.
        self.src = src
        self.scm = scm

        # Where the role lives (or will be installed) on disk.
        self.path = (os.path.join(galaxy.roles_path, self.name))
def fetch_from_scm_archive(self):
# this can be configured to prevent unwanted SCMS but cannot add new ones unless the code is also updated
if scm not in self.scms:
self.display.display("The %s scm is not currently supported" % scm)
return False
tempdir = tempfile.mkdtemp()
clone_cmd = [scm, 'clone', role_url, self.name]
with open('/dev/null', 'w') as devnull:
try:
self.display.display("- executing: %s" % " ".join(clone_cmd))
popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
except:
raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
rc = popen.wait()
if rc != 0:
self.display.display("- command %s failed" % ' '.join(clone_cmd))
self.display.display(" in directory %s" % tempdir)
return False
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
if scm == 'hg':
archive_cmd = ['hg', 'archive', '--prefix', "%s/" % self.name]
if role_version:
archive_cmd.extend(['-r', role_version])
archive_cmd.append(temp_file.name)
if scm == 'git':
archive_cmd = ['git', 'archive', '--prefix=%s/' % self.name, '--output=%s' % temp_file.name]
if role_version:
archive_cmd.append(role_version)
else:
archive_cmd.append('HEAD')
with open('/dev/null', 'w') as devnull:
self.display.display("- executing: %s" % " ".join(archive_cmd))
popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, self.name),
stderr=devnull, stdout=devnull)
rc = popen.wait()
if rc != 0:
self.display.display("- command %s failed" % ' '.join(archive_cmd))
self.display.display(" in directory %s" % tempdir)
return False
rmtree(tempdir, ignore_errors=True)
return temp_file.name
@property
def metadata(self):
"""
Returns role metadata
"""
if self._metadata is None:
meta_path = os.path.join(self.path, self.META_MAIN)
if os.path.isfile(meta_path):
try:
f = open(meta_path, 'r')
self._metadata = yaml.safe_load(f)
except:
self.display.vvvvv("Unable to load metadata for %s" % self.name)
return False
finally:
f.close()
return self._metadata
@property
def install_info(self):
"""
Returns role install info
"""
if self._install_info is None:
info_path = os.path.join(self.path, self.META_INSTALL)
if os.path.isfile(info_path):
try:
f = open(info_path, 'r')
self._install_info = yaml.safe_load(f)
except:
self.display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
finally:
f.close()
return self._install_info
def _write_galaxy_install_info(self):
"""
Writes a YAML-formatted file to the role's meta/ directory
(named .galaxy_install_info) which contains some information
we can use later for commands like 'list' and 'info'.
"""
info = dict(
version=self.version,
install_date=datetime.datetime.utcnow().strftime("%c"),
)
info_path = os.path.join(self.path, self.META_INSTALL)
try:
f = open(info_path, 'w+')
self._install_info = yaml.safe_dump(info, f)
except:
return False
finally:
f.close()
return True
def remove(self):
"""
Removes the specified role from the roles path. There is a
sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories
"""
if self.metadata:
try:
rmtree(self.path)
return True
except:
pass
return False
    def fetch(self, target, role_data):
        """
        Downloads the archived role from github to a temp location, extracts
        it, and then copies the extracted role to the role library path.
        NOTE(review): this method only performs the download and returns the
        temp file path; extraction appears to happen in install() -- confirm
        and update this docstring.
        :param target: version/branch/tag to download when no explicit src
        :param role_data: galaxy metadata dict with github_user/github_repo
        :return: path to the downloaded temp file, or False on failure
        """
        # first grab the file and save it to a temp location
        if self.src:
            archive_url = self.src
        else:
            archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
        self.display.display("- downloading role from %s" % archive_url)

        try:
            url_file = urlopen(archive_url)
            # delete=False: the caller is responsible for removing the file.
            temp_file = tempfile.NamedTemporaryFile(delete=False)
            data = url_file.read()
            # Stream the download in chunks until EOF (read() returns '').
            while data:
                temp_file.write(data)
                data = url_file.read()
            temp_file.close()
            return temp_file.name
        except:
            # TODO: better urllib2 error handling for error
            #       messages that are more exact
            self.display.error("failed to download the file.")
            return False
def install(self, role_filename):
    """
    Extracts the downloaded role tarball into self.path after verifying
    it contains a meta/main.yml, parses that metadata into
    self._metadata, and records install info for later 'list'/'info'
    commands. Returns True on success, False on any failure.
    """
    # the file is a tar, so open it that way and extract it
    # to the specified (or default) roles directory
    if not tarfile.is_tarfile(role_filename):
        self.display.error("the file downloaded was not a tar.gz")
        return False
    else:
        if role_filename.endswith('.gz'):
            role_tar_file = tarfile.open(role_filename, "r:gz")
        else:
            role_tar_file = tarfile.open(role_filename, "r")
        # verify the role's meta file
        meta_file = None
        members = role_tar_file.getmembers()
        # next find the metadata file
        for member in members:
            if self.META_MAIN in member.name:
                meta_file = member
                break
        if not meta_file:
            self.display.error("this role does not appear to have a meta/main.yml file.")
            return False
        else:
            try:
                self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
            except:
                self.display.error("this role does not appear to have a valid meta/main.yml file.")
                return False
        # we strip off the top-level directory for all of the files contained within
        # the tar file here, since the default is 'github_repo-target', and change it
        # to the specified role's name
        self.display.display("- extracting %s to %s" % (self.name, self.path))
        try:
            if os.path.exists(self.path):
                if not os.path.isdir(self.path):
                    self.display.error("the specified roles path exists and is not a directory.")
                    return False
                elif not getattr(self.options, "force", False):
                    self.display.error("the specified role %s appears to already exist. Use --force to replace it." % self.name)
                    return False
                else:
                    # using --force, remove the old path
                    if not self.remove():
                        self.display.error("%s doesn't appear to contain a role." % self.path)
                        self.display.error(" please remove this directory manually if you really want to put the role here.")
                        return False
            else:
                os.makedirs(self.path)
            # now we do the actual extraction to the path
            for member in members:
                # we only extract files, and remove any relative path
                # bits that might be in the file for security purposes
                # and drop the leading directory, as mentioned above
                if member.isreg() or member.issym():
                    parts = member.name.split(os.sep)[1:]
                    final_parts = []
                    # NOTE(review): this filter drops '..' components and
                    # names containing '~'/'$' but does not validate
                    # symlink targets — confirm that is acceptable here.
                    for part in parts:
                        if part != '..' and '~' not in part and '$' not in part:
                            final_parts.append(part)
                    member.name = os.path.join(*final_parts)
                    role_tar_file.extract(member, self.path)
            # write out the install info file for later use
            self._write_galaxy_install_info()
        except OSError as e:
            self.display.error("Could not update files in %s: %s" % (self.path, str(e)))
            return False
        # return the parsed yaml metadata
        self.display.display("- %s was installed successfully" % self.name)
        return True
@property
def spec(self):
    """
    Returns role spec info

    {
       'scm': 'git',
       'src': 'http://git.example.com/repos/repo.git',
       'version': 'v1.0',
       'name': 'repo'
    }
    """
    return {
        'scm': self.scm,
        'src': self.src,
        'version': self.version,
        'name': self.name,
    }
| gpl-3.0 |
giampaolo/pyftpdlib | pyftpdlib/handlers.py | 1 | 144463 | # Copyright (C) 2007 Giampaolo Rodola' <g.rodola@gmail.com>.
# Use of this source code is governed by MIT license that can be
# found in the LICENSE file.
import asynchat
import contextlib
import errno
import glob
import logging
import os
import random
import socket
import sys
import time
import traceback
import warnings
from datetime import datetime
try:
import pwd
import grp
except ImportError:
pwd = grp = None
try:
from OpenSSL import SSL # requires "pip install pyopenssl"
except ImportError:
SSL = None
try:
from collections import OrderedDict # python >= 2.7
except ImportError:
OrderedDict = dict
from . import __ver__
from ._compat import b
from ._compat import getcwdu
from ._compat import PY3
from ._compat import super
from ._compat import u
from ._compat import unicode
from ._compat import xrange
from .authorizers import AuthenticationFailed
from .authorizers import AuthorizerError
from .authorizers import DummyAuthorizer
from .filesystems import AbstractedFS
from .filesystems import FilesystemError
from .ioloop import _ERRNOS_DISCONNECTED
from .ioloop import _ERRNOS_RETRY
from .ioloop import Acceptor
from .ioloop import AsyncChat
from .ioloop import Connector
from .ioloop import RetryError
from .ioloop import timer
from .log import debug
from .log import logger
# ASCII value of carriage return, used by the ASCII-mode data wrappers.
CR_BYTE = ord('\r')


def _import_sendfile():
    """Return a usable sendfile(2) callable, or None.

    Prefers os.sendfile (introduced in Python 3.3, see
    http://bugs.python.org/issue10882); otherwise falls back on the
    third-party pysendfile module
    (https://github.com/giampaolo/pysendfile/).
    """
    if os.name != 'posix':
        return None
    if hasattr(os, 'sendfile'):  # py >= 3.3
        return os.sendfile
    try:
        import sendfile as sf
    except ImportError:
        return None
    # dirty hack to detect whether old 1.2.4 version is installed
    if hasattr(sf, 'has_sf_hdtr'):
        return None
    return sf.sendfile


sendfile = _import_sendfile()
# Table of every FTP command the server understands. Each entry maps the
# command name to:
#  - perm: the single-letter authorizer permission required (None = none)
#  - auth: whether the client must be authenticated first
#  - arg:  True = argument required, False = argument forbidden,
#          None = argument optional
#  - help: the text returned in reply to HELP <cmd>
proto_cmds = {
    'ABOR': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: ABOR (abort transfer).'),
    'ALLO': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: ALLO <SP> bytes (noop; allocate storage).'),
    'APPE': dict(
        perm='a', auth=True, arg=True,
        help='Syntax: APPE <SP> file-name (append data to file).'),
    'CDUP': dict(
        perm='e', auth=True, arg=False,
        help='Syntax: CDUP (go to parent directory).'),
    'CWD': dict(
        perm='e', auth=True, arg=None,
        help='Syntax: CWD [<SP> dir-name] (change working directory).'),
    'DELE': dict(
        perm='d', auth=True, arg=True,
        help='Syntax: DELE <SP> file-name (delete file).'),
    'EPRT': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: EPRT <SP> |proto|ip|port| (extended active mode).'),
    'EPSV': dict(
        perm=None, auth=True, arg=None,
        help='Syntax: EPSV [<SP> proto/"ALL"] (extended passive mode).'),
    'FEAT': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: FEAT (list all new features supported).'),
    'HELP': dict(
        perm=None, auth=False, arg=None,
        help='Syntax: HELP [<SP> cmd] (show help).'),
    'LIST': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: LIST [<SP> path] (list files).'),
    'MDTM': dict(
        perm='l', auth=True, arg=True,
        help='Syntax: MDTM [<SP> path] (file last modification time).'),
    'MFMT': dict(
        perm='T', auth=True, arg=True,
        help='Syntax: MFMT <SP> timeval <SP> path (file update last '
             'modification time).'),
    'MLSD': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: MLSD [<SP> path] (list directory).'),
    'MLST': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: MLST [<SP> path] (show information about path).'),
    'MODE': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: MODE <SP> mode (noop; set data transfer mode).'),
    'MKD': dict(
        perm='m', auth=True, arg=True,
        help='Syntax: MKD <SP> path (create directory).'),
    'NLST': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: NLST [<SP> path] (list path in a compact form).'),
    'NOOP': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: NOOP (just do nothing).'),
    'OPTS': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: OPTS <SP> cmd [<SP> option] (set option for command).'),
    'PASS': dict(
        perm=None, auth=False, arg=None,
        help='Syntax: PASS [<SP> password] (set user password).'),
    'PASV': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: PASV (open passive data connection).'),
    'PORT': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: PORT <sp> h,h,h,h,p,p (open active data connection).'),
    'PWD': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: PWD (get current working directory).'),
    'QUIT': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: QUIT (quit current session).'),
    'REIN': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: REIN (flush account).'),
    'REST': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: REST <SP> offset (set file offset).'),
    'RETR': dict(
        perm='r', auth=True, arg=True,
        help='Syntax: RETR <SP> file-name (retrieve a file).'),
    'RMD': dict(
        perm='d', auth=True, arg=True,
        help='Syntax: RMD <SP> dir-name (remove directory).'),
    'RNFR': dict(
        perm='f', auth=True, arg=True,
        help='Syntax: RNFR <SP> file-name (rename (source name)).'),
    'RNTO': dict(
        perm='f', auth=True, arg=True,
        help='Syntax: RNTO <SP> file-name (rename (destination name)).'),
    'SITE': dict(
        perm=None, auth=False, arg=True,
        help='Syntax: SITE <SP> site-command (execute SITE command).'),
    'SITE HELP': dict(
        perm=None, auth=False, arg=None,
        help='Syntax: SITE HELP [<SP> cmd] (show SITE command help).'),
    'SITE CHMOD': dict(
        perm='M', auth=True, arg=True,
        help='Syntax: SITE CHMOD <SP> mode path (change file mode).'),
    'SIZE': dict(
        perm='l', auth=True, arg=True,
        help='Syntax: SIZE <SP> file-name (get file size).'),
    'STAT': dict(
        perm='l', auth=False, arg=None,
        help='Syntax: STAT [<SP> path name] (server stats [list files]).'),
    'STOR': dict(
        perm='w', auth=True, arg=True,
        help='Syntax: STOR <SP> file-name (store a file).'),
    'STOU': dict(
        perm='w', auth=True, arg=None,
        help='Syntax: STOU [<SP> name] (store a file with a unique name).'),
    'STRU': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: STRU <SP> type (noop; set file structure).'),
    'SYST': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: SYST (get operating system type).'),
    'TYPE': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: TYPE <SP> [A | I] (set transfer type).'),
    'USER': dict(
        perm=None, auth=False, arg=True,
        help='Syntax: USER <SP> user-name (set username).'),
    'XCUP': dict(
        perm='e', auth=True, arg=False,
        help='Syntax: XCUP (obsolete; go to parent directory).'),
    'XCWD': dict(
        perm='e', auth=True, arg=None,
        help='Syntax: XCWD [<SP> dir-name] (obsolete; change directory).'),
    'XMKD': dict(
        perm='m', auth=True, arg=True,
        help='Syntax: XMKD <SP> dir-name (obsolete; create directory).'),
    'XPWD': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: XPWD (obsolete; get current dir).'),
    'XRMD': dict(
        perm='d', auth=True, arg=True,
        help='Syntax: XRMD <SP> dir-name (obsolete; remove directory).'),
}

# SITE CHMOD is meaningless on platforms without os.chmod.
if not hasattr(os, 'chmod'):
    del proto_cmds['SITE CHMOD']
def _strerror(err):
if isinstance(err, EnvironmentError):
try:
return os.strerror(err.errno)
except AttributeError:
# not available on PythonCE
if not hasattr(os, 'strerror'):
return err.strerror
raise
else:
return str(err)
def _is_ssl_sock(sock):
    """Tell whether *sock* is an SSL-wrapped connection.

    Always False when PyOpenSSL is not installed (module-level SSL is
    None in that case).
    """
    if SSL is None:
        return False
    return isinstance(sock, SSL.Connection)
def _support_hybrid_ipv6():
"""Return True if it is possible to use hybrid IPv6/IPv4 sockets
on this platform.
"""
# Note: IPPROTO_IPV6 constant is broken on Windows, see:
# http://bugs.python.org/issue6926
try:
if not socket.has_ipv6:
return False
with contextlib.closing(socket.socket(socket.AF_INET6)) as sock:
return not sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
except (socket.error, AttributeError):
return False
SUPPORTS_HYBRID_IPV6 = _support_hybrid_ipv6()
class _FileReadWriteError(OSError):
    """Exception raised when reading or writing a file during a transfer,
    letting handle_error() distinguish filesystem failures (e.g. disk
    full) from socket failures.
    """
class _GiveUpOnSendfile(Exception):
    """Exception raised in case use of sendfile() fails on the first try,
    in which case the transfer falls back on plain send().
    """
# --- DTP classes
class PassiveDTP(Acceptor):
    """Creates a socket listening on a local port, dispatching the
    resultant connection to DTPHandler. Used for handling PASV command.

     - (int) timeout: the timeout for a remote client to establish
       connection with the listening socket. Defaults to 30 seconds.

     - (int) backlog: the maximum number of queued connections passed
       to listen(). If a connection request arrives when the queue is
       full the client may raise ECONNRESET. Defaults to 5.
    """
    timeout = 30
    backlog = None

    def __init__(self, cmd_channel, extmode=False):
        """Initialize the passive data server.

         - (instance) cmd_channel: the command channel class instance.
         - (bool) extmode: whether use extended passive mode response type.
        """
        self.cmd_channel = cmd_channel
        self.log = cmd_channel.log
        self.log_exception = cmd_channel.log_exception
        Acceptor.__init__(self, ioloop=cmd_channel.ioloop)

        # Pick the IP to advertise in the PASV reply, honoring any
        # masquerade (NAT) configuration.
        local_ip = self.cmd_channel.socket.getsockname()[0]
        if local_ip in self.cmd_channel.masquerade_address_map:
            masqueraded_ip = self.cmd_channel.masquerade_address_map[local_ip]
        elif self.cmd_channel.masquerade_address:
            masqueraded_ip = self.cmd_channel.masquerade_address
        else:
            masqueraded_ip = None

        if self.cmd_channel.server.socket.family != socket.AF_INET:
            # dual stack IPv4/IPv6 support
            af = self.bind_af_unspecified((local_ip, 0))
            self.socket.close()
            self.del_channel()
        else:
            af = self.cmd_channel.socket.family

        self.create_socket(af, socket.SOCK_STREAM)

        if self.cmd_channel.passive_ports is None:
            # By using 0 as port number value we let kernel choose a
            # free unprivileged random port.
            self.bind((local_ip, 0))
        else:
            # Try the configured passive ports in random order until one
            # binds successfully.
            ports = list(self.cmd_channel.passive_ports)
            while ports:
                port = ports.pop(random.randint(0, len(ports) - 1))
                self.set_reuse_addr()
                try:
                    self.bind((local_ip, port))
                except socket.error as err:
                    if err.errno == errno.EADDRINUSE:  # port already in use
                        if ports:
                            continue
                        # If cannot use one of the ports in the configured
                        # range we'll use a kernel-assigned port, and log
                        # a message reporting the issue.
                        # By using 0 as port number value we let kernel
                        # choose a free unprivileged random port.
                        else:
                            self.bind((local_ip, 0))
                            self.cmd_channel.log(
                                "Can't find a valid passive port in the "
                                "configured range. A random kernel-assigned "
                                "port will be used.",
                                logfun=logger.warning
                            )
                    else:
                        raise
                else:
                    break
        self.listen(self.backlog or self.cmd_channel.server.backlog)

        port = self.socket.getsockname()[1]
        if not extmode:
            ip = masqueraded_ip or local_ip
            if ip.startswith('::ffff:'):
                # In this scenario, the server has an IPv6 socket, but
                # the remote client is using IPv4 and its address is
                # represented as an IPv4-mapped IPv6 address which
                # looks like this ::ffff:151.12.5.65, see:
                # http://en.wikipedia.org/wiki/IPv6#IPv4-mapped_addresses
                # http://tools.ietf.org/html/rfc3493.html#section-3.7
                # We truncate the first bytes to make it look like a
                # common IPv4 address.
                ip = ip[7:]
            # The format of 227 response in not standardized.
            # This is the most expected:
            resp = '227 Entering passive mode (%s,%d,%d).' % (
                ip.replace('.', ','), port // 256, port % 256)
            self.cmd_channel.respond(resp)
        else:
            self.cmd_channel.respond('229 Entering extended passive mode '
                                     '(|||%d|).' % port)
        if self.timeout:
            self.call_later(self.timeout, self.handle_timeout)

    # --- connection / overridden

    def handle_accepted(self, sock, addr):
        """Called when remote client initiates a connection."""
        if not self.cmd_channel.connected:
            return self.close()

        # Check the origin of data connection. If not expressively
        # configured we drop the incoming data connection if remote
        # IP address does not match the client's IP address.
        if self.cmd_channel.remote_ip != addr[0]:
            if not self.cmd_channel.permit_foreign_addresses:
                try:
                    sock.close()
                except socket.error:
                    pass
                msg = '425 Rejected data connection from foreign address ' \
                      '%s:%s.' % (addr[0], addr[1])
                self.cmd_channel.respond_w_warning(msg)
                # do not close listening socket: it couldn't be client's blame
                return
            else:
                # site-to-site FTP allowed
                msg = 'Established data connection with foreign address ' \
                      '%s:%s.' % (addr[0], addr[1])
                self.cmd_channel.log(msg, logfun=logger.warning)
        # Immediately close the current channel (we accept only one
        # connection at time) and avoid running out of max connections
        # limit.
        self.close()
        # delegate such connection to DTP handler
        if self.cmd_channel.connected:
            handler = self.cmd_channel.dtp_handler(sock, self.cmd_channel)
            if handler.connected:
                self.cmd_channel.data_channel = handler
                self.cmd_channel._on_dtp_connection()

    def handle_timeout(self):
        """Called when no client connected within self.timeout seconds:
        reply 421 and stop listening."""
        if self.cmd_channel.connected:
            self.cmd_channel.respond("421 Passive data channel timed out.",
                                     logfun=logger.info)
        self.close()

    def handle_error(self):
        """Called to handle any uncaught exceptions."""
        try:
            raise
        except Exception:
            logger.error(traceback.format_exc())
        try:
            self.close()
        except Exception:
            logger.critical(traceback.format_exc())

    def close(self):
        """Stop listening for the data connection."""
        debug("call: close()", inst=self)
        Acceptor.close(self)
class ActiveDTP(Connector):
    """Connects to remote client and dispatches the resulting connection
    to DTPHandler. Used for handling PORT command.

     - (int) timeout: the timeout for us to establish connection with
       the client's listening data socket.
    """
    timeout = 30

    def __init__(self, ip, port, cmd_channel):
        """Initialize the active data channel attempting to connect
        to remote data socket.

         - (str) ip: the remote IP address.
         - (int) port: the remote port.
         - (instance) cmd_channel: the command channel class instance.
        """
        Connector.__init__(self, ioloop=cmd_channel.ioloop)
        self.cmd_channel = cmd_channel
        self.log = cmd_channel.log
        self.log_exception = cmd_channel.log_exception
        self._idler = None
        if self.timeout:
            self._idler = self.ioloop.call_later(self.timeout,
                                                 self.handle_timeout,
                                                 _errback=self.handle_error)
        # _cmd / _normalized_addr are only used for logging replies.
        # NOTE(review): a dotted-quad IPv4 address contains 3 '.', not 4,
        # so this condition labels plain IPv4 connections as "EPRT" —
        # confirm whether '== 3' was intended.
        if ip.count('.') == 4:
            self._cmd = "PORT"
            self._normalized_addr = "%s:%s" % (ip, port)
        else:
            self._cmd = "EPRT"
            self._normalized_addr = "[%s]:%s" % (ip, port)

        source_ip = self.cmd_channel.socket.getsockname()[0]
        # dual stack IPv4/IPv6 support
        try:
            self.connect_af_unspecified((ip, port), (source_ip, 0))
        except (socket.gaierror, socket.error):
            self.handle_close()

    def readable(self):
        # never interested in read events: we only connect and hand off
        return False

    def handle_write(self):
        # overridden to prevent unhandled read/write event messages to
        # be printed by asyncore on Python < 2.6
        pass

    def handle_connect(self):
        """Called when connection is established."""
        self.del_channel()
        if self._idler is not None and not self._idler.cancelled:
            self._idler.cancel()
        if not self.cmd_channel.connected:
            return self.close()
        # fix for asyncore on python < 2.6, meaning we aren't
        # actually connected.
        # test_active_conn_error tests this condition
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise socket.error(err)
        #
        msg = 'Active data connection established.'
        self.cmd_channel.respond('200 ' + msg)
        self.cmd_channel.log_cmd(self._cmd, self._normalized_addr, 200, msg)
        #
        if not self.cmd_channel.connected:
            return self.close()
        # delegate such connection to DTP handler
        handler = self.cmd_channel.dtp_handler(self.socket, self.cmd_channel)
        self.cmd_channel.data_channel = handler
        self.cmd_channel._on_dtp_connection()

    def handle_timeout(self):
        """Called when the connection attempt did not complete within
        self.timeout seconds."""
        if self.cmd_channel.connected:
            msg = "Active data channel timed out."
            self.cmd_channel.respond("421 " + msg, logfun=logger.info)
            self.cmd_channel.log_cmd(
                self._cmd, self._normalized_addr, 421, msg)
        self.close()

    def handle_close(self):
        # With the new IO loop, handle_close() gets called in case
        # the fd appears in the list of exceptional fds.
        # This means connect() failed.
        if not self._closed:
            self.close()
            if self.cmd_channel.connected:
                msg = "Can't connect to specified address."
                self.cmd_channel.respond("425 " + msg)
                self.cmd_channel.log_cmd(
                    self._cmd, self._normalized_addr, 425, msg)

    def handle_error(self):
        """Called to handle any uncaught exceptions."""
        try:
            raise
        except (socket.gaierror, socket.error):
            pass
        except Exception:
            self.log_exception(self)
        try:
            self.handle_close()
        except Exception:
            logger.critical(traceback.format_exc())

    def close(self):
        """Tear down the connector and cancel the pending timeout."""
        debug("call: close()", inst=self)
        if not self._closed:
            Connector.close(self)
            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()
class DTPHandler(AsyncChat):
    """Class handling server-data-transfer-process (server-DTP, see
    RFC-959) managing data-transfer operations involving sending
    and receiving data.

    Class attributes:

     - (int) timeout: the timeout which roughly is the maximum time we
       permit data transfers to stall for with no progress. If the
       timeout triggers, the remote client will be kicked off
       (defaults 300).

     - (int) ac_in_buffer_size: incoming data buffer size (defaults 65536)

     - (int) ac_out_buffer_size: outgoing data buffer size (defaults 65536)
    """
    timeout = 300
    ac_in_buffer_size = 65536
    ac_out_buffer_size = 65536

    def __init__(self, sock, cmd_channel):
        """Initialize the command channel.

         - (instance) sock: the socket object instance of the newly
           established connection.
         - (instance) cmd_channel: the command channel class instance.
        """
        self.cmd_channel = cmd_channel
        self.file_obj = None          # the file being sent/received, if any
        self.receive = False          # True once enable_receiving() is called
        self.transfer_finished = False
        self.tot_bytes_sent = 0
        self.tot_bytes_received = 0
        self.cmd = None               # FTP command that started the transfer
        self.log = cmd_channel.log
        self.log_exception = cmd_channel.log_exception
        self._data_wrapper = None     # ASCII-mode translation callable
        self._lastdata = 0
        self._had_cr = False
        self._start_time = timer()
        self._resp = ()               # (response-string, logfun) sent on close
        self._offset = None
        self._filefd = None
        self._idler = None
        self._initialized = False
        try:
            AsyncChat.__init__(self, sock, ioloop=cmd_channel.ioloop)
        except socket.error as err:
            # if we get an exception here we want the dispatcher
            # instance to set socket attribute before closing, see:
            # https://github.com/giampaolo/pyftpdlib/issues/188
            AsyncChat.__init__(
                self, socket.socket(), ioloop=cmd_channel.ioloop)
            # https://github.com/giampaolo/pyftpdlib/issues/143
            self.close()
            if err.errno == errno.EINVAL:
                return
            self.handle_error()
            return

        # remove this instance from IOLoop's socket map
        if not self.connected:
            self.close()
            return
        if self.timeout:
            self._idler = self.ioloop.call_every(self.timeout,
                                                 self.handle_timeout,
                                                 _errback=self.handle_error)

    def __repr__(self):
        return '<%s(%s)>' % (self.__class__.__name__,
                             self.cmd_channel.get_repr_info(as_str=True))

    __str__ = __repr__

    def use_sendfile(self):
        """Tell whether this transfer can use the zero-copy sendfile(2)
        path (binary mode, real file object, enabled in config)."""
        if not self.cmd_channel.use_sendfile:
            # as per server config
            return False
        if self.file_obj is None or not hasattr(self.file_obj, "fileno"):
            # directory listing or unusual file obj
            return False
        try:
            # io.IOBase default implementation raises io.UnsupportedOperation
            self.file_obj.fileno()
        except OSError:
            return False
        if self.cmd_channel._current_type != 'i':
            # text file transfer (need to transform file content on the fly)
            return False
        return True

    def push(self, data):
        # mark the channel as active and interested in write events only
        self._initialized = True
        self.modify_ioloop_events(self.ioloop.WRITE)
        self._wanted_io_events = self.ioloop.WRITE
        AsyncChat.push(self, data)

    def push_with_producer(self, producer):
        self._initialized = True
        self.modify_ioloop_events(self.ioloop.WRITE)
        self._wanted_io_events = self.ioloop.WRITE
        if self.use_sendfile():
            self._offset = producer.file.tell()
            self._filefd = self.file_obj.fileno()
            try:
                self.initiate_sendfile()
            except _GiveUpOnSendfile:
                pass
            else:
                # first sendfile() worked: route all further sends through it
                self.initiate_send = self.initiate_sendfile
                return
        debug("starting transfer using send()", self)
        AsyncChat.push_with_producer(self, producer)

    def close_when_done(self):
        asynchat.async_chat.close_when_done(self)

    def initiate_send(self):
        asynchat.async_chat.initiate_send(self)

    def initiate_sendfile(self):
        """A wrapper around sendfile."""
        try:
            sent = sendfile(self._fileno, self._filefd, self._offset,
                            self.ac_out_buffer_size)
        except OSError as err:
            if err.errno in _ERRNOS_RETRY or err.errno == errno.EBUSY:
                return
            elif err.errno in _ERRNOS_DISCONNECTED:
                self.handle_close()
            else:
                if self.tot_bytes_sent == 0:
                    logger.warning(
                        "sendfile() failed; falling back on using plain send")
                    raise _GiveUpOnSendfile
                else:
                    raise
        else:
            if sent == 0:
                # this signals the channel that the transfer is completed
                self.discard_buffers()
                self.handle_close()
            else:
                self._offset += sent
                self.tot_bytes_sent += sent

    # --- utility methods

    def _posix_ascii_data_wrapper(self, chunk):
        """The data wrapper used for receiving data in ASCII mode on
        systems using a single line terminator, handling those cases
        where CRLF ('\r\n') gets delivered in two chunks.
        """
        if self._had_cr:
            chunk = b'\r' + chunk

        if chunk.endswith(b'\r'):
            self._had_cr = True
            chunk = chunk[:-1]
        else:
            self._had_cr = False

        return chunk.replace(b'\r\n', b(os.linesep))

    def enable_receiving(self, type, cmd):
        """Enable receiving of data over the channel. Depending on the
        TYPE currently in use it creates an appropriate wrapper for the
        incoming data.

         - (str) type: current transfer type, 'a' (ASCII) or 'i' (binary).
        """
        self._initialized = True
        self.modify_ioloop_events(self.ioloop.READ)
        self._wanted_io_events = self.ioloop.READ
        self.cmd = cmd
        if type == 'a':
            if os.linesep == '\r\n':
                self._data_wrapper = None
            else:
                self._data_wrapper = self._posix_ascii_data_wrapper
        elif type == 'i':
            self._data_wrapper = None
        else:
            raise TypeError("unsupported type")
        self.receive = True

    def get_transmitted_bytes(self):
        """Return the number of transmitted bytes."""
        return self.tot_bytes_sent + self.tot_bytes_received

    def get_elapsed_time(self):
        """Return the transfer elapsed time in seconds."""
        return timer() - self._start_time

    def transfer_in_progress(self):
        """Return True if a transfer is in progress, else False."""
        return self.get_transmitted_bytes() != 0

    # --- connection

    def send(self, data):
        result = AsyncChat.send(self, data)
        self.tot_bytes_sent += result
        return result

    def refill_buffer(self):  # pragma: no cover
        """Overridden as a fix around http://bugs.python.org/issue1740572
        (when the producer is consumed, close() was called instead of
        handle_close()).
        """
        while True:
            if len(self.producer_fifo):
                p = self.producer_fifo.first()
                # a 'None' in the producer fifo is a sentinel,
                # telling us to close the channel.
                if p is None:
                    if not self.ac_out_buffer:
                        self.producer_fifo.pop()
                        # self.close()
                        self.handle_close()
                    return
                elif isinstance(p, str):
                    self.producer_fifo.pop()
                    self.ac_out_buffer += p
                    return
                data = p.more()
                if data:
                    self.ac_out_buffer = self.ac_out_buffer + data
                    return
                else:
                    self.producer_fifo.pop()
            else:
                return

    def handle_read(self):
        """Called when there is data waiting to be read."""
        try:
            chunk = self.recv(self.ac_in_buffer_size)
        except RetryError:
            pass
        except socket.error:
            self.handle_error()
        else:
            self.tot_bytes_received += len(chunk)
            if not chunk:
                # empty read == peer closed: transfer complete
                self.transfer_finished = True
                # self.close()  # <-- asyncore.recv() already do that...
                return
            if self._data_wrapper is not None:
                chunk = self._data_wrapper(chunk)
            try:
                self.file_obj.write(chunk)
            except OSError as err:
                # wrap so handle_error() can tell file errors from
                # socket errors
                raise _FileReadWriteError(err)

    handle_read_event = handle_read  # small speedup

    def readable(self):
        """Predicate for inclusion in the readable for select()."""
        # It the channel is not supposed to be receiving but yet it's
        # in the list of readable events, that means it has been
        # disconnected, in which case we explicitly close() it.
        # This is necessary as differently from FTPHandler this channel
        # is not supposed to be readable/writable at first, meaning the
        # upper IOLoop might end up calling readable() repeatedly,
        # hogging CPU resources.
        if not self.receive and not self._initialized:
            return self.close()
        return self.receive

    def writable(self):
        """Predicate for inclusion in the writable for select()."""
        return not self.receive and asynchat.async_chat.writable(self)

    def handle_timeout(self):
        """Called cyclically to check if data transfer is stalling with
        no progress in which case the client is kicked off.
        """
        if self.get_transmitted_bytes() > self._lastdata:
            self._lastdata = self.get_transmitted_bytes()
        else:
            msg = "Data connection timed out."
            self._resp = ("421 " + msg, logger.info)
            self.close()
            self.cmd_channel.close_when_done()

    def handle_error(self):
        """Called when an exception is raised and not otherwise handled."""
        try:
            raise
        # an error could occur in case we fail reading / writing
        # from / to file (e.g. file system gets full)
        except _FileReadWriteError as err:
            # NOTE(review): _FileReadWriteError is constructed with the
            # original OSError as its single argument, so err.errno here
            # looks like it may be None — confirm the intended message.
            error = _strerror(err.errno)
        except Exception:
            # some other exception occurred; we don't want to provide
            # confidential error messages
            self.log_exception(self)
            error = "Internal error"
        try:
            self._resp = ("426 %s; transfer aborted." % error, logger.warning)
            self.close()
        except Exception:
            logger.critical(traceback.format_exc())

    def handle_close(self):
        """Called when the socket is closed."""
        # If we used channel for receiving we assume that transfer is
        # finished when client closes the connection, if we used channel
        # for sending we have to check that all data has been sent
        # (responding with 226) or not (responding with 426).
        # In both cases handle_close() is automatically called by the
        # underlying asynchat module.
        if not self._closed:
            if self.receive:
                self.transfer_finished = True
            else:
                self.transfer_finished = len(self.producer_fifo) == 0
            try:
                if self.transfer_finished:
                    self._resp = ("226 Transfer complete.", logger.debug)
                else:
                    tot_bytes = self.get_transmitted_bytes()
                    self._resp = ("426 Transfer aborted; %d bytes transmitted."
                                  % tot_bytes, logger.debug)
            finally:
                self.close()

    def close(self):
        """Close the data channel, first attempting to close any remaining
        file handles."""
        debug("call: close()", inst=self)
        if not self._closed:
            # RFC-959 says we must close the connection before replying
            AsyncChat.close(self)

            # Close file object before responding successfully to client
            if self.file_obj is not None and not self.file_obj.closed:
                self.file_obj.close()

            if self._resp:
                self.cmd_channel.respond(self._resp[0], logfun=self._resp[1])

            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()

            if self.file_obj is not None:
                filename = self.file_obj.name
                elapsed_time = round(self.get_elapsed_time(), 3)
                self.cmd_channel.log_transfer(
                    cmd=self.cmd,
                    filename=self.file_obj.name,
                    receive=self.receive,
                    completed=self.transfer_finished,
                    elapsed=elapsed_time,
                    bytes=self.get_transmitted_bytes())
                if self.transfer_finished:
                    if self.receive:
                        self.cmd_channel.on_file_received(filename)
                    else:
                        self.cmd_channel.on_file_sent(filename)
                else:
                    if self.receive:
                        self.cmd_channel.on_incomplete_file_received(filename)
                    else:
                        self.cmd_channel.on_incomplete_file_sent(filename)
            self.cmd_channel._on_dtp_close()
# dirty hack in order to turn AsyncChat into a new style class in
# python 2.x so that we can use super()
if PY3:
    class _AsyncChatNewStyle(AsyncChat):
        pass
else:
    class _AsyncChatNewStyle(object, AsyncChat):
        # mixing 'object' in first forces a new-style class on python 2

        def __init__(self, *args, **kwargs):
            super(object, self).__init__(*args, **kwargs)  # bypass object
class ThrottledDTPHandler(_AsyncChatNewStyle, DTPHandler):
    """A DTPHandler subclass which wraps sending and receiving in a data
    counter and temporarily "sleeps" the channel so that you burst to no
    more than x Kb/sec average.

     - (int) read_limit: the maximum number of bytes to read (receive)
       in one second (defaults to 0 == no limit).

     - (int) write_limit: the maximum number of bytes to write (send)
       in one second (defaults to 0 == no limit).

     - (bool) auto_sized_buffers: this option only applies when read
       and/or write limits are specified. When enabled it bumps down
       the data buffer sizes so that they are never greater than read
       and write limits which results in a less bursty and smoother
       throughput (default: True).
    """
    read_limit = 0
    write_limit = 0
    auto_sized_buffers = True

    def __init__(self, sock, cmd_channel):
        super().__init__(sock, cmd_channel)
        self._timenext = 0
        self._datacount = 0
        self.sleeping = False
        self._throttler = None
        if self.auto_sized_buffers:
            if self.read_limit:
                while self.ac_in_buffer_size > self.read_limit:
                    self.ac_in_buffer_size /= 2
            if self.write_limit:
                while self.ac_out_buffer_size > self.write_limit:
                    self.ac_out_buffer_size /= 2
        # '/' produces a float on python 3; normalize back to int sizes
        self.ac_in_buffer_size = int(self.ac_in_buffer_size)
        self.ac_out_buffer_size = int(self.ac_out_buffer_size)

    def __repr__(self):
        return DTPHandler.__repr__(self)

    def use_sendfile(self):
        # sendfile() would bypass the byte accounting done in
        # recv()/send(), so throttled transfers never use it
        return False

    def recv(self, buffer_size):
        chunk = super().recv(buffer_size)
        if self.read_limit:
            self._throttle_bandwidth(len(chunk), self.read_limit)
        return chunk

    def send(self, data):
        num_sent = super().send(data)
        if self.write_limit:
            self._throttle_bandwidth(num_sent, self.write_limit)
        return num_sent

    def _cancel_throttler(self):
        if self._throttler is not None and not self._throttler.cancelled:
            self._throttler.cancel()

    def _throttle_bandwidth(self, len_chunk, max_speed):
        """A method which counts data transmitted so that you burst to
        no more than x Kb/sec average.
        """
        self._datacount += len_chunk
        if self._datacount >= max_speed:
            self._datacount = 0
            now = timer()
            sleepfor = (self._timenext - now) * 2
            if sleepfor > 0:
                # we've passed bandwidth limits

                def unsleep():
                    # re-register interest in the proper I/O event once
                    # the penalty period is over
                    if self.receive:
                        event = self.ioloop.READ
                    else:
                        event = self.ioloop.WRITE
                    self.add_channel(events=event)

                self.del_channel()
                self._cancel_throttler()
                self._throttler = self.ioloop.call_later(
                    sleepfor, unsleep, _errback=self.handle_error)
            self._timenext = now + 1

    def close(self):
        self._cancel_throttler()
        super().close()
# --- producers
class FileProducer(object):
    """Producer wrapper for file[-like] objects.

    Reads buffer_size-sized chunks from the wrapped file; in ASCII mode
    (TYPE 'a') on platforms whose native line terminator is not CRLF it
    converts bare LF line endings to CRLF on the fly.
    """
    buffer_size = 65536

    def __init__(self, file, type):
        """Initialize the producer with a data_wrapper appropriate to TYPE.

         - (file) file: the file[-like] object.
         - (str) type: the current TYPE, 'a' (ASCII) or 'i' (binary).
        """
        self.file = file
        self.type = type
        self._prev_chunk_endswith_cr = False
        if type == 'a' and os.linesep != '\r\n':
            self._data_wrapper = self._posix_ascii_data_wrapper
        else:
            self._data_wrapper = None

    def _posix_ascii_data_wrapper(self, chunk):
        """The data wrapper used for sending data in ASCII mode on
        systems using a single line terminator, handling those cases
        where CRLF gets delivered in two chunks.

        Returns a bytearray with a CR inserted before every bare LF.
        """
        cr = 0x0D  # ord of carriage return
        chunk = bytearray(chunk)
        pos = 0
        if self._prev_chunk_endswith_cr and chunk.startswith(b'\n'):
            # The CR was already emitted as the tail of the previous
            # chunk, so this leading LF completes a CRLF pair: skip it.
            pos += 1
        while True:
            pos = chunk.find(b'\n', pos)
            if pos == -1:
                break
            # Insert a CR before any LF not already preceded by one.
            # An LF at position 0 can never be preceded by a CR inside
            # this chunk; the previous code's chunk[pos - 1] wrapped
            # around to the *last* byte in that case, wrongly skipping
            # the insertion whenever the chunk happened to end with CR.
            if pos == 0 or chunk[pos - 1] != cr:
                chunk.insert(pos, cr)
                pos += 1
            pos += 1
        self._prev_chunk_endswith_cr = chunk.endswith(b'\r')
        return chunk

    def more(self):
        """Attempt a chunk of data of size self.buffer_size."""
        try:
            data = self.file.read(self.buffer_size)
        except OSError as err:
            # wrap so DTPHandler.handle_error() can tell file errors
            # from socket errors
            raise _FileReadWriteError(err)
        else:
            if self._data_wrapper is not None:
                data = self._data_wrapper(data)
            return data
class BufferedIteratorProducer(object):
    """Producer for iterator objects with buffer capabilities."""

    # how many times next() will be called on the iterator before
    # returning some data
    loops = 20

    def __init__(self, iterator):
        self.iterator = iterator

    def more(self):
        """Attempt a chunk of data from iterator by calling
        its next() method different times.
        """
        chunks = []
        # use the builtin range: the original relied on an xrange
        # py2-compatibility shim, but this file already requires
        # Python 3 (it uses bare super() calls)
        for _ in range(self.loops):
            try:
                chunks.append(next(self.iterator))
            except StopIteration:
                break
        return b''.join(chunks)
# --- FTP
class FTPHandler(AsyncChat):
"""Implements the FTP server Protocol Interpreter (see RFC-959),
handling commands received from the client on the control channel.
All relevant session information is stored in class attributes
reproduced below and can be modified before instantiating this
class.
- (int) timeout:
The timeout which is the maximum time a remote client may spend
between FTP commands. If the timeout triggers, the remote client
will be kicked off. Defaults to 300 seconds.
- (str) banner: the string sent when client connects.
- (int) max_login_attempts:
the maximum number of wrong authentications before disconnecting
the client (default 3).
- (bool)permit_foreign_addresses:
FTP site-to-site transfer feature: also referenced as "FXP" it
permits for transferring a file between two remote FTP servers
without the transfer going through the client's host (not
recommended for security reasons as described in RFC-2577).
Having this attribute set to False means that all data
connections from/to remote IP addresses which do not match the
 client's IP address will be dropped (default False).
- (bool) permit_privileged_ports:
set to True if you want to permit active data connections (PORT)
over privileged ports (not recommended, defaulting to False).
- (str) masquerade_address:
the "masqueraded" IP address to provide along PASV reply when
pyftpdlib is running behind a NAT or other types of gateways.
When configured pyftpdlib will hide its local address and
instead use the public address of your NAT (default None).
- (dict) masquerade_address_map:
in case the server has multiple IP addresses which are all
behind a NAT router, you may wish to specify individual
masquerade_addresses for each of them. The map expects a
dictionary containing private IP addresses as keys, and their
corresponding public (masquerade) addresses as values.
- (list) passive_ports:
what ports the ftpd will use for its passive data transfers.
Value expected is a list of integers (e.g. range(60000, 65535)).
When configured pyftpdlib will no longer use kernel-assigned
random ports (default None).
- (bool) use_gmt_times:
when True causes the server to report all ls and MDTM times in
GMT and not local time (default True).
- (bool) use_sendfile: when True uses sendfile() system call to
send a file resulting in faster uploads (from server to client).
Works on UNIX only and requires pysendfile module to be
installed separately:
https://github.com/giampaolo/pysendfile/
Automatically defaults to True if pysendfile module is
installed.
- (bool) tcp_no_delay: controls the use of the TCP_NODELAY socket
option which disables the Nagle algorithm resulting in
significantly better performances (default True on all systems
where it is supported).
- (str) unicode_errors:
the error handler passed to ''.encode() and ''.decode():
http://docs.python.org/library/stdtypes.html#str.decode
 (defaults to 'replace').
- (str) log_prefix:
the prefix string preceding any log line; all instance
attributes can be used as arguments.
All relevant instance attributes initialized when client connects
are reproduced below. You may be interested in them in case you
want to subclass the original FTPHandler.
- (bool) authenticated: True if client authenticated himself.
- (str) username: the name of the connected user (if any).
- (int) attempted_logins: number of currently attempted logins.
- (str) current_type: the current transfer type (default "a")
- (int) af: the connection's address family (IPv4/IPv6)
- (instance) server: the FTPServer class instance.
- (instance) data_channel: the data channel instance (if any).
"""
# these are overridable defaults
# default classes
authorizer = DummyAuthorizer()
active_dtp = ActiveDTP
passive_dtp = PassiveDTP
dtp_handler = DTPHandler
abstracted_fs = AbstractedFS
proto_cmds = proto_cmds
# session attributes (explained in the docstring)
timeout = 300
banner = "pyftpdlib %s ready." % __ver__
max_login_attempts = 3
permit_foreign_addresses = False
permit_privileged_ports = False
masquerade_address = None
masquerade_address_map = {}
passive_ports = None
use_gmt_times = True
use_sendfile = sendfile is not None
tcp_no_delay = hasattr(socket, "TCP_NODELAY")
unicode_errors = 'replace'
log_prefix = '%(remote_ip)s:%(remote_port)s-[%(username)s]'
auth_failed_timeout = 3
    def __init__(self, conn, server, ioloop=None):
        """Initialize the command channel.

        - (instance) conn: the socket object instance of the newly
          established connection.
        - (instance) server: the ftp server class instance.
        - (instance) ioloop: the IO loop to register with (the default
          one is used when None).
        """
        # public session attributes
        self.server = server
        self.fs = None
        self.authenticated = False
        self.username = ""
        self.password = ""
        self.attempted_logins = 0
        self.data_channel = None
        self.remote_ip = ""
        self.remote_port = ""
        self.started = time.time()
        # private session attributes
        self._last_response = ""
        self._current_type = 'a'
        self._restart_position = 0
        self._quit_pending = False
        self._in_buffer = []
        self._in_buffer_len = 0
        self._epsvall = False
        self._dtp_acceptor = None
        self._dtp_connector = None
        self._in_dtp_queue = None
        self._out_dtp_queue = None
        self._extra_feats = []
        self._current_facts = ['type', 'perm', 'size', 'modify']
        self._rnfr = None
        self._idler = None
        # cache whether DEBUG logging is enabled; logline() and
        # log_cmd() consult this flag on every command
        self._log_debug = logging.getLogger('pyftpdlib').getEffectiveLevel() \
            <= logging.DEBUG
        if os.name == 'posix':
            self._current_facts.append('unique')
        self._available_facts = self._current_facts[:]
        if pwd and grp:
            self._available_facts += ['unix.mode', 'unix.uid', 'unix.gid']
        if os.name == 'nt':
            self._available_facts.append('create')
        try:
            AsyncChat.__init__(self, conn, ioloop=ioloop)
        except socket.error as err:
            # if we get an exception here we want the dispatcher
            # instance to set socket attribute before closing, see:
            # https://github.com/giampaolo/pyftpdlib/issues/188
            AsyncChat.__init__(self, socket.socket(), ioloop=ioloop)
            self.close()
            debug("call: FTPHandler.__init__, err %r" % err, self)
            if err.errno == errno.EINVAL:
                # https://github.com/giampaolo/pyftpdlib/issues/143
                return
            self.handle_error()
            return
        self.set_terminator(b"\r\n")
        # connection properties
        try:
            self.remote_ip, self.remote_port = self.socket.getpeername()[:2]
        except socket.error as err:
            debug("call: FTPHandler.__init__, err on getpeername() %r" % err,
                  self)
            # A race condition may occur if the other end is closing
            # before we can get the peername, hence ENOTCONN (see issue
            # #100) while EINVAL can occur on OSX (see issue #143).
            self.connected = False
            if err.errno in (errno.ENOTCONN, errno.EINVAL):
                self.close()
            else:
                self.handle_error()
            return
        else:
            self.log("FTP session opened (connect)")
        # try to handle urgent data inline
        try:
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
        except socket.error as err:
            debug("call: FTPHandler.__init__, err on SO_OOBINLINE %r" % err,
                  self)
        # disable Nagle algorithm for the control socket only, resulting
        # in significantly better performances
        if self.tcp_no_delay:
            try:
                self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
            except socket.error as err:
                debug(
                    "call: FTPHandler.__init__, err on TCP_NODELAY %r" % err,
                    self)
        # remove this instance from IOLoop's socket_map
        if not self.connected:
            self.close()
            return
        # arm the idle timer which kicks inactive clients off
        if self.timeout:
            self._idler = self.ioloop.call_later(
                self.timeout, self.handle_timeout, _errback=self.handle_error)
def get_repr_info(self, as_str=False, extra_info={}):
info = OrderedDict()
info['id'] = id(self)
info['addr'] = "%s:%s" % (self.remote_ip, self.remote_port)
if _is_ssl_sock(self.socket):
info['ssl'] = True
if self.username:
info['user'] = self.username
# If threads are involved sometimes "self" may be None (?!?).
dc = getattr(self, 'data_channel', None)
if dc is not None:
if _is_ssl_sock(dc.socket):
info['ssl-data'] = True
if dc.file_obj:
if self.data_channel.receive:
info['sending-file'] = dc.file_obj
if dc.use_sendfile():
info['use-sendfile(2)'] = True
else:
info['receiving-file'] = dc.file_obj
info['bytes-trans'] = dc.get_transmitted_bytes()
info.update(extra_info)
if as_str:
return ', '.join(['%s=%r' % (k, v) for (k, v) in info.items()])
return info
def __repr__(self):
return '<%s(%s)>' % (self.__class__.__name__, self.get_repr_info(True))
__str__ = __repr__
def handle(self):
"""Return a 220 'ready' response to the client over the command
channel.
"""
self.on_connect()
if not self._closed and not self._closing:
if len(self.banner) <= 75:
self.respond("220 %s" % str(self.banner))
else:
self.push('220-%s\r\n' % str(self.banner))
self.respond('220 ')
def handle_max_cons(self):
"""Called when limit for maximum number of connections is reached."""
msg = "421 Too many connections. Service temporarily unavailable."
self.respond_w_warning(msg)
# If self.push is used, data could not be sent immediately in
# which case a new "loop" will occur exposing us to the risk of
# accepting new connections. Since this could cause asyncore to
# run out of fds in case we're using select() on Windows we
# immediately close the channel by using close() instead of
# close_when_done(). If data has not been sent yet client will
# be silently disconnected.
self.close()
def handle_max_cons_per_ip(self):
"""Called when too many clients are connected from the same IP."""
msg = "421 Too many connections from the same IP address."
self.respond_w_warning(msg)
self.close_when_done()
def handle_timeout(self):
"""Called when client does not send any command within the time
specified in <timeout> attribute."""
msg = "Control connection timed out."
self.respond("421 " + msg, logfun=logger.info)
self.close_when_done()
# --- asyncore / asynchat overridden methods
def readable(self):
# Checking for self.connected seems to be necessary as per:
# https://github.com/giampaolo/pyftpdlib/issues/188#c18
# In contrast to DTPHandler, here we are not interested in
# attempting to receive any further data from a closed socket.
return self.connected and AsyncChat.readable(self)
def writable(self):
return self.connected and AsyncChat.writable(self)
def collect_incoming_data(self, data):
"""Read incoming data and append to the input buffer."""
self._in_buffer.append(data)
self._in_buffer_len += len(data)
# Flush buffer if it gets too long (possible DoS attacks).
# RFC-959 specifies that a 500 response could be given in
# such cases
buflimit = 2048
if self._in_buffer_len > buflimit:
self.respond_w_warning('500 Command too long.')
self._in_buffer = []
self._in_buffer_len = 0
def decode(self, bytes):
return bytes.decode('utf8', self.unicode_errors)
    def found_terminator(self):
        r"""Called when the incoming data stream matches the \r\n
        terminator.

        Decodes the buffered line, resets the input buffer and hands
        the parsed command off to pre_process_command().
        """
        if self._idler is not None and not self._idler.cancelled:
            # client activity: push the idle timeout further away
            self._idler.reset()
        line = b''.join(self._in_buffer)
        try:
            line = self.decode(line)
        except UnicodeDecodeError:
            # By default we'll never get here as we replace errors
            # but user might want to override this behavior.
            # RFC-2640 doesn't mention what to do in this case so
            # we'll just return 501 (bad arg).
            return self.respond("501 Can't decode command.")
        self._in_buffer = []
        self._in_buffer_len = 0
        # first token is the command, the remainder its argument
        cmd = line.split(' ')[0].upper()
        arg = line[len(cmd) + 1:]
        try:
            self.pre_process_command(line, cmd, arg)
        except UnicodeEncodeError:
            self.respond("501 can't decode path (server filesystem encoding "
                         "is %s)" % sys.getfilesystemencoding())
    def pre_process_command(self, line, cmd, arg):
        """Validate a parsed command before dispatching it.

        Performs SITE sub-command resolution, logging, syntax checks,
        authentication checks, filesystem path translation and
        permission checks; finally hands off to process_command().

        - (str) line: the complete command line as received.
        - (str) cmd: the upper-cased command token.
        - (str) arg: the remainder of the line.
        """
        kwargs = {}
        if cmd == "SITE" and arg:
            # SITE sub-commands are dispatched as a single "SITE XXX" token
            cmd = "SITE %s" % arg.split(' ')[0].upper()
            arg = line[len(cmd) + 1:]
        if cmd != 'PASS':
            self.logline("<- %s" % line)
        else:
            # never log passwords
            self.logline("<- %s %s" % (line.split(' ')[0], '*' * 6))
        # Recognize those commands having a "special semantic". They
        # should be sent by following the RFC-959 procedure of sending
        # Telnet IP/Synch sequence (chr 242 and 255) as OOB data but
        # since many ftp clients don't do it correctly we check the
        # last 4 characters only.
        if cmd not in self.proto_cmds:
            if cmd[-4:] in ('ABOR', 'STAT', 'QUIT'):
                cmd = cmd[-4:]
            else:
                msg = 'Command "%s" not understood.' % cmd
                self.respond('500 ' + msg)
                if cmd:
                    self.log_cmd(cmd, arg, 500, msg)
                return
        if not arg and self.proto_cmds[cmd]['arg'] == True:  # NOQA
            msg = "Syntax error: command needs an argument."
            self.respond("501 " + msg)
            self.log_cmd(cmd, "", 501, msg)
            return
        if arg and self.proto_cmds[cmd]['arg'] == False:  # NOQA
            msg = "Syntax error: command does not accept arguments."
            self.respond("501 " + msg)
            self.log_cmd(cmd, arg, 501, msg)
            return
        if not self.authenticated:
            if self.proto_cmds[cmd]['auth'] or (cmd == 'STAT' and arg):
                msg = "Log in with USER and PASS first."
                self.respond("530 " + msg)
                self.log_cmd(cmd, arg, 530, msg)
            else:
                # call the proper ftp_* method
                self.process_command(cmd, arg)
                return
        else:
            if (cmd == 'STAT') and not arg:
                self.ftp_STAT(u(''))
                return
            # for file-system related commands check whether real path
            # destination is valid
            if self.proto_cmds[cmd]['perm'] and (cmd != 'STOU'):
                if cmd in ('CWD', 'XCWD'):
                    arg = self.fs.ftp2fs(arg or u('/'))
                elif cmd in ('CDUP', 'XCUP'):
                    arg = self.fs.ftp2fs(u('..'))
                elif cmd == 'LIST':
                    if arg.lower() in ('-a', '-l', '-al', '-la'):
                        # treat common ls options as "list the cwd"
                        arg = self.fs.ftp2fs(self.fs.cwd)
                    else:
                        arg = self.fs.ftp2fs(arg or self.fs.cwd)
                elif cmd == 'STAT':
                    if glob.has_magic(arg):
                        msg = 'Globbing not supported.'
                        self.respond('550 ' + msg)
                        self.log_cmd(cmd, arg, 550, msg)
                        return
                    arg = self.fs.ftp2fs(arg or self.fs.cwd)
                elif cmd == 'SITE CHMOD':
                    if ' ' not in arg:
                        msg = "Syntax error: command needs two arguments."
                        self.respond("501 " + msg)
                        self.log_cmd(cmd, "", 501, msg)
                        return
                    else:
                        mode, arg = arg.split(' ', 1)
                        arg = self.fs.ftp2fs(arg)
                        kwargs = dict(mode=mode)
                elif cmd == 'MFMT':
                    if ' ' not in arg:
                        msg = "Syntax error: command needs two arguments."
                        self.respond("501 " + msg)
                        self.log_cmd(cmd, "", 501, msg)
                        return
                    else:
                        timeval, arg = arg.split(' ', 1)
                        arg = self.fs.ftp2fs(arg)
                        kwargs = dict(timeval=timeval)
                else:  # LIST, NLST, MLSD, MLST
                    arg = self.fs.ftp2fs(arg or self.fs.cwd)
                if not self.fs.validpath(arg):
                    line = self.fs.fs2ftp(arg)
                    msg = '"%s" points to a path which is outside ' \
                          "the user's root directory" % line
                    self.respond("550 %s." % msg)
                    self.log_cmd(cmd, arg, 550, msg)
                    return
            # check permission
            perm = self.proto_cmds[cmd]['perm']
            if perm is not None and cmd != 'STOU':
                if not self.authorizer.has_perm(self.username, perm, arg):
                    msg = "Not enough privileges."
                    self.respond("550 " + msg)
                    self.log_cmd(cmd, arg, 550, msg)
                    return
            # call the proper ftp_* method
            self.process_command(cmd, arg, **kwargs)
def process_command(self, cmd, *args, **kwargs):
"""Process command by calling the corresponding ftp_* class
method (e.g. for received command "MKD pathname", ftp_MKD()
method is called with "pathname" as the argument).
"""
if self._closed:
return
self._last_response = ""
method = getattr(self, 'ftp_' + cmd.replace(' ', '_'))
method(*args, **kwargs)
if self._last_response:
code = int(self._last_response[:3])
resp = self._last_response[4:]
self.log_cmd(cmd, args[0], code, resp)
    def handle_error(self):
        # log the exception and tear the session down; never let an
        # error raised while handling an error escape -- at least leave
        # a trace of it
        try:
            self.log_exception(self)
            self.close()
        except Exception:
            logger.critical(traceback.format_exc())
    def handle_close(self):
        # connection closed by the remote end: tear down the session
        self.close()
    def close(self):
        """Close the current channel disconnecting the client."""
        debug("call: close()", inst=self)
        if not self._closed:
            AsyncChat.close(self)
            # close any DTP instance still trying to connect
            self._shutdown_connecting_dtp()
            if self.data_channel is not None:
                self.data_channel.close()
                del self.data_channel
            # close any file object still queued for a pending transfer
            if self._out_dtp_queue is not None:
                file = self._out_dtp_queue[2]
                if file is not None:
                    file.close()
            if self._in_dtp_queue is not None:
                file = self._in_dtp_queue[0]
                if file is not None:
                    file.close()
            del self._out_dtp_queue
            del self._in_dtp_queue
            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()
            # remove client IP address from ip map
            if self.remote_ip in self.server.ip_map:
                self.server.ip_map.remove(self.remote_ip)
            if self.fs is not None:
                # break the reference cycle with the filesystem object
                self.fs.cmd_channel = None
                self.fs = None
            self.log("FTP session closed (disconnect).")
            # Having self.remote_ip not set means that no connection
            # actually took place, hence we're not interested in
            # invoking the callback.
            if self.remote_ip:
                self.ioloop.call_later(0, self.on_disconnect,
                                       _errback=self.handle_error)
def _shutdown_connecting_dtp(self):
"""Close any ActiveDTP or PassiveDTP instance waiting to
establish a connection (passive or active).
"""
if self._dtp_acceptor is not None:
self._dtp_acceptor.close()
self._dtp_acceptor = None
if self._dtp_connector is not None:
self._dtp_connector.close()
self._dtp_connector = None
    # --- public callbacks
    # Note: to run a time consuming task make sure to use a separate
    # process or thread (see FAQs).
    # All of these are intentionally empty hooks meant to be overridden
    # in subclasses.

    def on_connect(self):
        """Called when client connects, *before* sending the initial
        220 reply.
        """

    def on_disconnect(self):
        """Called when connection is closed."""

    def on_login(self, username):
        """Called on user login."""

    def on_login_failed(self, username, password):
        """Called on failed login attempt.
        At this point client might have already been disconnected if it
        failed too many times.
        """

    def on_logout(self, username):
        """Called when user "cleanly" logs out due to QUIT or USER
        issued twice (re-login). This is not called if the connection
        is simply closed by client.
        """

    def on_file_sent(self, file):
        """Called every time a file has been successfully sent.
        "file" is the absolute name of the file just being sent.
        """

    def on_file_received(self, file):
        """Called every time a file has been successfully received.
        "file" is the absolute name of the file just being received.
        """

    def on_incomplete_file_sent(self, file):
        """Called every time a file has not been entirely sent.
        (e.g. ABOR during transfer or client disconnected).
        "file" is the absolute name of that file.
        """

    def on_incomplete_file_received(self, file):
        """Called every time a file has not been entirely received
        (e.g. ABOR during transfer or client disconnected).
        "file" is the absolute name of that file.
        """
# --- internal callbacks
    def _on_dtp_connection(self):
        """Called every time data channel connects, either active or
        passive.

        Incoming and outgoing queues are checked for pending data.
        If outbound data is pending, it is pushed into the data channel.
        If awaiting inbound data, the data channel is enabled for
        receiving.
        """
        # Close accepting DTP only. By closing ActiveDTP DTPHandler
        # would receive a closed socket object.
        # self._shutdown_connecting_dtp()
        if self._dtp_acceptor is not None:
            self._dtp_acceptor.close()
            self._dtp_acceptor = None
        # stop the idle timer as long as the data transfer is not finished
        if self._idler is not None and not self._idler.cancelled:
            self._idler.cancel()
        # check for data to send
        if self._out_dtp_queue is not None:
            data, isproducer, file, cmd = self._out_dtp_queue
            self._out_dtp_queue = None
            self.data_channel.cmd = cmd
            if file:
                self.data_channel.file_obj = file
            try:
                if not isproducer:
                    self.data_channel.push(data)
                else:
                    self.data_channel.push_with_producer(data)
                # the channel may have been closed in the meantime
                if self.data_channel is not None:
                    self.data_channel.close_when_done()
            except Exception:
                # dealing with this exception is up to DTP (see bug #84)
                self.data_channel.handle_error()
        # check for data to receive
        elif self._in_dtp_queue is not None:
            file, cmd = self._in_dtp_queue
            self.data_channel.file_obj = file
            self._in_dtp_queue = None
            self.data_channel.enable_receiving(self._current_type, cmd)
def _on_dtp_close(self):
"""Called every time the data channel is closed."""
self.data_channel = None
if self._quit_pending:
self.close()
elif self.timeout:
# data transfer finished, restart the idle timer
if self._idler is not None and not self._idler.cancelled:
self._idler.cancel()
self._idler = self.ioloop.call_later(
self.timeout, self.handle_timeout, _errback=self.handle_error)
# --- utility
def push(self, s):
asynchat.async_chat.push(self, s.encode('utf8'))
def respond(self, resp, logfun=logger.debug):
"""Send a response to the client using the command channel."""
self._last_response = resp
self.push(resp + '\r\n')
if self._log_debug:
self.logline('-> %s' % resp, logfun=logfun)
else:
self.log(resp[4:], logfun=logfun)
    def respond_w_warning(self, resp):
        # like respond() but logged with WARNING severity
        self.respond(resp, logfun=logger.warning)
    def push_dtp_data(self, data, isproducer=False, file=None, cmd=None):
        """Pushes data into the data channel.

        It is usually called for those commands requiring some data to
        be sent over the data channel (e.g. RETR).
        If data channel does not exist yet, it queues the data to send
        later; data will then be pushed into data channel when
        _on_dtp_connection() will be called.

        - (str/classobj) data: the data to send which may be a string
          or a producer object).
        - (bool) isproducer: whether treat data as a producer.
        - (file) file: the file[-like] object to send (if any).
        - (str) cmd: the command which originated the transfer (if any).
        """
        if self.data_channel is not None:
            self.respond(
                "125 Data connection already open. Transfer starting.")
            if file:
                self.data_channel.file_obj = file
            try:
                if not isproducer:
                    self.data_channel.push(data)
                else:
                    self.data_channel.push_with_producer(data)
                # the channel may have been closed in the meantime
                if self.data_channel is not None:
                    self.data_channel.cmd = cmd
                    self.data_channel.close_when_done()
            except Exception:
                # dealing with this exception is up to DTP (see bug #84)
                self.data_channel.handle_error()
        else:
            # queue the data; _on_dtp_connection() will pick it up once
            # the data channel is established
            self.respond(
                "150 File status okay. About to open data connection.")
            self._out_dtp_queue = (data, isproducer, file, cmd)
def flush_account(self):
"""Flush account information by clearing attributes that need
to be reset on a REIN or new USER command.
"""
self._shutdown_connecting_dtp()
# if there's a transfer in progress RFC-959 states we are
# supposed to let it finish
if self.data_channel is not None:
if not self.data_channel.transfer_in_progress():
self.data_channel.close()
self.data_channel = None
username = self.username
if self.authenticated and username:
self.on_logout(username)
self.authenticated = False
self.username = ""
self.password = ""
self.attempted_logins = 0
self._current_type = 'a'
self._restart_position = 0
self._quit_pending = False
self._in_dtp_queue = None
self._rnfr = None
self._out_dtp_queue = None
    def run_as_current_user(self, function, *args, **kwargs):
        """Execute a function impersonating the current logged-in user."""
        self.authorizer.impersonate_user(self.username, self.password)
        try:
            return function(*args, **kwargs)
        finally:
            # always restore privileges, even if function raised
            self.authorizer.terminate_impersonation(self.username)
# --- logging wrappers
# this is defined earlier
# log_prefix = '%(remote_ip)s:%(remote_port)s-[%(username)s]'
def log(self, msg, logfun=logger.info):
"""Log a message, including additional identifying session data."""
prefix = self.log_prefix % self.__dict__
logfun("%s %s" % (prefix, msg))
def logline(self, msg, logfun=logger.debug):
"""Log a line including additional identifying session data.
By default this is disabled unless logging level == DEBUG.
"""
if self._log_debug:
prefix = self.log_prefix % self.__dict__
logfun("%s %s" % (prefix, msg))
def logerror(self, msg):
"""Log an error including additional identifying session data."""
prefix = self.log_prefix % self.__dict__
logger.error("%s %s" % (prefix, msg))
    def log_exception(self, instance):
        """Log an unhandled exception. 'instance' is the instance
        where the exception was generated.
        """
        # logger.exception() also emits the current traceback
        logger.exception("unhandled exception in instance %r", instance)
# the list of commands which gets logged when logging level
# is >= logging.INFO
log_cmds_list = ["DELE", "RNFR", "RNTO", "MKD", "RMD", "CWD",
"XMKD", "XRMD", "XCWD",
"REIN", "SITE CHMOD", "MFMT"]
    def log_cmd(self, cmd, arg, respcode, respstr):
        """Log commands and responses in a standardized format.
        This is disabled in case the logging level is set to DEBUG.

        - (str) cmd:
           the command sent by client
        - (str) arg:
           the command argument sent by client.
           For filesystem commands such as DELE, MKD, etc. this is
           already represented as an absolute real filesystem path
           like "/home/user/file.ext".
        - (int) respcode:
           the response code as being sent by server. Response codes
           starting with 4xx or 5xx are returned if the command has
           been rejected for some reason.
        - (str) respstr:
           the response string as being sent by server.

        By default only the commands listed in the log_cmds_list class
        attribute (DELE, RNFR, RNTO, MKD, RMD, CWD, XMKD, XRMD, XCWD,
        REIN, SITE CHMOD, MFMT) are logged and the output is redirected
        to self.log method.
        Can be overridden to provide alternate formats or to log
        further commands.
        """
        if not self._log_debug and cmd in self.log_cmds_list:
            line = '%s %s' % (' '.join([cmd, arg]).strip(), respcode)
            if str(respcode)[0] in ('4', '5'):
                # failed commands also carry the response text
                line += ' %r' % respstr
            self.log(line)
def log_transfer(self, cmd, filename, receive, completed, elapsed, bytes):
"""Log all file transfers in a standardized format.
- (str) cmd:
the original command who caused the transfer.
- (str) filename:
the absolutized name of the file on disk.
- (bool) receive:
True if the transfer was used for client uploading (STOR,
STOU, APPE), False otherwise (RETR).
- (bool) completed:
True if the file has been entirely sent, else False.
- (float) elapsed:
transfer elapsed time in seconds.
- (int) bytes:
number of bytes transmitted.
"""
line = '%s %s completed=%s bytes=%s seconds=%s' % \
(cmd, filename, completed and 1 or 0, bytes, elapsed)
self.log(line)
# --- connection
    def _make_eport(self, ip, port):
        """Establish an active data channel with remote client which
        issued a PORT or EPRT command.

        - (str) ip: the address the client asked us to connect to.
        - (int) port: the TCP port to connect to.
        """
        # FTP bounce attacks protection: according to RFC-2577 it's
        # recommended to reject PORT if IP address specified in it
        # does not match client IP address.
        remote_ip = self.remote_ip
        if remote_ip.startswith('::ffff:'):
            # In this scenario, the server has an IPv6 socket, but
            # the remote client is using IPv4 and its address is
            # represented as an IPv4-mapped IPv6 address which
            # looks like this ::ffff:151.12.5.65, see:
            # http://en.wikipedia.org/wiki/IPv6#IPv4-mapped_addresses
            # http://tools.ietf.org/html/rfc3493.html#section-3.7
            # We truncate the first bytes to make it look like a
            # common IPv4 address.
            remote_ip = remote_ip[7:]
        if not self.permit_foreign_addresses and ip != remote_ip:
            msg = "501 Rejected data connection to foreign address %s:%s." \
                  % (ip, port)
            self.respond_w_warning(msg)
            return
        # ...another RFC-2577 recommendation is rejecting connections
        # to privileged ports (< 1024) for security reasons.
        if not self.permit_privileged_ports and port < 1024:
            msg = '501 PORT against the privileged port "%s" refused.' % port
            self.respond_w_warning(msg)
            return
        # close establishing DTP instances, if any
        self._shutdown_connecting_dtp()
        # close the current data channel, if any
        if self.data_channel is not None:
            self.data_channel.close()
            self.data_channel = None
        # make sure we are not hitting the max connections limit
        if not self.server._accept_new_cons():
            msg = "425 Too many connections. Can't open data channel."
            self.respond_w_warning(msg)
            return
        # open data channel
        self._dtp_connector = self.active_dtp(ip, port, self)
    def _make_epasv(self, extmode=False):
        """Initialize a passive data channel with remote client which
        issued a PASV or EPSV command.

        If extmode argument is True we assume that client issued EPSV in
        which case extended passive mode will be used (see RFC-2428).
        """
        # close establishing DTP instances, if any
        self._shutdown_connecting_dtp()
        # close established data connections, if any
        if self.data_channel is not None:
            self.data_channel.close()
            self.data_channel = None
        # make sure we are not hitting the max connections limit
        if not self.server._accept_new_cons():
            msg = "425 Too many connections. Can't open data channel."
            self.respond_w_warning(msg)
            return
        # open data channel
        self._dtp_acceptor = self.passive_dtp(self, extmode)
def ftp_PORT(self, line):
"""Start an active data channel by using IPv4."""
if self._epsvall:
self.respond("501 PORT not allowed after EPSV ALL.")
return
# Parse PORT request for getting IP and PORT.
# Request comes in as:
# > h1,h2,h3,h4,p1,p2
# ...where the client's IP address is h1.h2.h3.h4 and the TCP
# port number is (p1 * 256) + p2.
try:
addr = list(map(int, line.split(',')))
if len(addr) != 6:
raise ValueError
for x in addr[:4]:
if not 0 <= x <= 255:
raise ValueError
ip = '%d.%d.%d.%d' % tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
if not 0 <= port <= 65535:
raise ValueError
except (ValueError, OverflowError):
self.respond("501 Invalid PORT format.")
return
self._make_eport(ip, port)
    def ftp_EPRT(self, line):
        """Start an active data channel by choosing the network protocol
        to use (IPv4/IPv6) as defined in RFC-2428.
        """
        if self._epsvall:
            self.respond("501 EPRT not allowed after EPSV ALL.")
            return
        # Parse EPRT request for getting protocol, IP and PORT.
        # Request comes in as:
        # <d>proto<d>ip<d>port<d>
        # ...where <d> is an arbitrary delimiter character (usually "|") and
        # <proto> is the network protocol to use (1 for IPv4, 2 for IPv6).
        try:
            af, ip, port = line.split(line[0])[1:-1]
            port = int(port)
            if not 0 <= port <= 65535:
                raise ValueError
        except (ValueError, IndexError, OverflowError):
            self.respond("501 Invalid EPRT format.")
            return
        if af == "1":
            # client wants IPv4
            # test if AF_INET6 and IPV6_V6ONLY
            if (self.socket.family == socket.AF_INET6 and not
                    SUPPORTS_HYBRID_IPV6):
                self.respond('522 Network protocol not supported (use 2).')
            else:
                try:
                    # validate dotted-quad notation
                    octs = list(map(int, ip.split('.')))
                    if len(octs) != 4:
                        raise ValueError
                    for x in octs:
                        if not 0 <= x <= 255:
                            raise ValueError
                except (ValueError, OverflowError):
                    self.respond("501 Invalid EPRT format.")
                else:
                    self._make_eport(ip, port)
        elif af == "2":
            # client wants IPv6
            if self.socket.family == socket.AF_INET:
                self.respond('522 Network protocol not supported (use 1).')
            else:
                self._make_eport(ip, port)
        else:
            # unknown protocol number: suggest the one we can serve
            if self.socket.family == socket.AF_INET:
                self.respond('501 Unknown network protocol (use 1).')
            else:
                self.respond('501 Unknown network protocol (use 2).')
def ftp_PASV(self, line):
"""Start a passive data channel by using IPv4."""
if self._epsvall:
self.respond("501 PASV not allowed after EPSV ALL.")
return
self._make_epasv(extmode=False)
    def ftp_EPSV(self, line):
        """Start a passive data channel by using IPv4 or IPv6 as defined
        in RFC-2428.
        """
        # RFC-2428 specifies that if an optional parameter is given,
        # we have to determine the address family from that otherwise
        # use the same address family used on the control connection.
        # In such a scenario a client may use IPv4 on the control channel
        # and choose to use IPv6 for the data channel.
        # But how could we use IPv6 on the data channel without knowing
        # which IPv6 address to use for binding the socket?
        # Unfortunately RFC-2428 does not provide satisfying information
        # on how to do that. The assumption is that we don't have any way
        # to know which address to use, hence we just use the same address
        # family used on the control connection.
        if not line:
            # No parameter: inherit the control connection's family.
            self._make_epasv(extmode=True)
        # IPv4
        elif line == "1":
            if self.socket.family != socket.AF_INET:
                self.respond('522 Network protocol not supported (use 2).')
            else:
                self._make_epasv(extmode=True)
        # IPv6
        elif line == "2":
            if self.socket.family == socket.AF_INET:
                self.respond('522 Network protocol not supported (use 1).')
            else:
                self._make_epasv(extmode=True)
        elif line.lower() == 'all':
            # EPSV ALL: from now on only EPSV is accepted (see the
            # _epsvall checks in PORT/EPRT/PASV handlers).
            self._epsvall = True
            self.respond(
                '220 Other commands other than EPSV are now disabled.')
        else:
            if self.socket.family == socket.AF_INET:
                self.respond('501 Unknown network protocol (use 1).')
            else:
                self.respond('501 Unknown network protocol (use 2).')
    def ftp_QUIT(self, line):
        """Quit the current session disconnecting the client."""
        if self.authenticated:
            msg_quit = self.authorizer.get_msg_quit(self.username)
        else:
            msg_quit = "Goodbye."
        # Short messages fit a single 221 line; longer ones are sent as
        # a multi-line "221-..." reply terminated by a bare "221 ".
        if len(msg_quit) <= 75:
            self.respond("221 %s" % msg_quit)
        else:
            self.push("221-%s\r\n" % msg_quit)
            self.respond("221 ")
        # From RFC-959:
        # If file transfer is in progress, the connection must remain
        # open for result response and the server will then close it.
        # We also stop responding to any further command.
        if self.data_channel:
            self._quit_pending = True
            self.del_channel()
        else:
            self._shutdown_connecting_dtp()
            self.close_when_done()
        if self.authenticated and self.username:
            self.on_logout(self.username)
# --- data transferring
    def ftp_LIST(self, path):
        """Return a list of files in the specified directory to the
        client.
        On success return the directory path, else None.
        """
        # - If no argument, fall back on cwd as default.
        # - Some older FTP clients erroneously issue /bin/ls-like LIST
        #   formats in which case we fall back on cwd as default.
        try:
            isdir = self.fs.isdir(path)
            if isdir:
                listing = self.run_as_current_user(self.fs.listdir, path)
                if isinstance(listing, list):
                    try:
                        # RFC 959 recommends the listing to be sorted.
                        listing.sort()
                    except UnicodeDecodeError:
                        # (Python 2 only) might happen on filesystem not
                        # supporting UTF8 meaning os.listdir() returned a list
                        # of mixed bytes and unicode strings:
                        # http://goo.gl/6DLHD
                        # http://bugs.python.org/issue683592
                        pass
                iterator = self.fs.format_list(path, listing)
            else:
                # Path is a file: list just that one entry, relative to
                # its parent directory.
                basedir, filename = os.path.split(path)
                self.fs.lstat(path)  # raise exc in case of problems
                iterator = self.fs.format_list(basedir, [filename])
        except (OSError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            # Stream the listing over the data channel lazily.
            producer = BufferedIteratorProducer(iterator)
            self.push_dtp_data(producer, isproducer=True, cmd="LIST")
            return path
    def ftp_NLST(self, path):
        """Return a list of files in the specified directory in a
        compact form to the client.
        On success return the directory path, else None.
        """
        try:
            if self.fs.isdir(path):
                listing = list(self.run_as_current_user(self.fs.listdir, path))
            else:
                # if path is a file we just list its name
                self.fs.lstat(path)  # raise exc in case of problems
                listing = [os.path.basename(path)]
        except (OSError, FilesystemError) as err:
            self.respond('550 %s.' % _strerror(err))
        else:
            data = ''
            if listing:
                try:
                    listing.sort()
                except UnicodeDecodeError:
                    # (Python 2 only) might happen on filesystem not
                    # supporting UTF8 meaning os.listdir() returned a list
                    # of mixed bytes and unicode strings:
                    # http://goo.gl/6DLHD
                    # http://bugs.python.org/issue683592
                    # Normalize everything to unicode, then sort again.
                    ls = []
                    for x in listing:
                        if not isinstance(x, unicode):
                            x = unicode(x, 'utf8')
                        ls.append(x)
                    listing = sorted(ls)
                data = '\r\n'.join(listing) + '\r\n'
            # Encode once at the boundary before pushing on the wire.
            data = data.encode('utf8', self.unicode_errors)
            self.push_dtp_data(data, cmd="NLST")
            return path
# --- MLST and MLSD commands
# The MLST and MLSD commands are intended to standardize the file and
# directory information returned by the server-FTP process. These
# commands differ from the LIST command in that the format of the
# replies is strictly defined although extensible.
    def ftp_MLST(self, path):
        """Return information about a pathname in a machine-processable
        form as defined in RFC-3659.
        On success return the path just listed, else None.
        """
        line = self.fs.fs2ftp(path)
        basedir, basename = os.path.split(path)
        perms = self.authorizer.get_perms(self.username)
        try:
            iterator = self.run_as_current_user(
                self.fs.format_mlsx, basedir, [basename], perms,
                self._current_facts, ignore_err=False)
            data = b''.join(iterator)
        except (OSError, FilesystemError) as err:
            self.respond('550 %s.' % _strerror(err))
        else:
            data = data.decode('utf8', self.unicode_errors)
            # since TVFS is supported (see RFC-3659 chapter 6), a fully
            # qualified pathname should be returned
            data = data.split(' ')[0] + ' %s\r\n' % line
            # response is expected on the command channel
            self.push('250-Listing "%s":\r\n' % line)
            # the fact set must be preceded by a space
            self.push(' ' + data)
            self.respond('250 End MLST.')
            return path
    def ftp_MLSD(self, path):
        """Return contents of a directory in a machine-processable form
        as defined in RFC-3659.
        On success return the path just listed, else None.
        """
        # RFC-3659 requires 501 response code if path is not a directory
        if not self.fs.isdir(path):
            self.respond("501 No such directory.")
            return
        try:
            listing = self.run_as_current_user(self.fs.listdir, path)
        except (OSError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            perms = self.authorizer.get_perms(self.username)
            iterator = self.fs.format_mlsx(path, listing, perms,
                                           self._current_facts)
            # Stream the facts lazily over the data channel.
            producer = BufferedIteratorProducer(iterator)
            self.push_dtp_data(producer, isproducer=True, cmd="MLSD")
            return path
    def ftp_RETR(self, file):
        """Retrieve the specified file (transfer from the server to the
        client). On success return the file path else None.

        Honors a preceding REST by seeking to the requested offset;
        replies 554 if the offset cannot be applied.
        """
        # Consume any pending REST marker exactly once.
        rest_pos = self._restart_position
        self._restart_position = 0
        try:
            fd = self.run_as_current_user(self.fs.open, file, 'rb')
        except (EnvironmentError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
            return
        try:
            if rest_pos:
                # Make sure that the requested offset is valid (within the
                # size of the file being resumed).
                # According to RFC-1123 a 554 reply may result in case that
                # the existing file cannot be repositioned as specified in
                # the REST.
                ok = 0
                try:
                    fsize = self.fs.getsize(file)
                    if rest_pos > fsize:
                        raise ValueError
                    fd.seek(rest_pos)
                    ok = 1
                except ValueError:
                    why = "REST position (%s) > file size (%s)" % (
                        rest_pos, fsize)
                except (EnvironmentError, FilesystemError) as err:
                    why = _strerror(err)
                if not ok:
                    # Seek failed: close the fd ourselves since no
                    # producer took ownership of it.
                    fd.close()
                    self.respond('554 %s' % why)
                    return
            producer = FileProducer(fd, self._current_type)
            self.push_dtp_data(producer, isproducer=True, file=fd, cmd="RETR")
            return file
        except Exception:
            # On any unexpected error make sure the fd is not leaked.
            fd.close()
            raise
    def ftp_STOR(self, file, mode='w'):
        """Store a file (transfer from the client to the server).
        On success return the file path, else None.

        Also used by APPE (mode='a'); a pending REST switches the open
        mode to 'r+' so the file can be repositioned.
        """
        # A resume could occur in case of APPE or REST commands.
        # In that case we have to open file object in different ways:
        # STOR: mode = 'w'
        # APPE: mode = 'a'
        # REST: mode = 'r+' (to permit seeking on file object)
        if 'a' in mode:
            cmd = 'APPE'
        else:
            cmd = 'STOR'
        # Consume any pending REST marker exactly once.
        rest_pos = self._restart_position
        self._restart_position = 0
        if rest_pos:
            mode = 'r+'
        try:
            fd = self.run_as_current_user(self.fs.open, file, mode + 'b')
        except (EnvironmentError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
            return
        try:
            if rest_pos:
                # Make sure that the requested offset is valid (within the
                # size of the file being resumed).
                # According to RFC-1123 a 554 reply may result in case
                # that the existing file cannot be repositioned as
                # specified in the REST.
                ok = 0
                try:
                    fsize = self.fs.getsize(file)
                    if rest_pos > fsize:
                        raise ValueError
                    fd.seek(rest_pos)
                    ok = 1
                except ValueError:
                    why = "REST position (%s) > file size (%s)" % (
                        rest_pos, fsize)
                except (EnvironmentError, FilesystemError) as err:
                    why = _strerror(err)
                if not ok:
                    # Seek failed: close the fd ourselves since no data
                    # channel took ownership of it.
                    fd.close()
                    self.respond('554 %s' % why)
                    return
            if self.data_channel is not None:
                # Data connection already established: start receiving.
                resp = "Data connection already open. Transfer starting."
                self.respond("125 " + resp)
                self.data_channel.file_obj = fd
                self.data_channel.enable_receiving(self._current_type, cmd)
            else:
                # Defer until the data connection comes up.
                resp = "File status okay. About to open data connection."
                self.respond("150 " + resp)
                self._in_dtp_queue = (fd, cmd)
            return file
        except Exception:
            # On any unexpected error make sure the fd is not leaked.
            fd.close()
            raise
    def ftp_STOU(self, line):
        """Store a file on the server with a unique name.
        On success return the file path, else None.
        """
        # Note 1: RFC-959 prohibited STOU parameters, but this
        # prohibition is obsolete.
        # Note 2: 250 response wanted by RFC-959 has been declared
        # incorrect in RFC-1123 that wants 125/150 instead.
        # Note 3: RFC-1123 also provided an exact output format
        # defined to be as follow:
        # > 125 FILE: pppp
        # ...where pppp represents the unique path name of the
        # file that will be written.
        # watch for STOU preceded by REST, which makes no sense.
        if self._restart_position:
            self.respond("450 Can't STOU while REST request is pending.")
            return
        # An optional argument provides the directory and name prefix
        # for the temporary file; otherwise use cwd and "ftpd." prefix.
        if line:
            basedir, prefix = os.path.split(self.fs.ftp2fs(line))
            prefix = prefix + '.'
        else:
            basedir = self.fs.ftp2fs(self.fs.cwd)
            prefix = 'ftpd.'
        try:
            fd = self.run_as_current_user(self.fs.mkstemp, prefix=prefix,
                                          dir=basedir)
        except (EnvironmentError, FilesystemError) as err:
            # likely, we hit the max number of retries to find out a
            # file with a unique name
            if getattr(err, "errno", -1) == errno.EEXIST:
                why = 'No usable unique file name found'
            # something else happened
            else:
                why = _strerror(err)
            self.respond("450 %s." % why)
            return
        try:
            if not self.authorizer.has_perm(self.username, 'w', fd.name):
                # No write permission on the generated name: undo the
                # file creation, best-effort.
                try:
                    fd.close()
                    self.run_as_current_user(self.fs.remove, fd.name)
                except (OSError, FilesystemError):
                    pass
                self.respond("550 Not enough privileges.")
                return
            # now just acts like STOR except that restarting isn't allowed
            filename = os.path.basename(fd.name)
            if self.data_channel is not None:
                self.respond("125 FILE: %s" % filename)
                self.data_channel.file_obj = fd
                self.data_channel.enable_receiving(self._current_type, "STOU")
            else:
                self.respond("150 FILE: %s" % filename)
                self._in_dtp_queue = (fd, "STOU")
            return filename
        except Exception:
            # On any unexpected error make sure the fd is not leaked.
            fd.close()
            raise
def ftp_APPE(self, file):
"""Append data to an existing file on the server.
On success return the file path, else None.
"""
# watch for APPE preceded by REST, which makes no sense.
if self._restart_position:
self.respond("450 Can't APPE while REST request is pending.")
else:
return self.ftp_STOR(file, mode='a')
def ftp_REST(self, line):
"""Restart a file transfer from a previous mark."""
if self._current_type == 'a':
self.respond('501 Resuming transfers not allowed in ASCII mode.')
return
try:
marker = int(line)
if marker < 0:
raise ValueError
except (ValueError, OverflowError):
self.respond("501 Invalid parameter.")
else:
self.respond("350 Restarting at position %s." % marker)
self._restart_position = marker
    def ftp_ABOR(self, line):
        """Abort the current data transfer.

        Replies 225 when nothing was in progress, 426 then 226 when an
        active transfer was torn down (RFC-959 semantics).
        """
        # ABOR received while no data channel exists
        if self._dtp_acceptor is None and \
                self._dtp_connector is None and \
                self.data_channel is None:
            self.respond("225 No transfer to abort.")
            return
        else:
            # a PASV or PORT was received but connection wasn't made yet
            if self._dtp_acceptor is not None or \
                    self._dtp_connector is not None:
                self._shutdown_connecting_dtp()
                resp = "225 ABOR command successful; data channel closed."
            # If a data transfer is in progress the server must first
            # close the data connection, returning a 426 reply to
            # indicate that the transfer terminated abnormally, then it
            # must send a 226 reply, indicating that the abort command
            # was successfully processed.
            # If no data has been transmitted we just respond with 225
            # indicating that no transfer was in progress.
            if self.data_channel is not None:
                if self.data_channel.transfer_in_progress():
                    self.data_channel.close()
                    self.data_channel = None
                    self.respond("426 Transfer aborted via ABOR.",
                                 logfun=logger.info)
                    resp = "226 ABOR command successful."
                else:
                    self.data_channel.close()
                    self.data_channel = None
                    resp = "225 ABOR command successful; data channel closed."
        self.respond(resp)
# --- authentication
def ftp_USER(self, line):
"""Set the username for the current session."""
# RFC-959 specifies a 530 response to the USER command if the
# username is not valid. If the username is valid is required
# ftpd returns a 331 response instead. In order to prevent a
# malicious client from determining valid usernames on a server,
# it is suggested by RFC-2577 that a server always return 331 to
# the USER command and then reject the combination of username
# and password for an invalid username when PASS is provided later.
if not self.authenticated:
self.respond('331 Username ok, send password.')
else:
# a new USER command could be entered at any point in order
# to change the access control flushing any user, password,
# and account information already supplied and beginning the
# login sequence again.
self.flush_account()
msg = 'Previous account information was flushed'
self.respond('331 %s, send password.' % msg, logfun=logger.info)
self.username = line
    def handle_auth_failed(self, msg, password):
        """React to a failed login attempt.

        The 530 reply is deliberately delayed by auth_failed_timeout
        (scheduled on the ioloop) and the channel is removed in the
        meantime — presumably to throttle brute-force attempts; after
        max_login_attempts the client is disconnected.
        """
        def callback(username, password, msg):
            # Resume reading from the control connection, then reply.
            self.add_channel()
            if hasattr(self, '_closed') and not self._closed:
                self.attempted_logins += 1
                if self.attempted_logins >= self.max_login_attempts:
                    msg += " Disconnecting."
                    self.respond("530 " + msg)
                    self.close_when_done()
                else:
                    self.respond("530 " + msg)
                self.log("USER '%s' failed login." % username)
            self.on_login_failed(username, password)
        # Stop reading further commands until the delayed reply fires.
        self.del_channel()
        if not msg:
            if self.username == 'anonymous':
                msg = "Anonymous access not allowed."
            else:
                msg = "Authentication failed."
        else:
            # response string should be capitalized as per RFC-959
            msg = msg.capitalize()
        self.ioloop.call_later(self.auth_failed_timeout, callback,
                               self.username, password, msg,
                               _errback=self.handle_error)
        self.username = ""
    def handle_auth_success(self, home, password, msg_login):
        """Complete a successful login: greet, mark authenticated and
        mount the user's filesystem rooted at *home*.
        """
        if not isinstance(home, unicode):
            if PY3:
                # On Python 3 a bytes home dir is a programming error.
                raise TypeError('type(home) != text')
            else:
                # On Python 2 tolerate it with a warning and decode.
                warnings.warn(
                    '%s.get_home_dir returned a non-unicode string; now '
                    'casting to unicode' % (
                        self.authorizer.__class__.__name__),
                    RuntimeWarning)
                home = home.decode('utf8')
        # Short messages fit a single 230 line; longer ones are sent as
        # a multi-line "230-..." reply terminated by a bare "230 ".
        if len(msg_login) <= 75:
            self.respond('230 %s' % msg_login)
        else:
            self.push("230-%s\r\n" % msg_login)
            self.respond("230 ")
        self.log("USER '%s' logged in." % self.username)
        self.authenticated = True
        self.password = password
        self.attempted_logins = 0
        self.fs = self.abstracted_fs(home, self)
        self.on_login(self.username)
def ftp_PASS(self, line):
"""Check username's password against the authorizer."""
if self.authenticated:
self.respond("503 User already authenticated.")
return
if not self.username:
self.respond("503 Login with USER first.")
return
try:
self.authorizer.validate_authentication(self.username, line, self)
home = self.authorizer.get_home_dir(self.username)
msg_login = self.authorizer.get_msg_login(self.username)
except (AuthenticationFailed, AuthorizerError) as err:
self.handle_auth_failed(str(err), line)
else:
self.handle_auth_success(home, line, msg_login)
    def ftp_REIN(self, line):
        """Reinitialize user's current session."""
        # From RFC-959:
        # REIN command terminates a USER, flushing all I/O and account
        # information, except to allow any transfer in progress to be
        # completed.  All parameters are reset to the default settings
        # and the control connection is left open.  This is identical
        # to the state in which a user finds himself immediately after
        # the control connection is opened.
        self.flush_account()
        # Note: RFC-959 erroneously mention "220" as the correct response
        # code to be given in this case, but this is wrong...
        self.respond("230 Ready for new user.")
# --- filesystem operations
def ftp_PWD(self, line):
"""Return the name of the current working directory to the client."""
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
cwd = self.fs.cwd
assert isinstance(cwd, unicode), cwd
self.respond('257 "%s" is the current directory.'
% cwd.replace('"', '""'))
    def ftp_CWD(self, path):
        """Change the current working directory.
        On success return the new directory path, else None.
        """
        # Temporarily join the specified directory to see if we have
        # permissions to do so, then get back to original process's
        # current working directory.
        # Note that if for some reason os.getcwd() gets removed after
        # the process is started we'll get into troubles (os.getcwd()
        # will fail with ENOENT) but we can't do anything about that
        # except logging an error.
        init_cwd = getcwdu()
        try:
            self.run_as_current_user(self.fs.chdir, path)
        except (OSError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            cwd = self.fs.cwd
            assert isinstance(cwd, unicode), cwd
            self.respond('250 "%s" is the current directory.' % cwd)
            # Restore the process-wide cwd that chdir() above changed.
            if getcwdu() != init_cwd:
                os.chdir(init_cwd)
            return path
    def ftp_CDUP(self, path):
        """Change into the parent directory.
        On success return the new directory, else None.
        """
        # Note: RFC-959 says that code 200 is required but it also says
        # that CDUP uses the same codes as CWD.
        return self.ftp_CWD(path)
    def ftp_SIZE(self, path):
        """Return size of file in a format suitable for using with
        RESTart as defined in RFC-3659.

        Rejected with 550 in ASCII mode and for non-regular files.
        """
        # Implementation note: properly handling the SIZE command when
        # TYPE ASCII is used would require to scan the entire file to
        # perform the ASCII translation logic
        # (file.read().replace(os.linesep, '\r\n')) and then calculating
        # the len of such data which may be different than the actual
        # size of the file on the server.  Considering that calculating
        # such result could be very resource-intensive and also dangerous
        # (DoS) we reject SIZE when the current TYPE is ASCII.
        # However, clients in general should not be resuming downloads
        # in ASCII mode.  Resuming downloads in binary mode is the
        # recommended way as specified in RFC-3659.
        line = self.fs.fs2ftp(path)
        if self._current_type == 'a':
            why = "SIZE not allowed in ASCII mode"
            self.respond("550 %s." % why)
            return
        if not self.fs.isfile(self.fs.realpath(path)):
            why = "%s is not retrievable" % line
            self.respond("550 %s." % why)
            return
        try:
            size = self.run_as_current_user(self.fs.getsize, path)
        except (OSError, FilesystemError) as err:
            why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            self.respond("213 %s" % size)
    def ftp_MDTM(self, path):
        """Return last modification time of file to the client as an ISO
        3307 style timestamp (YYYYMMDDHHMMSS) as defined in RFC-3659.
        On success return the file path, else None.
        """
        line = self.fs.fs2ftp(path)
        if not self.fs.isfile(self.fs.realpath(path)):
            self.respond("550 %s is not retrievable" % line)
            return
        # Report times in GMT or local time per server configuration.
        if self.use_gmt_times:
            timefunc = time.gmtime
        else:
            timefunc = time.localtime
        try:
            secs = self.run_as_current_user(self.fs.getmtime, path)
            lmt = time.strftime("%Y%m%d%H%M%S", timefunc(secs))
        except (ValueError, OSError, FilesystemError) as err:
            if isinstance(err, ValueError):
                # It could happen if file's last modification time
                # happens to be too old (prior to year 1900)
                why = "Can't determine file's last modification time"
            else:
                why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            self.respond("213 %s" % lmt)
            return path
    def ftp_MFMT(self, path, timeval):
        """ Sets the last modification time of file to timeval
        3307 style timestamp (YYYYMMDDHHMMSS) as defined in RFC-3659.
        On success return the modified time and file path, else None.
        """
        # Note: the MFMT command is not a formal RFC command
        # but stated in the following MEMO:
        # https://tools.ietf.org/html/draft-somers-ftp-mfxx-04
        # this is implemented to assist with file synchronization
        line = self.fs.fs2ftp(path)
        if len(timeval) != len("YYYYMMDDHHMMSS"):
            why = "Invalid time format; expected: YYYYMMDDHHMMSS"
            self.respond('550 %s.' % why)
            return
        if not self.fs.isfile(self.fs.realpath(path)):
            self.respond("550 %s is not retrievable" % line)
            return
        # Echo the resulting time in GMT or local time per configuration.
        if self.use_gmt_times:
            timefunc = time.gmtime
        else:
            timefunc = time.localtime
        try:
            # convert timeval string to epoch seconds
            # NOTE(review): utcfromtimestamp() is deprecated on modern
            # Python; both datetimes here are naive so the subtraction
            # yields seconds since the epoch as intended.
            epoch = datetime.utcfromtimestamp(0)
            timeval_datetime_obj = datetime.strptime(timeval, '%Y%m%d%H%M%S')
            timeval_secs = (timeval_datetime_obj - epoch).total_seconds()
        except ValueError:
            why = "Invalid time format; expected: YYYYMMDDHHMMSS"
            self.respond('550 %s.' % why)
            return
        try:
            # Modify Time
            self.run_as_current_user(self.fs.utime, path, timeval_secs)
            # Fetch Time
            secs = self.run_as_current_user(self.fs.getmtime, path)
            lmt = time.strftime("%Y%m%d%H%M%S", timefunc(secs))
        except (ValueError, OSError, FilesystemError) as err:
            if isinstance(err, ValueError):
                # It could happen if file's last modification time
                # happens to be too old (prior to year 1900)
                why = "Can't determine file's last modification time"
            else:
                why = _strerror(err)
            self.respond('550 %s.' % why)
        else:
            self.respond("213 Modify=%s; %s." % (lmt, line))
            return (lmt, path)
def ftp_MKD(self, path):
"""Create the specified directory.
On success return the directory path, else None.
"""
line = self.fs.fs2ftp(path)
try:
self.run_as_current_user(self.fs.mkdir, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.respond(
'257 "%s" directory created.' % line.replace('"', '""'))
return path
def ftp_RMD(self, path):
"""Remove the specified directory.
On success return the directory path, else None.
"""
if self.fs.realpath(path) == self.fs.realpath(self.fs.root):
msg = "Can't remove root directory."
self.respond("550 %s" % msg)
return
try:
self.run_as_current_user(self.fs.rmdir, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond("250 Directory removed.")
def ftp_DELE(self, path):
"""Delete the specified file.
On success return the file path, else None.
"""
try:
self.run_as_current_user(self.fs.remove, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond("250 File removed.")
return path
def ftp_RNFR(self, path):
"""Rename the specified (only the source name is specified
here, see RNTO command)"""
if not self.fs.lexists(path):
self.respond("550 No such file or directory.")
elif self.fs.realpath(path) == self.fs.realpath(self.fs.root):
self.respond("550 Can't rename home directory.")
else:
self._rnfr = path
self.respond("350 Ready for destination name.")
def ftp_RNTO(self, path):
"""Rename file (destination name only, source is specified with
RNFR).
On success return a (source_path, destination_path) tuple.
"""
if not self._rnfr:
self.respond("503 Bad sequence of commands: use RNFR first.")
return
src = self._rnfr
self._rnfr = None
try:
self.run_as_current_user(self.fs.rename, src, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond("250 Renaming ok.")
return (src, path)
# --- others
def ftp_TYPE(self, line):
"""Set current type data type to binary/ascii"""
type = line.upper().replace(' ', '')
if type in ("A", "L7"):
self.respond("200 Type set to: ASCII.")
self._current_type = 'a'
elif type in ("I", "L8"):
self.respond("200 Type set to: Binary.")
self._current_type = 'i'
else:
self.respond('504 Unsupported type "%s".' % line)
def ftp_STRU(self, line):
"""Set file structure ("F" is the only one supported (noop))."""
stru = line.upper()
if stru == 'F':
self.respond('200 File transfer structure set to: F.')
elif stru in ('P', 'R'):
# R is required in minimum implementations by RFC-959, 5.1.
# RFC-1123, 4.1.2.13, amends this to only apply to servers
# whose file systems support record structures, but also
# suggests that such a server "may still accept files with
# STRU R, recording the byte stream literally".
# Should we accept R but with no operational difference from
# F? proftpd and wu-ftpd don't accept STRU R. We just do
# the same.
#
# RFC-1123 recommends against implementing P.
self.respond('504 Unimplemented STRU type.')
else:
self.respond('501 Unrecognized STRU type.')
def ftp_MODE(self, line):
"""Set data transfer mode ("S" is the only one supported (noop))."""
mode = line.upper()
if mode == 'S':
self.respond('200 Transfer mode set to: S')
elif mode in ('B', 'C'):
self.respond('504 Unimplemented MODE type.')
else:
self.respond('501 Unrecognized MODE type.')
    def ftp_STAT(self, path):
        """Return statistics about current ftp session. If an argument
        is provided return directory listing over command channel.
        Implementation note:
        RFC-959 does not explicitly mention globbing but many FTP
        servers do support it as a measure of convenience for FTP
        clients and users.
        In order to search for and match the given globbing expression,
        the code has to search (possibly) many directories, examine
        each contained filename, and build a list of matching files in
        memory.  Since this operation can be quite intensive, both CPU-
        and memory-wise, we do not support globbing.
        """
        # return STATus information about ftpd
        if not path:
            s = []
            s.append('Connected to: %s:%s' % self.socket.getsockname()[:2])
            if self.authenticated:
                s.append('Logged in as: %s' % self.username)
            else:
                if not self.username:
                    s.append("Waiting for username.")
                else:
                    s.append("Waiting for password.")
            if self._current_type == 'a':
                type = 'ASCII'
            else:
                type = 'Binary'
            s.append("TYPE: %s; STRUcture: File; MODE: Stream" % type)
            if self._dtp_acceptor is not None:
                s.append('Passive data channel waiting for connection.')
            elif self.data_channel is not None:
                # Include transfer counters while a data channel is open.
                bytes_sent = self.data_channel.tot_bytes_sent
                bytes_recv = self.data_channel.tot_bytes_received
                elapsed_time = self.data_channel.get_elapsed_time()
                s.append('Data connection open:')
                s.append('Total bytes sent: %s' % bytes_sent)
                s.append('Total bytes received: %s' % bytes_recv)
                s.append('Transfer elapsed time: %s secs' % elapsed_time)
            else:
                s.append('Data connection closed.')
            self.push('211-FTP server status:\r\n')
            self.push(''.join([' %s\r\n' % item for item in s]))
            self.respond('211 End of status.')
        # return directory LISTing over the command channel
        else:
            line = self.fs.fs2ftp(path)
            try:
                isdir = self.fs.isdir(path)
                if isdir:
                    listing = self.run_as_current_user(self.fs.listdir, path)
                    if isinstance(listing, list):
                        try:
                            # RFC 959 recommends the listing to be sorted.
                            listing.sort()
                        except UnicodeDecodeError:
                            # (Python 2 only) might happen on filesystem not
                            # supporting UTF8 meaning os.listdir() returned a
                            # list of mixed bytes and unicode strings:
                            # http://goo.gl/6DLHD
                            # http://bugs.python.org/issue683592
                            pass
                    iterator = self.fs.format_list(path, listing)
                else:
                    # Path is a file: list just that one entry.
                    basedir, filename = os.path.split(path)
                    self.fs.lstat(path)  # raise exc in case of problems
                    iterator = self.fs.format_list(basedir, [filename])
            except (OSError, FilesystemError) as err:
                why = _strerror(err)
                self.respond('550 %s.' % why)
            else:
                self.push('213-Status of "%s":\r\n' % line)
                self.push_with_producer(BufferedIteratorProducer(iterator))
                self.respond('213 End of status.')
                return path
    def ftp_FEAT(self, line):
        """List all new features supported as defined in RFC-2389."""
        features = set(['UTF8', 'TVFS'])
        # Advertise only the optional commands actually enabled.
        features.update([feat for feat in
                         ('EPRT', 'EPSV', 'MDTM', 'MFMT', 'SIZE')
                         if feat in self.proto_cmds])
        features.update(self._extra_feats)
        if 'MLST' in self.proto_cmds or 'MLSD' in self.proto_cmds:
            # Per RFC-3659 the MLST feature line lists every available
            # fact, marking the currently enabled ones with "*".
            facts = ''
            for fact in self._available_facts:
                if fact in self._current_facts:
                    facts += fact + '*;'
                else:
                    facts += fact + ';'
            features.add('MLST ' + facts)
        if 'REST' in self.proto_cmds:
            features.add('REST STREAM')
        features = sorted(features)
        self.push("211-Features supported:\r\n")
        self.push("".join([" %s\r\n" % x for x in features]))
        self.respond('211 End FEAT.')
def ftp_OPTS(self, line):
"""Specify options for FTP commands as specified in RFC-2389."""
try:
if line.count(' ') > 1:
raise ValueError('Invalid number of arguments')
if ' ' in line:
cmd, arg = line.split(' ')
if ';' not in arg:
raise ValueError('Invalid argument')
else:
cmd, arg = line, ''
# actually the only command able to accept options is MLST
if cmd.upper() != 'MLST' or 'MLST' not in self.proto_cmds:
raise ValueError('Unsupported command "%s"' % cmd)
except ValueError as err:
self.respond('501 %s.' % err)
else:
facts = [x.lower() for x in arg.split(';')]
self._current_facts = \
[x for x in facts if x in self._available_facts]
f = ''.join([x + ';' for x in self._current_facts])
self.respond('200 MLST OPTS ' + f)
def ftp_NOOP(self, line):
"""Do nothing."""
self.respond("200 I successfully did nothing'.")
    def ftp_SYST(self, line):
        """Return system type (always returns UNIX type: L8)."""
        # This command is used to find out the type of operating system
        # at the server.  The reply shall have as its first word one of
        # the system names listed in RFC-943.
        # Since that we always return a "/bin/ls -lA"-like output on
        # LIST we prefer to respond as if we would on Unix in any case.
        self.respond("215 UNIX Type: L8")
    def ftp_ALLO(self, line):
        """Allocate bytes for storage (noop)."""
        # not necessary (always respond with 202)
        self.respond("202 No storage allocation necessary.")
def ftp_HELP(self, line):
"""Return help text to the client."""
if line:
line = line.upper()
if line in self.proto_cmds:
self.respond("214 %s" % self.proto_cmds[line]['help'])
else:
self.respond("501 Unrecognized command.")
else:
# provide a compact list of recognized commands
def formatted_help():
cmds = []
keys = sorted([x for x in self.proto_cmds.keys()
if not x.startswith('SITE ')])
while keys:
elems = tuple((keys[0:8]))
cmds.append(' %-6s' * len(elems) % elems + '\r\n')
del keys[0:8]
return ''.join(cmds)
self.push("214-The following commands are recognized:\r\n")
self.push(formatted_help())
self.respond("214 Help command successful.")
# --- site commands
# The user willing to add support for a specific SITE command must
# update self.proto_cmds dictionary and define a new ftp_SITE_%CMD%
# method in the subclass.
def ftp_SITE_CHMOD(self, path, mode):
"""Change file mode.
On success return a (file_path, mode) tuple.
"""
# Note: although most UNIX servers implement it, SITE CHMOD is not
# defined in any official RFC.
try:
assert len(mode) in (3, 4)
for x in mode:
assert 0 <= int(x) <= 7
mode = int(mode, 8)
except (AssertionError, ValueError):
self.respond("501 Invalid SITE CHMOD format.")
else:
try:
self.run_as_current_user(self.fs.chmod, path, mode)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
self.respond('200 SITE CHMOD successful.')
return (path, mode)
def ftp_SITE_HELP(self, line):
"""Return help text to the client for a given SITE command."""
if line:
line = line.upper()
if line in self.proto_cmds:
self.respond("214 %s" % self.proto_cmds[line]['help'])
else:
self.respond("501 Unrecognized SITE command.")
else:
self.push("214-The following SITE commands are recognized:\r\n")
site_cmds = []
for cmd in sorted(self.proto_cmds.keys()):
if cmd.startswith('SITE '):
site_cmds.append(' %s\r\n' % cmd[5:])
self.push(''.join(site_cmds))
self.respond("214 Help SITE command successful.")
# --- support for deprecated cmds
    # RFC-1123 requires that the server treat XCUP, XCWD, XMKD, XPWD
    # and XRMD commands as synonyms for CDUP, CWD, MKD, PWD and RMD.
# Such commands are obsoleted but some ftp clients (e.g. Windows
# ftp.exe) still use them.
    def ftp_XCUP(self, line):
        """Change to the parent directory. Synonym for CDUP (RFC-1123).
        Deprecated."""
        return self.ftp_CDUP(line)
    def ftp_XCWD(self, line):
        # Deprecated RFC-1123 alias kept for old clients.
        "Change the current working directory. Synonym for CWD. Deprecated."
        return self.ftp_CWD(line)
    def ftp_XMKD(self, line):
        # Deprecated RFC-1123 alias kept for old clients.
        "Create the specified directory. Synonym for MKD. Deprecated."
        return self.ftp_MKD(line)
    def ftp_XPWD(self, line):
        # Deprecated RFC-1123 alias kept for old clients.
        "Return the current working directory. Synonym for PWD. Deprecated."
        return self.ftp_PWD(line)
    def ftp_XRMD(self, line):
        # Deprecated RFC-1123 alias kept for old clients.
        "Remove the specified directory. Synonym for RMD. Deprecated."
        return self.ftp_RMD(line)
# ===================================================================
# --- FTP over SSL
# ===================================================================
if SSL is not None:
    class SSLConnection(_AsyncChatNewStyle):
        """An AsyncChat subclass supporting TLS/SSL."""
        # Handshake / lifecycle flags:
        # _ssl_requested   -> secure_connection() was called
        # _ssl_accepting   -> handshake in progress
        # _ssl_established -> handshake completed successfully
        # _ssl_closing     -> SSL_shutdown() in progress
        _ssl_accepting = False
        _ssl_established = False
        _ssl_closing = False
        _ssl_requested = False
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self._error = False
            # Set whenever OpenSSL raises WantReadError / WantWriteError
            # so readable()/writable() keep polling for the missing event.
            self._ssl_want_read = False
            self._ssl_want_write = False
        def readable(self):
            # Also poll for reads while OpenSSL is waiting for input.
            return self._ssl_want_read or super().readable()
        def writable(self):
            # Also poll for writes while OpenSSL needs to flush output.
            return self._ssl_want_write or super().writable()
        def secure_connection(self, ssl_context):
            """Secure the connection switching from plain-text to
            SSL/TLS.
            """
            debug("securing SSL connection", self)
            self._ssl_requested = True
            try:
                self.socket = SSL.Connection(ssl_context, self.socket)
            except socket.error as err:
                # may happen in case the client connects/disconnects
                # very quickly
                debug(
                    "call: secure_connection(); can't secure SSL connection "
                    "%r; closing" % err, self)
                self.close()
            except ValueError:
                # may happen in case the client connects/disconnects
                # very quickly
                if self.socket.fileno() == -1:
                    debug(
                        "ValueError and fd == -1 on secure_connection()", self)
                    return
                raise
            else:
                self.socket.set_accept_state()
                self._ssl_accepting = True
        @contextlib.contextmanager
        def _handle_ssl_want_rw(self):
            """Context manager re-registering the channel with the IO
            loop after an operation that may have changed the SSL
            want-read / want-write flags.
            """
            prev_row_pending = self._ssl_want_read or self._ssl_want_write
            try:
                yield
            except SSL.WantReadError:
                # we should never get here; it's just for extra safety
                self._ssl_want_read = True
            except SSL.WantWriteError:
                # we should never get here; it's just for extra safety
                self._ssl_want_write = True
            if self._ssl_want_read:
                self.modify_ioloop_events(
                    self._wanted_io_events | self.ioloop.READ, logdebug=True)
            elif self._ssl_want_write:
                self.modify_ioloop_events(
                    self._wanted_io_events | self.ioloop.WRITE, logdebug=True)
            else:
                if prev_row_pending:
                    self.modify_ioloop_events(self._wanted_io_events)
        def _do_ssl_handshake(self):
            """Run (or resume) the TLS handshake, updating the SSL
            state flags accordingly.
            """
            self._ssl_accepting = True
            self._ssl_want_read = False
            self._ssl_want_write = False
            try:
                self.socket.do_handshake()
            except SSL.WantReadError:
                self._ssl_want_read = True
                debug("call: _do_ssl_handshake, err: ssl-want-read", inst=self)
            except SSL.WantWriteError:
                self._ssl_want_write = True
                debug("call: _do_ssl_handshake, err: ssl-want-write",
                      inst=self)
            except SSL.SysCallError as err:
                debug("call: _do_ssl_handshake, err: %r" % err, inst=self)
                retval, desc = err.args
                # Peer went away mid-handshake: treat as disconnection.
                if (retval == -1 and desc == 'Unexpected EOF') or retval > 0:
                    return self.handle_close()
                raise
            except SSL.Error as err:
                debug("call: _do_ssl_handshake, err: %r" % err, inst=self)
                return self.handle_failed_ssl_handshake()
            else:
                debug("SSL connection established", self)
                self._ssl_accepting = False
                self._ssl_established = True
                self.handle_ssl_established()
        def handle_ssl_established(self):
            """Called when SSL handshake has completed."""
            pass
        def handle_ssl_shutdown(self):
            """Called when SSL shutdown() has completed."""
            super().close()
        def handle_failed_ssl_handshake(self):
            # Subclasses decide how to report the failure to the peer.
            raise NotImplementedError("must be implemented in subclass")
        def handle_read_event(self):
            # Route read events into the handshake / shutdown machinery
            # while either is in progress; plain dispatch otherwise.
            if not self._ssl_requested:
                super().handle_read_event()
            else:
                with self._handle_ssl_want_rw():
                    self._ssl_want_read = False
                    if self._ssl_accepting:
                        self._do_ssl_handshake()
                    elif self._ssl_closing:
                        self._do_ssl_shutdown()
                    else:
                        super().handle_read_event()
        def handle_write_event(self):
            # Same dispatching as handle_read_event(), for writes.
            if not self._ssl_requested:
                super().handle_write_event()
            else:
                with self._handle_ssl_want_rw():
                    self._ssl_want_write = False
                    if self._ssl_accepting:
                        self._do_ssl_handshake()
                    elif self._ssl_closing:
                        self._do_ssl_shutdown()
                    else:
                        super().handle_write_event()
        def handle_error(self):
            """Log the pending exception and close, bypassing SSL
            shutdown (the channel state can no longer be trusted).
            """
            self._error = True
            try:
                raise
            except Exception:
                self.log_exception(self)
            # when facing an unhandled exception in here it's better
            # to rely on base class (FTPHandler or DTPHandler)
            # close() method as it does not imply SSL shutdown logic
            try:
                super().close()
            except Exception:
                logger.critical(traceback.format_exc())
        def send(self, data):
            """Send data over SSL, translating OpenSSL retry / EOF
            conditions into the asyncore send() contract (return 0 to
            mean "try again later").
            """
            if not isinstance(data, bytes):
                data = bytes(data)
            try:
                return super().send(data)
            except SSL.WantReadError:
                debug("call: send(), err: ssl-want-read", inst=self)
                self._ssl_want_read = True
                return 0
            except SSL.WantWriteError:
                debug("call: send(), err: ssl-want-write", inst=self)
                self._ssl_want_write = True
                return 0
            except SSL.ZeroReturnError:
                debug(
                    "call: send() -> shutdown(), err: zero-return", inst=self)
                super().handle_close()
                return 0
            except SSL.SysCallError as err:
                debug("call: send(), err: %r" % err, inst=self)
                errnum, errstr = err.args
                if errnum == errno.EWOULDBLOCK:
                    return 0
                elif errnum in _ERRNOS_DISCONNECTED or \
                        errstr == 'Unexpected EOF':
                    super().handle_close()
                    return 0
                else:
                    raise
        def recv(self, buffer_size):
            """Receive data over SSL; raises RetryError when OpenSSL
            needs another IO round before data can be produced, returns
            b'' on disconnection.
            """
            try:
                return super().recv(buffer_size)
            except SSL.WantReadError:
                debug("call: recv(), err: ssl-want-read", inst=self)
                self._ssl_want_read = True
                raise RetryError
            except SSL.WantWriteError:
                debug("call: recv(), err: ssl-want-write", inst=self)
                self._ssl_want_write = True
                raise RetryError
            except SSL.ZeroReturnError:
                debug("call: recv() -> shutdown(), err: zero-return",
                      inst=self)
                super().handle_close()
                return b''
            except SSL.SysCallError as err:
                debug("call: recv(), err: %r" % err, inst=self)
                errnum, errstr = err.args
                if errnum in _ERRNOS_DISCONNECTED or \
                        errstr == 'Unexpected EOF':
                    super().handle_close()
                    return b''
                else:
                    raise
        def _do_ssl_shutdown(self):
            """Executes a SSL_shutdown() call to revert the connection
            back to clear-text.
            twisted/internet/tcp.py code has been used as an example.
            """
            self._ssl_closing = True
            if os.name == 'posix':
                # since SSL_shutdown() doesn't report errors, an empty
                # write call is done first, to try to detect if the
                # connection has gone away
                try:
                    os.write(self.socket.fileno(), b'')
                except (OSError, socket.error) as err:
                    debug(
                        "call: _do_ssl_shutdown() -> os.write, err: %r" % err,
                        inst=self)
                    if err.errno in (errno.EINTR, errno.EWOULDBLOCK,
                                     errno.ENOBUFS):
                        return
                    elif err.errno in _ERRNOS_DISCONNECTED:
                        return super().close()
                    else:
                        raise
            # Ok, this a mess, but the underlying OpenSSL API simply
            # *SUCKS* and I really couldn't do any better.
            #
            # Here we just want to shutdown() the SSL layer and then
            # close() the connection so we're not interested in a
            # complete SSL shutdown() handshake, so let's pretend
            # we already received a "RECEIVED" shutdown notification
            # from the client.
            # Once the client received our "SENT" shutdown notification
            # then we close() the connection.
            #
            # Since it is not clear what errors to expect during the
            # entire procedure we catch them all and assume the
            # following:
            # - WantReadError and WantWriteError means "retry"
            # - ZeroReturnError, SysCallError[EOF], Error[] are all
            #   aliases for disconnection
            try:
                laststate = self.socket.get_shutdown()
                self.socket.set_shutdown(laststate | SSL.RECEIVED_SHUTDOWN)
                done = self.socket.shutdown()
                if not (laststate & SSL.RECEIVED_SHUTDOWN):
                    self.socket.set_shutdown(SSL.SENT_SHUTDOWN)
            except SSL.WantReadError:
                self._ssl_want_read = True
                debug("call: _do_ssl_shutdown, err: ssl-want-read", inst=self)
            except SSL.WantWriteError:
                self._ssl_want_write = True
                debug("call: _do_ssl_shutdown, err: ssl-want-write", inst=self)
            except SSL.ZeroReturnError:
                debug(
                    "call: _do_ssl_shutdown() -> shutdown(), err: zero-return",
                    inst=self)
                super().close()
            except SSL.SysCallError as err:
                debug("call: _do_ssl_shutdown() -> shutdown(), err: %r" % err,
                      inst=self)
                errnum, errstr = err.args
                if errnum in _ERRNOS_DISCONNECTED or \
                        errstr == 'Unexpected EOF':
                    super().close()
                else:
                    raise
            except SSL.Error as err:
                debug("call: _do_ssl_shutdown() -> shutdown(), err: %r" % err,
                      inst=self)
                # see:
                # https://github.com/giampaolo/pyftpdlib/issues/171
                # https://bugs.launchpad.net/pyopenssl/+bug/785985
                if err.args and not getattr(err, "errno", None):
                    pass
                else:
                    raise
            except socket.error as err:
                debug("call: _do_ssl_shutdown() -> shutdown(), err: %r" % err,
                      inst=self)
                if err.errno in _ERRNOS_DISCONNECTED:
                    super().close()
                else:
                    raise
            else:
                if done:
                    debug("call: _do_ssl_shutdown(), shutdown completed",
                          inst=self)
                    self._ssl_established = False
                    self._ssl_closing = False
                    self.handle_ssl_shutdown()
                else:
                    debug(
                        "call: _do_ssl_shutdown(), shutdown not completed yet",
                        inst=self)
        def close(self):
            # Attempt a clean SSL shutdown first, unless an error already
            # occurred or no SSL session was ever established.
            if self._ssl_established and not self._error:
                self._do_ssl_shutdown()
            else:
                self._ssl_accepting = False
                self._ssl_established = False
                self._ssl_closing = False
                super().close()
    class TLS_DTPHandler(SSLConnection, DTPHandler):
        """A DTPHandler subclass supporting TLS/SSL."""
        def __init__(self, sock, cmd_channel):
            super().__init__(sock, cmd_channel)
            # Start the TLS handshake immediately if the control channel
            # negotiated PROT P.
            if self.cmd_channel._prot:
                self.secure_connection(self.cmd_channel.ssl_context)
        def __repr__(self):
            # Use the plain DTPHandler representation.
            return DTPHandler.__repr__(self)
        def use_sendfile(self):
            # Kernel-level sendfile() would bypass the userspace SSL
            # layer, so disable it for TLS-wrapped data sockets.
            if isinstance(self.socket, SSL.Connection):
                return False
            else:
                return super().use_sendfile()
        def handle_failed_ssl_handshake(self):
            # TLS/SSL handshake failure, probably client's fault which
            # used a SSL version different from server's.
            # RFC-4217, chapter 10.2 expects us to return 522 over the
            # command channel.
            self.cmd_channel.respond("522 SSL handshake failed.")
            self.cmd_channel.log_cmd("PROT", "P", 522, "SSL handshake failed.")
            self.close()
    class TLS_FTPHandler(SSLConnection, FTPHandler):
        """A FTPHandler subclass supporting TLS/SSL.
        Implements AUTH, PBSZ and PROT commands (RFC-2228 and RFC-4217).
        Configurable attributes:
        - (bool) tls_control_required:
            When True requires SSL/TLS to be established on the control
            channel, before logging in.  This means the user will have
            to issue AUTH before USER/PASS (default False).
        - (bool) tls_data_required:
            When True requires SSL/TLS to be established on the data
            channel.  This means the user will have to issue PROT
            before PASV or PORT (default False).
        SSL-specific options:
        - (string) certfile:
            the path to the file which contains a certificate to be
            used to identify the local side of the connection.
            This  must always be specified, unless context is provided
            instead.
        - (string) keyfile:
            the path to the file containing the private RSA key;
            can be omitted if certfile already contains the private
            key (defaults: None).
        - (int) ssl_protocol:
            the desired SSL protocol version to use. This defaults to
            PROTOCOL_SSLv23 which will negotiate the highest protocol
            that both the server and your installation of OpenSSL
            support.
        - (int) ssl_options:
            specific OpenSSL options. These default to:
            SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3| SSL.OP_NO_COMPRESSION
            which are all considered insecure features.
            Can be set to None in order to improve compatibility with
            older (insecure) FTP clients.
        - (instance) ssl_context:
            a SSL Context object previously configured; if specified
            all other parameters will be ignored.
            (default None).
        """
        # configurable attributes
        tls_control_required = False
        tls_data_required = False
        certfile = None
        keyfile = None
        ssl_protocol = SSL.SSLv23_METHOD
        # - SSLv2 is easily broken and is considered harmful and dangerous
        # - SSLv3 has several problems and is now dangerous
        # - Disable compression to prevent CRIME attacks for OpenSSL 1.0+
        #   (see https://github.com/shazow/urllib3/pull/309)
        ssl_options = SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3
        if hasattr(SSL, "OP_NO_COMPRESSION"):
            ssl_options |= SSL.OP_NO_COMPRESSION
        ssl_context = None
        # overridden attributes
        dtp_handler = TLS_DTPHandler
        proto_cmds = FTPHandler.proto_cmds.copy()
        proto_cmds.update({
            'AUTH': dict(
                perm=None, auth=False, arg=True,
                help='Syntax: AUTH <SP> TLS|SSL (set up secure control '
                     'channel).'),
            'PBSZ': dict(
                perm=None, auth=False, arg=True,
                help='Syntax: PBSZ <SP> 0 (negotiate TLS buffer).'),
            'PROT': dict(
                perm=None, auth=False, arg=True,
                help='Syntax: PROT <SP> [C|P] (set up un/secure data '
                     'channel).'),
        })
        def __init__(self, conn, server, ioloop=None):
            super().__init__(conn, server, ioloop)
            if not self.connected:
                return
            # Advertise the TLS-related features in the FEAT reply.
            self._extra_feats = ['AUTH TLS', 'AUTH SSL', 'PBSZ', 'PROT']
            self._pbsz = False
            self._prot = False
            self.ssl_context = self.get_ssl_context()
        def __repr__(self):
            # Use the plain FTPHandler representation.
            return FTPHandler.__repr__(self)
        @classmethod
        def get_ssl_context(cls):
            """Build the SSL.Context lazily and cache it on the class so
            every handler instance shares the same context.
            """
            if cls.ssl_context is None:
                if cls.certfile is None:
                    raise ValueError("at least certfile must be specified")
                cls.ssl_context = SSL.Context(cls.ssl_protocol)
                if cls.ssl_protocol != SSL.SSLv2_METHOD:
                    cls.ssl_context.set_options(SSL.OP_NO_SSLv2)
                else:
                    warnings.warn("SSLv2 protocol is insecure", RuntimeWarning)
                cls.ssl_context.use_certificate_chain_file(cls.certfile)
                if not cls.keyfile:
                    cls.keyfile = cls.certfile
                cls.ssl_context.use_privatekey_file(cls.keyfile)
                if cls.ssl_options:
                    cls.ssl_context.set_options(cls.ssl_options)
            return cls.ssl_context
        # --- overridden methods
        def flush_account(self):
            """Reset account state, including TLS negotiation flags."""
            FTPHandler.flush_account(self)
            self._pbsz = False
            self._prot = False
        def process_command(self, cmd, *args, **kwargs):
            # Enforce the tls_control_required / tls_data_required
            # policies before letting the command through.
            if cmd in ('USER', 'PASS'):
                if self.tls_control_required and not self._ssl_established:
                    msg = "SSL/TLS required on the control channel."
                    self.respond("550 " + msg)
                    self.log_cmd(cmd, args[0], 550, msg)
                    return
            elif cmd in ('PASV', 'EPSV', 'PORT', 'EPRT'):
                if self.tls_data_required and not self._prot:
                    msg = "SSL/TLS required on the data channel."
                    self.respond("550 " + msg)
                    self.log_cmd(cmd, args[0], 550, msg)
                    return
            FTPHandler.process_command(self, cmd, *args, **kwargs)
        # --- new methods
        def handle_failed_ssl_handshake(self):
            # TLS/SSL handshake failure, probably client's fault which
            # used a SSL version different from server's.
            # We can't rely on the control connection anymore so we just
            # disconnect the client without sending any response.
            self.log("SSL handshake failed.")
            self.close()
        def ftp_AUTH(self, line):
            """Set up secure control channel."""
            arg = line.upper()
            if isinstance(self.socket, SSL.Connection):
                self.respond("503 Already using TLS.")
            elif arg in ('TLS', 'TLS-C', 'SSL', 'TLS-P'):
                # From RFC-4217: "As the SSL/TLS protocols self-negotiate
                # their levels, there is no need to distinguish between SSL
                # and TLS in the application layer".
                self.respond('234 AUTH %s successful.' % arg)
                self.secure_connection(self.ssl_context)
            else:
                self.respond(
                    "502 Unrecognized encryption type (use TLS or SSL).")
        def ftp_PBSZ(self, line):
            """Negotiate size of buffer for secure data transfer.
            For TLS/SSL the only valid value for the parameter is '0'.
            Any other value is accepted but ignored.
            """
            if not isinstance(self.socket, SSL.Connection):
                self.respond(
                    "503 PBSZ not allowed on insecure control connection.")
            else:
                self.respond('200 PBSZ=0 successful.')
                self._pbsz = True
        def ftp_PROT(self, line):
            """Setup un/secure data channel."""
            arg = line.upper()
            if not isinstance(self.socket, SSL.Connection):
                self.respond(
                    "503 PROT not allowed on insecure control connection.")
            elif not self._pbsz:
                self.respond(
                    "503 You must issue the PBSZ command prior to PROT.")
            elif arg == 'C':
                self.respond('200 Protection set to Clear')
                self._prot = False
            elif arg == 'P':
                self.respond('200 Protection set to Private')
                self._prot = True
            elif arg in ('S', 'E'):
                self.respond('521 PROT %s unsupported (use C or P).' % arg)
            else:
                self.respond("502 Unrecognized PROT type (use C or P).")
| mit |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/django_contrib_comments-1.6.1-py2.7.egg/django_comments/views/moderation.py | 10 | 5257 | from __future__ import absolute_import
from django import template
from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.csrf import csrf_protect
import django_comments
from django_comments import signals
from django_comments.views.utils import next_redirect, confirmation_view
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
    """
    Flags a comment. Confirmation on GET, action on POST.
    Templates: :template:`comments/flag.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(django_comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
    # POST: record the flag and redirect to the "done" view.
    if request.method == 'POST':
        perform_flag(request, comment)
        return next_redirect(request, fallback=next or 'comments-flag-done', c=comment.pk)
    # GET: render the confirmation form.
    context = {'comment': comment, "next": next}
    return render_to_response('comments/flag.html', context, template.RequestContext(request))
@csrf_protect
@permission_required("django_comments.can_moderate")
def delete(request, comment_id, next=None):
    """
    Deletes a comment. Confirmation on GET, action on POST. Requires the "can
    moderate comments" permission.
    Templates: :template:`comments/delete.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(django_comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
    # POST: mark the comment deleted and redirect to the "done" view.
    if request.method == 'POST':
        # Flag the comment as deleted instead of actually deleting it.
        perform_delete(request, comment)
        return next_redirect(request, fallback=next or 'comments-delete-done', c=comment.pk)
    # GET: render the confirmation form.
    context = {'comment': comment, "next": next}
    return render_to_response('comments/delete.html', context, template.RequestContext(request))
@csrf_protect
@permission_required("django_comments.can_moderate")
def approve(request, comment_id, next=None):
    """
    Approve a comment (that is, mark it as public and non-removed). Confirmation
    on GET, action on POST. Requires the "can moderate comments" permission.
    Templates: :template:`comments/approve.html`,
    Context:
        comment
            the `comments.comment` object for approval
    """
    comment = get_object_or_404(django_comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
    # POST: mark the comment approved and redirect to the "done" view.
    if request.method == 'POST':
        # Flag the comment as approved.
        perform_approve(request, comment)
        return next_redirect(request, fallback=next or 'comments-approve-done', c=comment.pk)
    # GET: render the confirmation form.
    context = {'comment': comment, "next": next}
    return render_to_response('comments/approve.html', context, template.RequestContext(request))
# The following functions actually perform the various flag/aprove/delete
# actions. They've been broken out into separate functions to that they
# may be called from admin actions.
def perform_flag(request, comment):
    """
    Actually perform the flagging of a comment from a request.
    """
    flag_obj, was_created = django_comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=django_comments.models.CommentFlag.SUGGEST_REMOVAL
    )
    # Notify listeners whether the flag was new or already existed.
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=flag_obj,
        created=was_created,
        request=request,
    )
def perform_delete(request, comment):
    """Mark *comment* as removed (soft delete) and emit the flag signal."""
    flag_obj, was_created = django_comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=django_comments.models.CommentFlag.MODERATOR_DELETION
    )
    # Soft-delete: the row stays in the database.
    comment.is_removed = True
    comment.save()
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=flag_obj,
        created=was_created,
        request=request,
    )
def perform_approve(request, comment):
    """Mark *comment* public and non-removed, then emit the flag signal."""
    flag_obj, was_created = django_comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=django_comments.models.CommentFlag.MODERATOR_APPROVAL,
    )
    # Approval both un-removes and publishes the comment.
    comment.is_removed = False
    comment.is_public = True
    comment.save()
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=flag_obj,
        created=was_created,
        request=request,
    )
# Confirmation views, rendered after a successful POST to the action
# views above.
flag_done = confirmation_view(
    template="comments/flagged.html",
    doc='Displays a "comment was flagged" success page.'
)
delete_done = confirmation_view(
    template="comments/deleted.html",
    doc='Displays a "comment was deleted" success page.'
)
approve_done = confirmation_view(
    template="comments/approved.html",
    doc='Displays a "comment was approved" success page.'
)
| gpl-2.0 |
Blindle/Raspberry | src/state/objects/WordsImporter.py | 1 | 3679 | import os
import json
import helpers.musicHelper as musicHelper
import helpers.usbHelper as usbHelper
from Processor import Processor
from state.stateEnum import StateEnum
from blindle_exceptions.FileNotFoundException import FileNotFoundException
from blindle_exceptions.PendriveDisconnectedException import PendriveDisconnectedException
from blindle_exceptions.FormatException import FormatException
class WordsImporter(Processor):
_PREVIOUS_STATE = StateEnum.CONFIG
_LEARN = "aprendizaje"
_EVALUATE = "evaluacion"
_FILE_NAME = "palabras_a_cargar"
_imported_words = []
def __init__(self):
super(WordsImporter, self).__init__()
print "Toque Enter para importar"
self._play_words_importer_sound("message")
def _set_attributes(self):
super(WordsImporter, self)._set_attributes()
self._previous_state = self._PREVIOUS_STATE
def _select_option(self):
try:
self._import_words()
print "Carga de palabras exitosa"
self._play_words_importer_sound("ok")
except PendriveDisconnectedException:
print "Toque Enter para continuar, Back para salir"
self._play_words_importer_sound("retry")
return
except (FileNotFoundException, FormatException):
pass
self._back_to_previous_state()
def _import_words(self):
f = usbHelper.open_txt_file_from_pendrive(self._FILE_NAME)
result_dict = dict()
line = f.readline()
while line:
line = line.replace('\n', '').replace('\r', '').replace(':', '')
if line == self._LEARN:
result_dict = self._decode_module_words(f, "learn-levels", result_dict)
elif line == self._EVALUATE:
result_dict = self._decode_module_words(f, "evaluation-levels", result_dict)
else:
raise FormatException()
line = f.readline()
f.close()
self._write_custom_levels_file(result_dict)
musicHelper.generate_custom_word_sounds(self._imported_words)
def _decode_module_words(self, file, module_key, result_dict):
line = file.readline()
dict_1 = self._decode_level_words(line)
line = file.readline()
dict_2 = self._decode_level_words(line)
line = file.readline()
dict_3 = self._decode_level_words(line)
result_dict[module_key] = [dict_1, dict_2, dict_3]
return result_dict
def _decode_level_words(self, level_line):
my_dict = dict()
level_line = level_line.split(':')
if len(level_line) != 2: #Caracter (:) mal usado
raise FormatException()
level_number = level_line[0]
words_unprocessed = level_line[1].split(',')
if len(words_unprocessed) == 0: #No se utilizo el caracter (,) entre palabras, o esta vacio
raise FormatException()
words_processed = []
for word in words_unprocessed:
words_processed.append(word.replace(' ', '').replace('\n', '').replace('\r', ''))
my_dict["id"] = level_number
my_dict["words"] = words_processed
self._imported_words.extend(words_processed)
return my_dict
def _write_custom_levels_file(self, result_dict):
json_str = json.dumps(result_dict)
parsed = json.loads(json_str)
file_to_write = open("config/custom_levels.json", 'w')
file_to_write.write(json.dumps(parsed, indent=4, sort_keys=True))
file_to_write.close()
def _play_words_importer_sound(self, name):
musicHelper.play_navigation_sound("words-importer-" + name) | mit |
openfun/edx-platform | scripts/metrics/publish.py | 69 | 1310 | """
Publish Build Stats.
"""
import os
import subprocess
from dogapi import dog_http_api
from coverage_metrics import CoverageMetrics
class PublishStats(object):
    """
    Publish stats to DataDog.
    """
    def __init__(self, api_key):
        # dog_http_api is a module-level client: setting the key here
        # configures every subsequent dog_http_api call.
        dog_http_api.api_key = api_key
    @staticmethod
    def report_metrics(metrics):
        """
        Send metrics to DataDog.
        Arguments:
            metrics (dict): data to publish
        """
        for key, value in metrics.iteritems():
            print u"Sending {} ==> {}%".format(key, value)
            dog_http_api.metric(key, value)
def main(api_key):
    """
    Send Stats for everything to DataDog.
    """
    dir_path = os.path.dirname(os.path.relpath(__file__))
    # Without shell=True each list element is passed to find verbatim, so
    # the pattern must not carry literal quote characters: the previous
    # '"coverage.xml"' made find look for files whose names actually
    # contain the quotes, matching nothing.
    unit_reports_cmd = ['find', 'reports', '-name', 'coverage.xml']
    unit_report_paths = subprocess.check_output(unit_reports_cmd)
    cov_metrics = CoverageMetrics(os.path.join(dir_path, 'unit_test_groups.json'), unit_report_paths)
    coverage_metrics = cov_metrics.coverage_metrics()
    # Publish Coverage Stats to DataDog
    PublishStats(api_key).report_metrics(coverage_metrics)
if __name__ == "__main__":
API_KEY = os.environ.get('DATADOG_API_KEY')
if API_KEY:
main(API_KEY)
else:
print 'SKIP: Publish Stats to Datadog'
| agpl-3.0 |
nach00/Test4 | tests/test_soup.py | 272 | 17391 | # -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""
import logging
import unittest
import sys
import tempfile
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
SoupStrainer,
NamespacedAttribute,
)
import bs4.dammit
from bs4.dammit import (
EntitySubstitution,
UnicodeDammit,
)
from bs4.testing import (
SoupTest,
skipIf,
)
import warnings
try:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
LXML_PRESENT = True
except ImportError, e:
LXML_PRESENT = False
PYTHON_2_PRE_2_7 = (sys.version_info < (2,7))
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2))
class TestConstructor(SoupTest):
    # Sanity checks on BeautifulSoup() construction with unusual markup.
    def test_short_unicode_input(self):
        """Short non-ASCII markup survives parsing unchanged."""
        data = u"<h1>éé</h1>"
        soup = self.soup(data)
        self.assertEqual(u"éé", soup.h1.string)
    def test_embedded_null(self):
        """Embedded NUL characters are preserved, not dropped."""
        data = u"<h1>foo\0bar</h1>"
        soup = self.soup(data)
        self.assertEqual(u"foo\0bar", soup.h1.string)
class TestDeprecatedConstructorArguments(SoupTest):
    # Old BS3-style constructor keywords should warn and still work.
    def test_parseOnlyThese_renamed_to_parse_only(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
        # The warning must mention both the old and the new name.
        msg = str(w[0].message)
        self.assertTrue("parseOnlyThese" in msg)
        self.assertTrue("parse_only" in msg)
        self.assertEqual(b"<b></b>", soup.encode())
    def test_fromEncoding_renamed_to_from_encoding(self):
        with warnings.catch_warnings(record=True) as w:
            utf8 = b"\xc3\xa9"
            soup = self.soup(utf8, fromEncoding="utf8")
        msg = str(w[0].message)
        self.assertTrue("fromEncoding" in msg)
        self.assertTrue("from_encoding" in msg)
        self.assertEqual("utf8", soup.original_encoding)
    def test_unrecognized_keyword_argument(self):
        """Unknown keyword arguments raise TypeError rather than being ignored."""
        self.assertRaises(
            TypeError, self.soup, "<a>", no_such_argument=True)
class TestWarnings(SoupTest):
    # Passing a filename or URL as "markup" should trigger a warning.
    def test_disk_file_warning(self):
        filehandle = tempfile.NamedTemporaryFile()
        filename = filehandle.name
        try:
            with warnings.catch_warnings(record=True) as w:
                soup = self.soup(filename)
            msg = str(w[0].message)
            self.assertTrue("looks like a filename" in msg)
        finally:
            filehandle.close()
        # The file no longer exists, so Beautiful Soup will no longer issue the warning.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup(filename)
        self.assertEqual(0, len(w))
    def test_url_warning(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("http://www.crummy.com/")
        msg = str(w[0].message)
        self.assertTrue("looks like a URL" in msg)
        # A URL followed by other text is treated as ordinary markup.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("http://www.crummy.com/ is great")
        self.assertEqual(0, len(w))
class TestSelectiveParsing(SoupTest):
    # A SoupStrainer restricts the parse tree to the matched elements.
    def test_parse_with_soupstrainer(self):
        markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
        strainer = SoupStrainer("b")
        soup = self.soup(markup, parse_only=strainer)
        self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")
class TestEntitySubstitution(unittest.TestCase):
    """Standalone tests of the EntitySubstitution class."""
    def setUp(self):
        self.sub = EntitySubstitution
    def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entites
        # are substituted, and no others.
        s = u"foo\u2200\N{SNOWMAN}\u00f5bar"
        self.assertEqual(self.sub.substitute_html(s),
                          u"foo∀\N{SNOWMAN}õbar")
    def test_smart_quote_substitution(self):
        # MS smart quotes are a common source of frustration, so we
        # give them a special test.
        quotes = b"\x91\x92foo\x93\x94"
        dammit = UnicodeDammit(quotes)
        self.assertEqual(self.sub.substitute_html(dammit.markup),
                          "‘’foo“”")
    def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
        s = 'Welcome to "my bar"'
        self.assertEqual(self.sub.substitute_xml(s, False), s)
    def test_xml_attribute_quoting_normally_uses_double_quotes(self):
        self.assertEqual(self.sub.substitute_xml("Welcome", True),
                          '"Welcome"')
        self.assertEqual(self.sub.substitute_xml("Bob's Bar", True),
                          '"Bob\'s Bar"')
    def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
        s = 'Welcome to "my bar"'
        self.assertEqual(self.sub.substitute_xml(s, True),
                          "'Welcome to \"my bar\"'")
    def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
        s = 'Welcome to "Bob\'s Bar"'
        self.assertEqual(
            self.sub.substitute_xml(s, True),
            '"Welcome to "Bob\'s Bar""')
    def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
        quoted = 'Welcome to "Bob\'s Bar"'
        self.assertEqual(self.sub.substitute_xml(quoted), quoted)
    def test_xml_quoting_handles_angle_brackets(self):
        self.assertEqual(
            self.sub.substitute_xml("foo<bar>"),
            "foo<bar>")
    def test_xml_quoting_handles_ampersands(self):
        self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&T")
    def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
        self.assertEqual(
            self.sub.substitute_xml("ÁT&T"),
            "&Aacute;T&T")
    def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
        # substitute_xml_containing_entities() leaves existing entities alone.
        self.assertEqual(
            self.sub.substitute_xml_containing_entities("ÁT&T"),
            "ÁT&T")
    def test_quotes_not_html_substituted(self):
        """There's no need to do this except inside attribute values."""
        text = 'Bob\'s "bar"'
        self.assertEqual(self.sub.substitute_html(text), text)
class TestEncodingConversion(SoupTest):
    # Test Beautiful Soup's ability to decode and encode from various
    # encodings.
    def setUp(self):
        super(TestEncodingConversion, self).setUp()
        self.unicode_data = u'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
        self.utf8_data = self.unicode_data.encode("utf-8")
        # Just so you know what it looks like.
        self.assertEqual(
            self.utf8_data,
            b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>')
    def test_ascii_in_unicode_out(self):
        # ASCII input is converted to Unicode. The original_encoding
        # attribute is set to 'utf-8', a superset of ASCII.
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            # Disable chardet, which will realize that the ASCII is ASCII.
            bs4.dammit.chardet_dammit = noop
            ascii = b"<foo>a</foo>"
            soup_from_ascii = self.soup(ascii)
            unicode_output = soup_from_ascii.decode()
            self.assertTrue(isinstance(unicode_output, unicode))
            self.assertEqual(unicode_output, self.document_for(ascii.decode()))
            self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8")
        finally:
            # Restore logging and the real chardet for other tests.
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet
    def test_unicode_in_unicode_out(self):
        # Unicode input is left alone. The original_encoding attribute
        # is not set.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.decode(), self.unicode_data)
        self.assertEqual(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!')
        self.assertEqual(soup_from_unicode.original_encoding, None)
    def test_utf8_in_unicode_out(self):
        # UTF-8 input is converted to Unicode. The original_encoding
        # attribute is set.
        soup_from_utf8 = self.soup(self.utf8_data)
        self.assertEqual(soup_from_utf8.decode(), self.unicode_data)
        self.assertEqual(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!')
    def test_utf8_out(self):
        # The internal data structures can be encoded as UTF-8.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)
    @skipIf(
        PYTHON_2_PRE_2_7 or PYTHON_3_PRE_3_2,
        "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
    def test_attribute_name_containing_unicode_characters(self):
        markup = u'<div><a \N{SNOWMAN}="snowman"></a></div>'
        self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8"))
class TestUnicodeDammit(unittest.TestCase):
    """Standalone tests of UnicodeDammit."""
    def test_unicode_input(self):
        # Already-Unicode input is returned unchanged.
        markup = u"I'm already Unicode! \N{SNOWMAN}"
        dammit = UnicodeDammit(markup)
        self.assertEqual(dammit.unicode_markup, markup)
    def test_smart_quotes_to_unicode(self):
        # Windows-1252 smart quotes become the proper Unicode quote chars.
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup)
        self.assertEqual(
            dammit.unicode_markup, u"<foo>\u2018\u2019\u201c\u201d</foo>")
    def test_smart_quotes_to_xml_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="xml")
        self.assertEqual(
            dammit.unicode_markup, "<foo>‘’“”</foo>")
    def test_smart_quotes_to_html_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="html")
        self.assertEqual(
            dammit.unicode_markup, "<foo>‘’“”</foo>")
    def test_smart_quotes_to_ascii(self):
        # In ascii mode the quotes degrade to plain apostrophes/quotes.
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
        self.assertEqual(
            dammit.unicode_markup, """<foo>''""</foo>""")
    def test_detect_utf8(self):
        utf8 = b"\xc3\xa9"
        dammit = UnicodeDammit(utf8)
        self.assertEqual(dammit.unicode_markup, u'\xe9')
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
    def test_convert_hebrew(self):
        # An explicit candidate encoding is honored.
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8')
        self.assertEqual(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9')
    def test_dont_see_smart_quotes_where_there_are_none(self):
        # Valid multibyte UTF-8 must not be mistaken for Windows-1252 quotes.
        utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        dammit = UnicodeDammit(utf_8)
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
        self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8)
    def test_ignore_inappropriate_codecs(self):
        # A suggested codec that can't decode the data is skipped.
        utf8_data = u"Räksmörgås".encode("utf-8")
        dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
    def test_ignore_invalid_codecs(self):
        # Nonsense codec names are ignored rather than raising.
        utf8_data = u"Räksmörgås".encode("utf-8")
        for bad_encoding in ['.utf8', '...', 'utF---16.!']:
            dammit = UnicodeDammit(utf8_data, [bad_encoding])
            self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
    def test_detect_html5_style_meta_tag(self):
        # All quoting variants of <meta charset=...> are recognized.
        for data in (
            b'<html><meta charset="euc-jp" /></html>',
            b"<html><meta charset='euc-jp' /></html>",
            b"<html><meta charset=euc-jp /></html>",
            b"<html><meta charset=euc-jp/></html>"):
            dammit = UnicodeDammit(data, is_html=True)
            self.assertEqual(
                "euc-jp", dammit.original_encoding)
    def test_last_ditch_entity_replacement(self):
        # This is a UTF-8 document that contains bytestrings
        # completely incompatible with UTF-8 (ie. encoded with some other
        # encoding).
        #
        # Since there is no consistent encoding for the document,
        # Unicode, Dammit will eventually encode the document as UTF-8
        # and encode the incompatible characters as REPLACEMENT
        # CHARACTER.
        #
        # If chardet is installed, it will detect that the document
        # can be converted into ISO-8859-1 without errors. This happens
        # to be the wrong encoding, but it is a consistent encoding, so the
        # code we're testing here won't run.
        #
        # So we temporarily disable chardet if it's present.
        doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            bs4.dammit.chardet_dammit = noop
            dammit = UnicodeDammit(doc)
            self.assertEqual(True, dammit.contains_replacement_characters)
            self.assertTrue(u"\ufffd" in dammit.unicode_markup)
            soup = BeautifulSoup(doc, "html.parser")
            self.assertTrue(soup.contains_replacement_characters)
        finally:
            # Restore logging and the real chardet hook.
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet
    def test_byte_order_mark_removed(self):
        # A document written in UTF-16LE will have its byte order marker stripped.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)
        self.assertEqual(u"<a>áé</a>", dammit.unicode_markup)
        self.assertEqual("utf-16le", dammit.original_encoding)
    def test_detwingle(self):
        # Here's a UTF8 document.
        utf8 = (u"\N{SNOWMAN}" * 3).encode("utf8")
        # Here's a Windows-1252 document.
        windows_1252 = (
            u"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
            u"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")
        # Through some unholy alchemy, they've been stuck together.
        doc = utf8 + windows_1252 + utf8
        # The document can't be turned into UTF-8:
        self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")
        # Unicode, Dammit thinks the whole document is Windows-1252,
        # and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"
        # But if we run it through fix_embedded_windows_1252, it's fixed:
        fixed = UnicodeDammit.detwingle(doc)
        self.assertEqual(
            u"☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8"))
    def test_detwingle_ignores_multibyte_characters(self):
        # Each of these characters has a UTF-8 representation ending
        # in \x93. \x93 is a smart quote if interpreted as
        # Windows-1252. But our code knows to skip over multibyte
        # UTF-8 characters, so they'll survive the process unscathed.
        for tricky_unicode_char in (
            u"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
            u"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
            u"\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
            ):
            input = tricky_unicode_char.encode("utf8")
            self.assertTrue(input.endswith(b'\x93'))
            output = UnicodeDammit.detwingle(input)
            self.assertEqual(output, input)
class TestNamedspacedAttribute(SoupTest):
    """Tests for the NamespacedAttribute string subclass."""
    def test_name_may_be_none(self):
        attr = NamespacedAttribute("xmlns", None)
        self.assertEqual(attr, "xmlns")
    def test_attribute_is_equivalent_to_colon_separated_string(self):
        attr = NamespacedAttribute("a", "b")
        self.assertEqual("a:b", attr)
    def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
        first = NamespacedAttribute("a", "b", "c")
        second = NamespacedAttribute("a", "b", "c")
        self.assertEqual(first, second)
        # Equality ignores the namespace component entirely...
        same_without_ns = NamespacedAttribute("a", "b", None)
        self.assertEqual(first, same_without_ns)
        # ...but a differing name or prefix breaks it.
        other_name = NamespacedAttribute("a", "z", "c")
        self.assertNotEqual(first, other_name)
        other_prefix = NamespacedAttribute("z", "b", "c")
        self.assertNotEqual(first, other_prefix)
class TestAttributeValueWithCharsetSubstitution(unittest.TestCase):
    """Tests for attribute values that rewrite their charset when re-encoded."""
    # BUG FIX: both methods were named test_content_meta_attribute_value, so
    # the second definition shadowed the first and the CharsetMetaAttributeValue
    # test never ran.  The first method is renamed after the class it exercises.
    def test_charset_meta_attribute_value(self):
        # <meta charset="..."> values encode to the requested encoding's name.
        value = CharsetMetaAttributeValue("euc-jp")
        self.assertEqual("euc-jp", value)
        self.assertEqual("euc-jp", value.original_value)
        self.assertEqual("utf8", value.encode("utf8"))
    def test_content_meta_attribute_value(self):
        # <meta content="text/html; charset=..."> values substitute the charset.
        value = ContentMetaAttributeValue("text/html; charset=euc-jp")
        self.assertEqual("text/html; charset=euc-jp", value)
        self.assertEqual("text/html; charset=euc-jp", value.original_value)
        self.assertEqual("text/html; charset=utf8", value.encode("utf8"))
| gpl-3.0 |
lgh8820/ansible-test | yjy_all_scripts/get_file_size_xiyizonghe.py | 1 | 1917 | #! /usr/bin/env python
# -*- coding:utf8 -*-
import os,sys
#! /usr/bin/env python
# -*- coding:utf8 -*-
import MySQLdb as mdb
# Module-wide MySQL connection and cursor shared by both helpers below.
# NOTE(review): credentials are hard-coded here; consider moving to config.
conn=mdb.connect(host="localhost",user='root',passwd='abcxxx123',db='yjy_xiyizonghe',unix_socket='/tmp/mysql.sock')
cursor=conn.cursor()
def get_media_url_to_file():
try:
cursor.execute("select `id`,`name`,`media_url` from yjy_im_chat_aes where chapter_id=29 and media_url != '';")
datas = cursor.fetchall()
dates = []
for data in datas:
#server_url = data[1].replace('http://media.yijiaoyuan.net:9999','/data/hls').replace('_aes','').replace('aes_','').replace('http://m1.letiku.net','/data/hls')
#new_url = "/".join(data[1].split('/')[:-2]) + '/' + 'aes_' + data[1].split('/')[-2] + '/' + ''.join(data[1].split('/')[-1].split('.')[0]) + '_aes.m3u8'
date = data[2].split('/')[-3]
orignal_media = date + '_' + str(dates.count(date)) + '.mp4'
#print server_url
#print orignal_media
sql="update yjy_im_chat_aes set orignal_media = '%s' where id=%d;"%(orignal_media,data[0])
print sql
cursor.execute(sql)
except mdb.Error,e:
print e
conn.rollback()
conn.close()
def patch_file_size_from_file():
    # Patch file_size for rows listed in /root/m_xz.txt ("<size>M\t<path>").
    # NOTE(review): cursor.execute is commented out below, so this function
    # is currently a dry run that only prints the SQL it would execute.
    try:
        with open('/root/m_xz.txt') as f:
            c = f.readlines()
            for line in c:
                # "123M" -> "123"
                file_size = line.split('\t')[0].split('M')[0]
                # Map the local hls path back to the public media host.
                media_url = line.split('\t')[1].replace('/data/hls','http://media.yijiaoyuan.net:9999')
                #print media_url
                # Rebuild the aes-prefixed m3u8 variant of the url.
                new_url = "/".join(media_url.split('/')[:-2]) + '/' + 'aes_' + media_url.split('/')[-2] + '/' + ''.join(media_url.split('/')[-1].split('.')[0]) + '_aes.m3u8'
                #print new_url
                sql = "update yjy_im_chat_aes set file_size='%s' where media_url='%s';"%(file_size,new_url.replace('_aes','').replace('aes_',''))
                print sql
                #cursor.execute(sql)
    except mdb.Error,e:
        print e
        conn.rollback()
        conn.close()
if __name__ == '__main__':
    # Only the renaming pass runs; the file-size patch stays disabled.
    get_media_url_to_file()
    #patch_file_size_from_file()
| mit |
repotvsupertuga/repo | plugin.video.pancas/default.py | 3 | 146144 | # -*- coding: utf-8 -*-
import urllib
import urllib2
import re
import os
import xbmcplugin
import xbmcgui
import xbmcaddon
import xbmcvfs
import traceback
import cookielib,base64
try: import urlresolver
except: pass
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
viewmode=None
try:
from xml.sax.saxutils import escape
except: traceback.print_exc()
try:
import json
except:
import simplejson as json
import SimpleDownloader as downloader
import time
from resources.lib.libraries import client
from resources.lib.libraries import client2
from resources.lib.libraries import control
from resources.lib.resolvers import realdebrid
from resources.lib.resolvers import premiumize
# Module state: whether the current stream should go through the TS downloader.
tsdownloader=False
# Hosts the addon can hand off to a resolver.
# BUG FIX: a missing comma between 'embedupload.com' and 'bestreams.net'
# implicitly concatenated them into the bogus host
# 'embedupload.combestreams.net', so neither host ever matched.
resolve_url=['180upload.com', 'allmyvideos.net', 'embedupload.com', 'bestreams.net', 'clicknupload.com', 'cloudzilla.to', 'movshare.net', 'novamov.com', 'nowvideo.sx', 'videoweed.es', 'daclips.in', 'datemule.com', 'fastvideo.in', 'faststream.in', 'filehoot.com', 'filenuke.com', 'sharesix.com', 'plus.google.com', 'picasaweb.google.com', 'gorillavid.com', 'gorillavid.in', 'grifthost.com', 'hugefiles.net', 'ipithos.to', 'ishared.eu', 'kingfiles.net', 'mail.ru', 'my.mail.ru', 'videoapi.my.mail.ru', 'mightyupload.com', 'mooshare.biz', 'movdivx.com', 'movpod.net', 'movpod.in', 'movreel.com', 'mrfile.me', 'nosvideo.com', 'openload.co', 'played.to', 'bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net', 'primeshare.tv', 'bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net', 'sharerepo.com', 'stagevu.com', 'streamcloud.eu', 'streamin.to', 'thefile.me', 'thevideo.me', 'tusfiles.net', 'uploadc.com', 'zalaa.com', 'uploadrocket.net', 'uptobox.com', 'v-vids.com', 'veehd.com', 'vidbull.com', 'videomega.tv', 'vidplay.net', 'vidspot.net', 'vidto.me', 'vidzi.tv', 'vimeo.com', 'vk.com', 'vodlocker.com', 'xfileload.com', 'xvidstage.com', 'zettahost.tv']
# Plugins for which setResolvedUrl must not be used.
g_ignoreSetResolved=['plugin.video.dramasonline','plugin.video.f4mTester','plugin.video.shahidmbcnet','plugin.video.SportsDevil','plugin.stream.vaughnlive.tv','plugin.video.ZemTV-shani']
class NoRedirection(urllib2.HTTPErrorProcessor):
    # Opener handler that disables HTTP error/redirect processing by
    # returning every response (including 3xx/4xx) unchanged.
    def http_response(self, request, response):
        return response
    https_response = http_response  # same pass-through for https responses
# When True, attach to a listening pydev/Eclipse debug server on startup.
REMOTE_DBG=False;
if REMOTE_DBG:
    # Make pydev debugger works for auto reload.
    # Note pydevd module need to be copied in XBMC\system\python\Lib\pysrc
    try:
        import pysrc.pydevd as pydevd
        # stdoutToServer and stderrToServer redirect stdout and stderr to eclipse console
        pydevd.settrace('localhost', stdoutToServer=True, stderrToServer=True)
    except ImportError:
        sys.stderr.write("Error: " +
            "You must add org.python.pydev.debug.pysrc to your PYTHONPATH.")
        sys.exit(1)
# Addon identity and filesystem layout (profile = writable data dir,
# home = addon install dir).
addon = xbmcaddon.Addon('plugin.video.pancas')
addon_version = addon.getAddonInfo('version')
profile = xbmc.translatePath(addon.getAddonInfo('profile').decode('utf-8'))
home = xbmc.translatePath(addon.getAddonInfo('path').decode('utf-8'))
favorites = os.path.join(profile, 'favorites')
history = os.path.join(profile, 'history')
REV = os.path.join(profile, 'list_revision')
icon = os.path.join(home, 'icon.png')
FANART = os.path.join(home, 'fanart.jpg')
source_file = os.path.join(home, 'source_file')
functions_dir = profile
communityfiles = os.path.join(profile, 'LivewebTV')
downloader = downloader.SimpleDownloader()
debug = addon.getSetting('debug')
# Preload favorites/sources file contents so menu code can test membership.
if os.path.exists(favorites)==True:
    FAV = open(favorites).read()
else: FAV = []
if os.path.exists(source_file)==True:
    SOURCES = open(source_file).read()
else: SOURCES = []
def addon_log(string):
    """Write *string* to the Kodi log, but only when debug logging is enabled."""
    if debug != 'true':
        return
    xbmc.log("[addon.pancas-%s]: %s" %(addon_version, string))
def request3(url):
    # Resolve *url* to a directly playable stream url.  Tries, in order:
    # embedded <regex> pre-processing, Real-Debrid, Premiumize, raw rtmp
    # pass-through (with a default timeout appended), and finally the
    # urlresolver library.  Returns None when no resolver matches, or the
    # original url if an unexpected error escapes the whole cascade.
    try:
        control.log("#RESOLVER# my url 1 ************ %s " % url)
        if '</regex>' in url:
            import regex ; url = regex.resolve(url)
        rd = realdebrid.resolve(url)
        #control.log("#RESOLVER# my rd 2 ************ %s url: %s" % (rd,url))
        if not rd == None: return rd
        pz = premiumize.resolve(url)
        if not pz == None: return pz
        if url.startswith('rtmp'):
            # Append a default timeout unless the rtmp url already has one.
            if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0: url += ' timeout=10'
            return url
        try:
            z=False
            hmf = urlresolver.HostedMediaFile(url,include_disabled=True, include_universal=False)
            if hmf:
                print 'yay! we can resolve this one'
                z = hmf.resolve()
            else:
                print 'sorry :( no resolvers available to handle this one.'
            control.log("!!!!!!!!! OK #urlresolver# URL %s " % z)
            # resolve() returns False on failure, hence the explicit check.
            if z !=False : return z
        except Exception as e:
            control.log("!!!!!!!!! ERROR #urlresolver# URL %s " % e)
            pass
        return None
    except:
        return url
def info():
    """Return the hoster metadata table used by the resolver machinery.

    Every entry carries 'class' (resolver class name, possibly empty) and
    'netloc' (domains handled).  Resolvable file hosts additionally carry
    'host' (display names), 'quality', 'captcha' and 'a/c' (account needed).
    """
    # Columns: (class, netloc, host, quality, captcha, a/c).
    # host is None for hosts whose display name is disabled; quality/captcha/a-c
    # are None for plain stream sites that only map a resolver class.
    table = [
        ('', ['oboom.com', 'rapidgator.net', 'uploaded.net'], ['Oboom', 'Rapidgator', 'Uploaded'], 'High', False, True),
        ('okru', ['ok.ru'], None, None, None, None),
        ('', ['youwatch.com'], ['youwatch'], 'Low', False, False),
        ('_180upload', ['180upload.com'], ['180upload'], 'High', False, False),
        ('allmyvideos', ['allmyvideos.net'], ['Allmyvideos'], 'Medium', False, False),
        ('allvid', ['allvid.ch'], ['Allvid'], 'High', False, False),
        ('bestreams', ['bestreams.net'], ['Bestreams'], 'Low', False, False),
        ('clicknupload', ['clicknupload.com', 'clicknupload.link'], ['Clicknupload'], 'High', False, False),
        ('cloudtime', ['cloudtime.to'], ['Cloudtime'], 'Medium', False, False),
        ('cloudyvideos', ['cloudyvideos.com'], None, 'High', False, False),
        ('cloudzilla', ['cloudzilla.to'], ['Cloudzilla'], 'Medium', False, False),
        ('daclips', ['daclips.in'], ['Daclips'], 'Low', False, False),
        ('yadisk', ['yadi.sk'], None, None, None, None),
        ('dailymotion', ['dailymotion.com'], None, None, None, None),
        ('datemule', ['datemule.com'], None, None, None, None),
        ('divxpress', ['divxpress.com'], ['Divxpress'], 'Medium', False, False),
        ('exashare', ['exashare.com'], ['Exashare'], 'Low', False, False),
        ('fastvideo', ['fastvideo.in', 'faststream.in', 'rapidvideo.ws'], ['Fastvideo', 'Faststream'], 'Low', False, False),
        ('filehoot', ['filehoot.com'], ['Filehoot'], 'Low', False, False),
        ('filenuke', ['filenuke.com', 'sharesix.com'], ['Filenuke', 'Sharesix'], 'Low', False, False),
        ('filmon', ['filmon.com'], None, None, None, None),
        ('filepup', ['filepup.net'], None, None, None, None),
        ('googledocs', ['google.com'], None, None, None, None),
        ('googledocs', ['docs.google.com', 'drive.google.com'], None, None, None, None),
        ('googlephotos', ['photos.google.com'], None, None, None, None),
        ('googlepicasa', ['picasaweb.google.com'], None, None, None, None),
        ('googleplus', ['plus.google.com'], None, None, None, None),
        ('gorillavid', ['gorillavid.com', 'gorillavid.in'], ['Gorillavid'], 'Low', False, False),
        ('grifthost', ['grifthost.com'], None, 'High', False, False),
        ('hdcast', ['hdcast.me'], None, None, None, None),
        ('hugefiles', ['hugefiles.net'], ['Hugefiles'], 'High', True, False),
        ('ipithos', ['ipithos.to'], ['Ipithos'], 'High', False, False),
        ('ishared', ['ishared.eu'], ['iShared'], 'High', False, False),
        ('kingfiles', ['kingfiles.net'], ['Kingfiles'], 'High', True, False),
        ('letwatch', ['letwatch.us'], ['Letwatch'], 'Medium', False, False),
        ('mailru', ['mail.ru', 'my.mail.ru', 'videoapi.my.mail.ru', 'api.video.mail.ru'], None, None, None, None),
        ('cloudmailru', ['cloud.mail.ru'], None, None, None, None),
        ('mightyupload', ['mightyupload.com'], ['Mightyupload'], 'High', False, False),
        ('movdivx', ['movdivx.com'], ['Movdivx'], 'Low', False, False),
        ('movpod', ['movpod.net', 'movpod.in'], ['Movpod'], 'Low', False, False),
        ('movshare', ['movshare.net'], ['Movshare'], 'Low', False, False),
        ('mrfile', ['mrfile.me'], ['Mrfile'], 'High', False, False),
        ('mybeststream', ['mybeststream.xyz'], None, None, None, None),
        ('nosvideo', ['nosvideo.com'], None, 'Low', False, False),
        ('novamov', ['novamov.com'], ['Novamov'], 'Low', False, False),
        ('nowvideo', ['nowvideo.eu', 'nowvideo.sx'], ['Nowvideo'], 'Low', False, False),
        ('openload', ['openload.io', 'openload.co'], ['Openload'], 'High', False, False),
        ('p2pcast', ['p2pcast.tv'], None, None, None, None),
        ('primeshare', ['primeshare.tv'], ['Primeshare'], 'High', False, False),
        ('promptfile', ['promptfile.com'], ['Promptfile'], 'High', False, False),
        ('putstream', ['putstream.com'], ['Putstream'], 'Medium', False, False),
        ('realvid', ['realvid.net'], ['Realvid'], 'Low', False, False),
        ('sawlive', ['sawlive.tv'], None, None, None, None),
        ('sharerepo', ['sharerepo.com'], ['Sharerepo'], 'Low', False, False),
        ('skyvids', ['skyvids.net'], ['Skyvids'], 'High', False, False),
        ('speedvideo', ['speedvideo.net'], None, None, None, None),
        ('stagevu', ['stagevu.com'], ['StageVu'], 'Low', False, False),
        ('streamcloud', ['streamcloud.eu'], ['Streamcloud'], 'Medium', False, False),
        ('streamin', ['streamin.to'], ['Streamin'], 'Medium', False, False),
        ('thefile', ['thefile.me'], ['Thefile'], 'Low', False, False),
        ('thevideo', ['thevideo.me'], ['Thevideo'], 'Low', False, False),
        ('turbovideos', ['turbovideos.net'], ['Turbovideos'], 'High', False, False),
        ('tusfiles', ['tusfiles.net'], ['Tusfiles'], 'High', False, False),
        ('up2stream', ['up2stream.com'], ['Up2stream'], 'Medium', False, False),
        ('uploadc', ['uploadc.com', 'uploadc.ch', 'zalaa.com'], ['Uploadc', 'Zalaa'], 'Medium', False, False),
        ('uploadrocket', ['uploadrocket.net'], ['Uploadrocket'], 'High', True, False),
        ('uptobox', ['uptobox.com'], ['Uptobox'], 'High', False, False),
        ('v_vids', ['v-vids.com'], ['V-vids'], 'High', False, False),
        ('vaughnlive', ['vaughnlive.tv', 'breakers.tv', 'instagib.tv', 'vapers.tv'], None, None, None, None),
        ('veehd', ['veehd.com'], None, None, None, None),
        ('veetle', ['veetle.com'], None, None, None, None),
        ('vidbull', ['vidbull.com'], ['Vidbull'], 'Low', False, False),
        ('videomega', ['videomega.tv'], None, 'High', False, False),
        ('videopremium', ['videopremium.tv', 'videopremium.me'], None, None, None, None),
        ('videoweed', ['videoweed.es'], ['Videoweed'], 'Low', False, False),
        ('vidlockers', ['vidlockers.ag'], ['Vidlockers'], 'High', False, False),
        ('vidspot', ['vidspot.net'], ['Vidspot'], 'Medium', False, False),
        ('vidto', ['vidto.me'], ['Vidto'], 'Medium', False, False),
        ('vidzi', ['vidzi.tv'], ['Vidzi'], 'Low', False, False),
        ('vimeo', ['vimeo.com'], None, None, None, None),
        ('vk', ['vk.com'], None, None, None, None),
        ('vodlocker', ['vodlocker.com'], ['Vodlocker'], 'Low', False, False),
        ('xfileload', ['xfileload.com'], ['Xfileload'], 'High', True, False),
        ('xvidstage', ['xvidstage.com'], ['Xvidstage'], 'Medium', False, False),
        ('youtube', ['youtube.com'], ['Youtube'], 'Medium', False, False),
        ('zettahost', ['zettahost.tv'], ['Zettahost'], 'High', False, False),
        ('zstream', ['zstream.to'], ['zStream'], 'Medium', False, False),
        ('watch1080p', ['watch1080p.com'], ['watch1080p'], 'High', False, False),
    ]
    hosters = []
    for cls, netloc, host, quality, captcha, account in table:
        entry = {'class': cls, 'netloc': netloc}
        if quality is not None:
            if host is not None:
                entry['host'] = host
            entry['quality'] = quality
            entry['captcha'] = captcha
            entry['a/c'] = account
        hosters.append(entry)
    return hosters
def makeRequest(url, headers=None):
    # Fetch *url* and return the response body as a string.  On failure a
    # Kodi notification is shown and None is returned implicitly.
    try:
        if headers is None:
            # Default to a desktop Firefox user agent; some hosts block bots.
            headers = {'User-agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0'}
        req = urllib2.Request(url,None,headers)
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
        return data
    except urllib2.URLError, e:
        addon_log('URL: '+url)
        if hasattr(e, 'code'):
            addon_log('We failed with error code - %s.' % e.code)
            xbmc.executebuiltin("XBMC.Notification(pancas,We failed with error code - "+str(e.code)+",10000,"+icon+")")
        elif hasattr(e, 'reason'):
            addon_log('We failed to reach a server.')
            addon_log('Reason: %s' %e.reason)
            xbmc.executebuiltin("XBMC.Notification(pancas,We failed to reach a server. - "+str(e.reason)+",10000,"+icon+")")
def getsources():
    # Build the addon's root menu: optional Favorites/XML Database/Community/
    # Search entries, followed by the user's saved sources.  With exactly one
    # saved source its content is opened directly instead of listing it.
    # NOTE(review): `ab` (saved-sources file path) is defined elsewhere in the
    # file; the file content is double-base64-encoded JSON.
    try:
        if os.path.exists(favorites) == True:
            addDir('Favorites','url',4,os.path.join(home, 'reba', 'favorite.png'),FANART,'','','','')
        if addon.getSetting("browse_xml_database") == "true":
            addDir('XML Database','http://xbmcplus.xb.funpic.de/www-data/filesystem/',15,icon,FANART,'','','','')
        if addon.getSetting("browse_community") == "true":
            addDir('Community Files','community_files',16,icon,FANART,'','','','')
        if addon.getSetting("searchotherplugins") == "true":
            addDir('Search Other Plugins','Search Plugins',25,icon,FANART,'','','','')
        if os.path.exists(ab)==True:
            ba = json.loads(open(ab,"r").read().decode('base64').decode('base64'))
            #print 'ba',ba
            if len(ba) > 1:
                for i in ba:
                    try:
                        ## for pre 1.0.8 ba
                        if isinstance(i, list):
                            addDir(i[0].encode('utf-8'),i[1].encode('utf-8'),1,icon,FANART,'','','','','source')
                        else:
                            # Newer entries are dicts with optional art/metadata.
                            thumb = icon
                            fanart = FANART
                            desc = ''
                            date = ''
                            credits = ''
                            genre = ''
                            if i.has_key('thumbnail'):
                                thumb = i['thumbnail']
                            if i.has_key('fanart'):
                                fanart = i['fanart']
                            if i.has_key('description'):
                                desc = i['description']
                            if i.has_key('date'):
                                date = i['date']
                            if i.has_key('genre'):
                                genre = i['genre']
                            if i.has_key('credits'):
                                credits = i['credits']
                            addDir(i['title'].encode('utf-8'),i['url'].encode('utf-8'),1,thumb,fanart,desc,genre,date,credits,'source')
                    except: traceback.print_exc()
            else:
                if len(ba) == 1:
                    # Single source: open it directly.
                    if isinstance(ba[0], list):
                        getData(ba[0][1].encode('utf-8'),FANART)
                    else:
                        getData(ba[0]['url'], ba[0]['fanart'])
    except: traceback.print_exc()
def addSource(url=None):
    """Add a new playlist source (from settings or from *url*) to the saved list.

    Tries to read title/art metadata from the source's <channels_info> or
    <items_info> block; otherwise derives a name from the url and lets the
    user edit it.  Afterwards the source settings are cleared and the view
    is refreshed (or the settings dialog is opened for setting-based adds).
    """
    # BUG FIX: source_url was unbound (NameError at the check below) when
    # url is None and neither settings field is filled in.
    source_url = None
    if url is None:
        if not addon.getSetting("new_file_source") == "":
            source_url = addon.getSetting('new_file_source').decode('utf-8')
        elif not addon.getSetting("new_url_source") == "":
            source_url = addon.getSetting('new_url_source').decode('utf-8')
    else:
        source_url = url
    if source_url == '' or source_url is None:
        return
    addon_log('Adding New Source: '+source_url.encode('utf-8'))
    media_info = None
    #print 'source_url',source_url
    data = getSoup(source_url)
    if isinstance(data,BeautifulSOAP):
        if data.find('channels_info'):
            media_info = data.channels_info
        elif data.find('items_info'):
            media_info = data.items_info
    if media_info:
        # Pull whatever optional metadata the source declares about itself.
        source_media = {}
        source_media['url'] = source_url
        try: source_media['title'] = media_info.title.string
        except: pass
        try: source_media['thumbnail'] = media_info.thumbnail.string
        except: pass
        try: source_media['fanart'] = media_info.fanart.string
        except: pass
        try: source_media['genre'] = media_info.genre.string
        except: pass
        try: source_media['description'] = media_info.description.string
        except: pass
        try: source_media['date'] = media_info.date.string
        except: pass
        try: source_media['credits'] = media_info.credits.string
        except: pass
    else:
        # No metadata block: derive a display name from the last path
        # component and let the user rename it.
        if '/' in source_url:
            nameStr = source_url.split('/')[-1].split('.')[0]
        if '\\' in source_url:
            nameStr = source_url.split('\\')[-1].split('.')[0]
        if '%' in nameStr:
            nameStr = urllib.unquote_plus(nameStr)
        keyboard = xbmc.Keyboard(nameStr,'Displayed Name, Rename?')
        keyboard.doModal()
        if (keyboard.isConfirmed() == False):
            return
        newStr = keyboard.getText()
        if len(newStr) == 0:
            return
        source_media = {}
        source_media['title'] = newStr
        source_media['url'] = source_url
        source_media['fanart'] = fanart
    # NOTE(review): reads of `ab` double-base64-decode, but writes here store
    # plain JSON -- confirm where the file gets re-encoded.
    if os.path.exists(ab)==False:
        source_list = []
        source_list.append(source_media)
        b = open(ab,"w")
        b.write(json.dumps(source_list))
        b.close()
    else:
        ba = json.loads(open(ab,"r").read().decode('base64').decode('base64'))
        ba.append(source_media)
        b = open(ab,"w")
        b.write(json.dumps(ba))
        b.close()
    addon.setSetting('new_url_source', "")
    addon.setSetting('new_file_source', "")
    xbmc.executebuiltin("XBMC.Notification(pancas,New source added.,5000,"+icon+")")
    if not url is None:
        if 'xbmcplus.xb.funpic.de' in url:
            xbmc.executebuiltin("XBMC.Container.Update(%s?mode=14,replace)" %sys.argv[0])
        elif 'community-links' in url:
            xbmc.executebuiltin("XBMC.Container.Update(%s?mode=10,replace)" %sys.argv[0])
    else: addon.openSettings()
def rmSource(name):
    """Delete the first saved source whose title matches *name*, then refresh."""
    sources = json.loads(open(ab,"r").read().decode('base64').decode('base64'))
    for idx, entry in enumerate(sources):
        # Legacy entries are [title, url] lists; newer ones are dicts.
        title = entry[0] if isinstance(entry, list) else entry['title']
        if title == name:
            del sources[idx]
            handle = open(ab,"w")
            handle.write(json.dumps(sources))
            handle.close()
            break
    xbmc.executebuiltin("XBMC.Container.Refresh")
def get_xml_database(url, browse=False):
    # List the remote XML database directory at *url* (default mirror used
    # when None).  Directories recurse via mode 15/14; .xml files are added
    # as downloadable sources (mode 1 when browsing, 11 otherwise).
    if url is None:
        url = 'http://xbmcplus.xb.funpic.de/www-data/filesystem/'
    soup = BeautifulSoup(makeRequest(url), convertEntities=BeautifulSoup.HTML_ENTITIES)
    for i in soup('a'):
        href = i['href']
        # Skip the Apache index's sort links ("?C=N;O=D" etc.).
        if not href.startswith('?'):
            name = i.string
            if name not in ['Parent Directory', 'recycle_bin/']:
                if href.endswith('/'):
                    if browse:
                        addDir(name,url+href,15,icon,fanart,'','','')
                    else:
                        addDir(name,url+href,14,icon,fanart,'','','')
                elif href.endswith('.xml'):
                    if browse:
                        addDir(name,url+href,1,icon,fanart,'','','','','download')
                    else:
                        # NOTE(review): `ba` is not loaded in this function --
                        # it appears to rely on a global set elsewhere; verify
                        # this branch cannot raise NameError.
                        if os.path.exists(ab)==True:
                            if name in ba:
                                addDir(name+' (in use)',url+href,11,icon,fanart,'','','','','download')
                            else:
                                addDir(name,url+href,11,icon,fanart,'','','','','download')
                        else:
                            addDir(name,url+href,11,icon,fanart,'','','','','download')
def getCommunityba(browse=False):
    """List the community link files hosted on the googlecode svn mirror."""
    base = 'http://community-links.googlecode.com/svn/trunk/'
    page = BeautifulSoup(makeRequest(base), convertEntities=BeautifulSoup.HTML_ENTITIES)
    # Mode 1 opens the file when browsing; mode 11 installs it as a source.
    mode = 1 if browse else 11
    # The first <li> is the "parent directory" entry -- skip it.
    for item in page('ul')[0]('li')[1:]:
        href = item('a')[0]['href']
        addDir(href, base + href, mode, icon, fanart, '', '', '', '', 'download')
def getSoup(url,data=None):
    # Load a source from a url, a local/cache path, or pre-supplied *data*
    # and return either a raw m3u string or a parsed BeautifulSOAP document.
    # Side effects: resets/sets the tsdownloader and viewmode globals.
    global viewmode,tsdownloader
    tsdownloader=False
    if url.startswith('http://') or url.startswith('https://'):
        enckey=False
        # Strip inline control markers from the url before fetching.
        if '$$TSDOWNLOADER$$' in url:
            tsdownloader=True
            url=url.replace("$$TSDOWNLOADER$$","")
        if '$$LSProEncKey=' in url:
            enckey=url.split('$$LSProEncKey=')[1].split('$$')[0]
            rp='$$LSProEncKey=%s$$'%enckey
            url=url.replace(rp,"")
        data =makeRequest(url)
        if enckey:
            # Payload is base64'd AES-ECB ciphertext; pad the key to 16 bytes
            # with NULs and drop trailing NUL padding after decryption.
            import pyaes
            enckey=enckey.encode("ascii")
            print enckey
            missingbytes=16-len(enckey)
            enckey=enckey+(chr(0)*(missingbytes))
            print repr(enckey)
            data=base64.b64decode(data)
            decryptor = pyaes.new(enckey , pyaes.MODE_ECB, IV=None)
            data=decryptor.decrypt(data).split('\0')[0]
            #print repr(data)
        if re.search("#EXTM3U",data) or 'm3u' in url:
            # print 'found m3u data'
            return data
    elif data == None:
        # Bare filenames are looked up in the community cache directory.
        if not '/' in url or not '\\' in url:
            # print 'No directory found. Lets make the url to cache dir'
            url = os.path.join(communityfiles,url)
        if xbmcvfs.exists(url):
            if url.startswith("smb://") or url.startswith("nfs://"):
                # Network shares can't be open()ed directly; copy to temp first.
                copy = xbmcvfs.copy(url, os.path.join(profile, 'temp', 'sorce_temp.txt'))
                if copy:
                    data = open(os.path.join(profile, 'temp', 'sorce_temp.txt'), "r").read()
                    xbmcvfs.delete(os.path.join(profile, 'temp', 'sorce_temp.txt'))
                else:
                    addon_log("failed to copy from smb:")
            else:
                data = open(url, 'r').read()
            if re.match("#EXTM3U",data)or 'm3u' in url:
                # print 'found m3u data'
                return data
        else:
            addon_log("Soup Data not found!")
            return
    # Honor an embedded <SetViewMode> directive before parsing.
    if '<SetViewMode>' in data:
        try:
            viewmode=re.findall('<SetViewMode>(.*?)<',data)[0]
            xbmc.executebuiltin("Container.SetViewMode(%s)"%viewmode)
            print 'done setview',viewmode
        except: pass
    return BeautifulSOAP(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
def getData(url,fanart, data=None):
    # Render a source: XML documents with <channel> nodes become channel
    # directories, other XML is handed to getItems, and raw m3u strings go
    # to parse_m3u.
    soup = getSoup(url,data)
    #print type(soup)
    if isinstance(soup,BeautifulSOAP):
        #print 'xxxxxxxxxxsoup',soup
        if len(soup('channels')) > 0 and addon.getSetting('donotshowbychannels') == 'false':
            channels = soup('channel')
            for channel in channels:
                # print channel
                linkedUrl=''
                lcount=0
                try:
                    linkedUrl = channel('externallink')[0].string
                    lcount=len(channel('externallink'))
                except: pass
                #print 'linkedUrl',linkedUrl,lcount
                # Only a single external link makes the channel a direct source.
                if lcount>1: linkedUrl=''
                name = channel('name')[0].string
                thumbnail = channel('thumbnail')[0].string
                if thumbnail == None:
                    thumbnail = ''
                # Each optional field below falls back via the bare `raise`
                # when the tag is missing or empty.
                try:
                    if not channel('fanart'):
                        if addon.getSetting('use_thumb') == "true":
                            fanArt = thumbnail
                        else:
                            fanArt = fanart
                    else:
                        fanArt = channel('fanart')[0].string
                    if fanArt == None:
                        raise
                except:
                    fanArt = fanart
                try:
                    desc = channel('info')[0].string
                    if desc == None:
                        raise
                except:
                    desc = ''
                try:
                    genre = channel('genre')[0].string
                    if genre == None:
                        raise
                except:
                    genre = ''
                try:
                    date = channel('date')[0].string
                    if date == None:
                        raise
                except:
                    date = ''
                try:
                    credits = channel('credits')[0].string
                    if credits == None:
                        raise
                except:
                    credits = ''
                try:
                    if linkedUrl=='':
                        addDir(name.encode('utf-8', 'ignore'),url.encode('utf-8'),2,thumbnail,fanArt,desc,genre,date,credits,True)
                    else:
                        #print linkedUrl
                        addDir(name.encode('utf-8'),linkedUrl.encode('utf-8'),1,thumbnail,fanArt,desc,genre,date,None,'source')
                except:
                    addon_log('There was a problem adding directory from getData(): '+name.encode('utf-8', 'ignore'))
        else:
            addon_log('No Channels: getItems')
            getItems(soup('item'),fanart)
    else:
        # getSoup returned a raw m3u playlist string.
        parse_m3u(soup)
# Borrowed from https://github.com/enen92/P2P-Streams-XBMC/blob/master/plugin.video.p2p-streams/reba/core/livestreams.py
# m3u entries do not go through the getItems pipeline, so each line must carry a ready-to-play url (no regex resolution).
def parse_m3u(data):
content = data.rstrip()
match = re.compile(r'#EXTINF:(.+?),(.*?)[\n\r]+([^\r\n]+)').findall(content)
total = len(match)
print 'tsdownloader',tsdownloader
# print 'total m3u links',total
for other,channel_name,stream_url in match:
if 'tvg-logo' in other:
thumbnail = re_me(other,'tvg-logo=[\'"](.*?)[\'"]')
if thumbnail:
if thumbnail.startswith('http'):
thumbnail = thumbnail
elif not addon.getSetting('logo-folderPath') == "":
logo_url = addon.getSetting('logo-folderPath')
thumbnail = logo_url + thumbnail
else:
thumbnail = thumbnail
#else:
else:
thumbnail = ''
if 'type' in other:
mode_type = re_me(other,'type=[\'"](.*?)[\'"]')
if mode_type == 'yt-dl':
stream_url = stream_url +"&mode=18"
elif mode_type == 'regex':
url = stream_url.split('®exs=')
#print url[0] getSoup(url,data=None)
regexs = parse_regex(getSoup('',data=url[1]))
addLink(url[0], channel_name,thumbnail,'','','','','',None,regexs,total)
continue
elif mode_type == 'ftv':
stream_url = 'plugin://plugin.video.F.T.V/?name='+urllib.quote(channel_name) +'&url=' +stream_url +'&mode=125&ch_fanart=na'
elif tsdownloader and '.ts' in stream_url:
stream_url = 'plugin://plugin.video.f4mTester/?url='+urllib.quote_plus(stream_url)+'&streamtype=TSDOWNLOADER&name='+urllib.quote(channel_name)
addLink(stream_url, channel_name,thumbnail,'','','','','',None,'',total)
def imdb_id_from_title(title):
    """Return the IMDB id for a movie-title search string.

    Args:
        title (str): the movie title search string

    Returns:
        str. IMDB id, e.g. 'tt0095016'
        None. If no match was found or the lookup failed

    The original raised a bare `raise` (no active exception) when no id
    was found, contradicting its own documented contract; failures now
    return None as documented.
    """
    search_url = 'http://www.imdb.com/xml/find?json=1&nr=1&tt=on&q=' + title
    try:
        response = urllib2.urlopen(search_url)
        try:
            payload = response.read()
        finally:
            # The original leaked the connection on every call.
            response.close()
        results = json.loads(payload)
        # First approximate match; missing/empty sections raise and fall
        # through to the None return below.
        return results["title_approx"][0]["id"]
    except Exception:
        return None
def getChannelItems(name,url,fanart):
    """List one named <channel> from the source: its <subchannel>
    directories followed by its <item> entries.

    name   -- channel name as utf-8 bytes (decoded for the soup lookup)
    url    -- source url, fetched and parsed again here
    fanart -- fallback fanart when the channel defines none
    """
    soup = getSoup(url)
    channel_list = soup.find('channel', attrs={'name' : name.decode('utf-8')})
    items = channel_list('item')
    # Channel-level fanart becomes the default for every entry below.
    try:
        fanArt = channel_list('fanart')[0].string
        if fanArt == None:
            raise
    except:
        fanArt = fanart
    for channel in channel_list('subchannel'):
        name = channel('name')[0].string
        try:
            thumbnail = channel('thumbnail')[0].string
            if thumbnail == None:
                raise
        except:
            thumbnail = ''
        # NOTE(review): this `else` is paired with the inner use_thumb test,
        # so channel('fanart')[0] is only read when the tag list is EMPTY
        # (which raises IndexError and is swallowed, keeping the previous
        # fanArt).  Compare getData(), where the else pairs with the outer
        # if — looks like a misindent; confirm before changing behavior.
        try:
            if not channel('fanart'):
                if addon.getSetting('use_thumb') == "true":
                    fanArt = thumbnail
                else:
                    fanArt = channel('fanart')[0].string
                    if fanArt == None:
                        raise
        except:
            pass
        # Optional metadata defaults to '' when missing or empty.
        try:
            desc = channel('info')[0].string
            if desc == None:
                raise
        except:
            desc = ''
        try:
            genre = channel('genre')[0].string
            if genre == None:
                raise
        except:
            genre = ''
        try:
            date = channel('date')[0].string
            if date == None:
                raise
        except:
            date = ''
        try:
            credits = channel('credits')[0].string
            if credits == None:
                raise
        except:
            credits = ''
        # Optional TMDB enrichment keyed on an <imdb> tag: fill in missing
        # description/fanart/poster from themoviedb (pt first, then en).
        # NOTE(review): the `except: raise` below re-raises ANY failure —
        # including a subchannel simply lacking an <imdb> tag — which
        # aborts the whole listing; confirm that is intended.
        try:
            imdbID = channel('imdb')[0].string
            if imdbID == None:
                #imdbID = imdb_id_from_title(name)
                #addon_log(name.encode('utf-8')+" - "+imdbID)
                #if imdbID == None:
                #    raise
                #else:
                #    response = urllib2.urlopen('http://api.themoviedb.org/3/find/'+imdbID+'?external_source=imdb_id&language=pt&api_key=3421e679385f33e2438463e286e5c918')
                #    html = response.read()
                #    html2=json.loads(html)
                #    tempor = html2["movie_results"][0]["overview"]
                #    if tempor == None:
                #        raise
                #    else:
                #        desc=tempor
                #"""
                raise
            else:
                desc = desc.encode('utf-8')
                if (thumbnail=="" or fanArt=="" or desc.find("Nenhuma descrição")!=-1):
                    #addon_log("treta")
                    # Portuguese description first...
                    response = urllib2.urlopen('http://api.themoviedb.org/3/find/'+channel('imdb')[0].string+'?external_source=imdb_id&language=pt&api_key=3421e679385f33e2438463e286e5c918')
                    html = response.read()
                    html2=json.loads(html)
                    tempor = html2["movie_results"][0]["overview"]
                    #addon_log(html)
                    #addon_log(tempor)
                    if tempor == None:
                        # ...falling back to English when absent.
                        response = urllib2.urlopen('http://api.themoviedb.org/3/find/'+channel('imdb')[0].string+'?external_source=imdb_id&language=en&api_key=3421e679385f33e2438463e286e5c918')
                        html = response.read()
                        html2=json.loads(html)
                        tempor = html2["movie_results"][0]["overview"]
                        if tempor == None:
                            raise
                        else:
                            desc=tempor
                    else:
                        desc=tempor
                    tras = html2["movie_results"][0]["backdrop_path"]
                    if tras == None:
                        raise
                    else:
                        fanArt = 'http://image.tmdb.org/t/p/w500/'+tras
                    poster = html2["movie_results"][0]["poster_path"]
                    if poster == None:
                        raise
                    else:
                        thumbnail = 'http://image.tmdb.org/t/p/w150/'+poster
                    vote_average = html2["movie_results"][0]["vote_average"]
                    if vote_average == None:
                        raise
                    else:
                        vote_average = ''
                    #try:
                    #    response2 = urllib2.urlopen('http://api.themoviedb.org/3/movie/'+channel('imdb')[0].string+'/videos?api_key=3421e679385f33e2438463e286e5c918')
                    #    html3 = response2.read()
                    #addon_log (len(html3.encode('utf-8')))
                    #    if len(html3.encode('utf-8')) < 40:
                    #        addon_log("nada")
                    #        continue
                    #    else:
                    #        html4=json.loads(html3)
                    #        tempor2 = html4["results"][0]["key"]
                    #        addon_log(tempor2.encode('utf-8'))
                    #except:
                    #    raise
        except:
            raise
        #addon_log(name.encode('utf-8'))
        #addon_log(desc.encode('utf-8'))
        # mode 3: list the subchannel's subitems.
        # NOTE(review): arg order here is genre,credits,date while getData
        # passes genre,date,credits — confirm against addDir's signature.
        try:
            addDir(name.encode('utf-8', 'ignore'),url.encode('utf-8'),3,thumbnail,fanArt,desc,genre,credits,date)
        except:
            addon_log('There was a problem adding directory - '+name.encode('utf-8', 'ignore'))
    getItems(items,fanArt)
def getSubChannelItems(name,url,fanart):
    """List the <subitem> entries of the named <subchannel> in the source."""
    document = getSoup(url)
    subchannel = document.find('subchannel', attrs={'name': name.decode('utf-8')})
    getItems(subchannel('subitem'), fanart)
def getItems(items,fanart,dontLink=False):
    """Add a directory/playable entry for every <item> element.

    items    -- soup result set of <item> (or <subitem>) elements
    fanart   -- fallback fanart for items without their own
    dontLink -- when True, return (name, url, regexs) for the single-url
                item instead of adding it (used by resolve-style callers)

    Each item carries one link flavour (<link>, <sportsdevil>, <p2p>,
    <vaughn>, <ilive>, <yt-dl>, <dm>, <dmlive>, <utube>, <imdb>, <f4m>,
    <ftv>, <urlsolve>) that is rewritten into the matching plugin url.
    Items with several urls become a numbered list or a playlist depending
    on the add_playlist / ask_playlist_items settings; <externallink> and
    <jsonrpc> turn the item into a directory instead of a playable link.

    FIX: the <ilive> branch previously built its plugin url but never
    appended it to `url`, so every ilive item fell into the len(url)<1
    raise below and was silently skipped.
    """
    total = len(items)
    addon_log('Total Items: %s' %total)
    add_playlist = addon.getSetting('add_playlist')
    ask_playlist_items =addon.getSetting('ask_playlist_items')
    use_thumb = addon.getSetting('use_thumb')
    parentalblock =addon.getSetting('parentalblocked')
    parentalblock= parentalblock=="true"
    for item in items:
        isXMLSource=False
        isJsonrpc = False
        applyblock='false'
        try:
            applyblock = item('parentalblock')[0].string
        except:
            addon_log('parentalblock Error')
            applyblock = ''
        # Skip blocked items when the parental lock is engaged.
        if applyblock=='true' and parentalblock: continue
        try:
            name = item('title')[0].string
            if name is None:
                name = 'unknown?'
        except:
            addon_log('Name Error')
            name = ''
        # Optional EPG decoration of the display name.
        try:
            if item('epg'):
                if item.epg_url:
                    addon_log('Get EPG Regex')
                    epg_url = item.epg_url.string
                    epg_regex = item.epg_regex.string
                    epg_name = get_epg(epg_url, epg_regex)
                    if epg_name:
                        name += ' - ' + epg_name
                elif item('epg')[0].string > 1:
                    name += getepg(item('epg')[0].string)
                else:
                    pass
        except:
            addon_log('EPG Error')
        # Collect the item's playable url(s), rewriting per link flavour.
        try:
            url = []
            if len(item('link')) >0:
                #print 'item link', item('link')
                for i in item('link'):
                    if not i.string == None:
                        url.append(i.string)
            elif len(item('sportsdevil')) >0:
                for i in item('sportsdevil'):
                    if not i.string == None:
                        sportsdevil = 'plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=' +i.string
                        referer = item('referer')[0].string
                        if referer:
                            #print 'referer found'
                            sportsdevil = sportsdevil + '%26referer=' +referer
                        url.append(sportsdevil)
            elif len(item('p2p')) >0:
                for i in item('p2p'):
                    if not i.string == None:
                        if 'sop://' in i.string:
                            sop = 'plugin://plugin.video.p2p-streams/?mode=2url='+i.string +'&' + 'name='+name
                            url.append(sop)
                        else:
                            p2p='plugin://plugin.video.p2p-streams/?mode=1&url='+i.string +'&' + 'name='+name
                            url.append(p2p)
            elif len(item('vaughn')) >0:
                for i in item('vaughn'):
                    if not i.string == None:
                        vaughn = 'plugin://plugin.stream.vaughnlive.tv/?mode=PlayLiveStream&channel='+i.string
                        url.append(vaughn)
            elif len(item('ilive')) >0:
                for i in item('ilive'):
                    if not i.string == None:
                        if not 'http' in i.string:
                            ilive = 'plugin://plugin.video.tbh.ilive/?url=http://www.streamlive.to/view/'+i.string+'&link=99&mode=iLivePlay'
                        else:
                            ilive = 'plugin://plugin.video.tbh.ilive/?url='+i.string+'&link=99&mode=iLivePlay'
                        # FIX: append was missing in the original.
                        url.append(ilive)
            elif len(item('yt-dl')) >0:
                for i in item('yt-dl'):
                    if not i.string == None:
                        ytdl = i.string + '&mode=18'
                        url.append(ytdl)
            elif len(item('dm')) >0:
                for i in item('dm'):
                    if not i.string == None:
                        dm = "plugin://plugin.video.dailymotion_com/?mode=playVideo&url=" + i.string
                        url.append(dm)
            elif len(item('dmlive')) >0:
                for i in item('dmlive'):
                    if not i.string == None:
                        dm = "plugin://plugin.video.dailymotion_com/?mode=playLiveVideo&url=" + i.string
                        url.append(dm)
            elif len(item('utube')) >0:
                # Heuristics: search query, 11-char video id, PL/UU playlist,
                # UC channel id, else a user name.
                for i in item('utube'):
                    if not i.string == None:
                        if ' ' in i.string :
                            utube = 'plugin://plugin.video.youtube/search/?q='+ urllib.quote_plus(i.string)
                            isJsonrpc=utube
                        elif len(i.string) == 11:
                            utube = 'plugin://plugin.video.youtube/play/?video_id='+ i.string
                        elif (i.string.startswith('PL') and not '&order=' in i.string) or i.string.startswith('UU'):
                            utube = 'plugin://plugin.video.youtube/play/?&order=default&playlist_id=' + i.string
                        elif i.string.startswith('PL') or i.string.startswith('UU'):
                            utube = 'plugin://plugin.video.youtube/play/?playlist_id=' + i.string
                        elif i.string.startswith('UC') and len(i.string) > 12:
                            utube = 'plugin://plugin.video.youtube/channel/' + i.string + '/'
                            isJsonrpc=utube
                        elif not i.string.startswith('UC') and not (i.string.startswith('PL')) :
                            utube = 'plugin://plugin.video.youtube/user/' + i.string + '/'
                            isJsonrpc=utube
                        url.append(utube)
            elif len(item('imdb')) >0:
                for i in item('imdb'):
                    if not i.string == None:
                        if addon.getSetting('genesisorpulsar') == '0':
                            imdb = 'plugin://plugin.video.genesis/?action=play&imdb='+i.string
                        else:
                            imdb = 'plugin://plugin.video.pulsar/movie/tt'+i.string+'/play'
                        url.append(imdb)
            elif len(item('f4m')) >0:
                for i in item('f4m'):
                    if not i.string == None:
                        if '.f4m' in i.string:
                            f4m = 'plugin://plugin.video.f4mTester/?url='+urllib.quote_plus(i.string)
                        elif '.m3u8' in i.string:
                            f4m = 'plugin://plugin.video.f4mTester/?url='+urllib.quote_plus(i.string)+'&streamtype=HLS'
                        else:
                            f4m = 'plugin://plugin.video.f4mTester/?url='+urllib.quote_plus(i.string)+'&streamtype=SIMPLE'
                        url.append(f4m)
            elif len(item('ftv')) >0:
                for i in item('ftv'):
                    if not i.string == None:
                        ftv = 'plugin://plugin.video.F.T.V/?name='+urllib.quote(name) +'&url=' +i.string +'&mode=125&ch_fanart=na'
                        url.append(ftv)
            elif len(item('urlsolve')) >0:
                for i in item('urlsolve'):
                    if not i.string == None:
                        resolver = i.string +'&mode=19'
                        url.append(resolver)
            if len(url) < 1:
                raise
        except:
            addon_log('Error <link> element, Passing:'+name.encode('utf-8', 'ignore'))
            continue
        # <externallink>/<jsonrpc> make the item a directory, not a link.
        try:
            isXMLSource = item('externallink')[0].string
        except: pass
        if isXMLSource:
            ext_url=[isXMLSource]
            isXMLSource=True
        else:
            isXMLSource=False
        try:
            isJsonrpc = item('jsonrpc')[0].string
        except: pass
        if isJsonrpc:
            ext_url=[isJsonrpc]
            #print 'JSON-RPC ext_url',ext_url
            isJsonrpc=True
        else:
            isJsonrpc=False
        # Artwork and optional metadata with the usual fallbacks.
        try:
            thumbnail = item('thumbnail')[0].string
            if thumbnail == None:
                raise
        except:
            thumbnail = ''
        try:
            if not item('fanart'):
                if addon.getSetting('use_thumb') == "true":
                    fanArt = thumbnail
                else:
                    fanArt = fanart
            else:
                fanArt = item('fanart')[0].string
            if fanArt == None:
                raise
        except:
            fanArt = fanart
        try:
            desc = item('info')[0].string
            if desc == None:
                raise
        except:
            desc = ''
        try:
            genre = item('genre')[0].string
            if genre == None:
                raise
        except:
            genre = ''
        try:
            date = item('date')[0].string
            if date == None:
                raise
        except:
            date = ''
        regexs = None
        if item('regex'):
            try:
                reg_item = item('regex')
                regexs = parse_regex(reg_item)
            except:
                pass
        # Emit the entry: numbered multi-links, playlist, directory or link.
        try:
            if len(url) > 1:
                alt = 0
                playlist = []
                for i in url:
                    if add_playlist == "false":
                        alt += 1
                        addLink(i,'%s) %s' %(alt, name.encode('utf-8', 'ignore')),thumbnail,fanArt,desc,genre,date,True,playlist,regexs,total)
                    elif add_playlist == "true" and ask_playlist_items == 'true':
                        if regexs:
                            playlist.append(i+'&regexs='+regexs)
                        elif any(x in i for x in resolve_url) and i.startswith('http'):
                            playlist.append(i+'&mode=19')
                        else:
                            playlist.append(i)
                    else:
                        playlist.append(i)
                if len(playlist) > 1:
                    addLink('', name,thumbnail,fanArt,desc,genre,date,True,playlist,regexs,total)
            else:
                if dontLink:
                    return name,url[0],regexs
                if isXMLSource:
                    if not regexs == None: #<externallink> and <regex>
                        addDir(name.encode('utf-8'),ext_url[0].encode('utf-8'),1,thumbnail,fanart,desc,genre,date,None,'!!update',regexs,url[0].encode('utf-8'))
                    else:
                        addDir(name.encode('utf-8'),ext_url[0].encode('utf-8'),1,thumbnail,fanart,desc,genre,date,None,'source',None,None)
                elif isJsonrpc:
                    addDir(name.encode('utf-8'),ext_url[0],53,thumbnail,fanart,desc,genre,date,None,'source')
                else:
                    addLink(url[0],name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
        except:
            addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
def parse_regex(reg_item):
    """Translate <regex> soup elements into the url-quoted repr(dict) blob
    consumed by getRegexParsed.

    reg_item -- iterable of <regex> soup elements; each needs a <name> and
                a <page> child, everything else is optional.

    Returns the quoted string, or None when any element cannot be parsed
    (for example a missing mandatory <page> tag).
    """
    # Optional tags copied verbatim when present, scanned in the original
    # order, each with the exact log message emitted when absent.
    _OPTIONAL_TAGS = [
        ('referer', "Regex: -- No Referer --"),
        ('connection', "Regex: -- No connection --"),
        ('notplayable', "Regex: -- No notplayable --"),
        ('noredirect', "Regex: -- No noredirect --"),
        ('origin', "Regex: -- No origin --"),
        ('accept', "Regex: -- No accept --"),
        ('includeheaders', "Regex: -- No includeheaders --"),
        ('listrepeat', "Regex: -- No listrepeat --"),
        ('proxy', "Regex: -- No proxy --"),
        ('x-req', "Regex: -- No x-req --"),
        ('x-addr', "Regex: -- No x-addr --"),
        ('x-forward', "Regex: -- No x-forward --"),
        ('agent', "Regex: -- No User Agent --"),
        ('post', "Regex: -- Not a post"),
        ('rawpost', "Regex: -- Not a rawpost"),
        ('htmlunescape', "Regex: -- Not a htmlunescape"),
        ('readcookieonly', "Regex: -- Not a readCookieOnly"),
    ]
    # Tags the original scanned after the cookiejar block.
    _LATE_TAGS = [
        ('setcookie', "Regex: -- Not a setcookie"),
        ('appendcookie', "Regex: -- Not a appendcookie"),
        ('ignorecache', "Regex: -- no ignorecache"),
    ]
    try:
        regexs = {}
        for i in reg_item:
            key = i('name')[0].string
            entry = {'name': key}
            regexs[key] = entry
            try:
                entry['expres'] = i('expres')[0].string
                if not entry['expres']:
                    entry['expres'] = ''
            except:
                # FIX: the original logged "No Referer" here (copy/paste slip).
                addon_log("Regex: -- No expres --")
            # <page> is mandatory: a missing tag aborts the whole parse.
            entry['page'] = i('page')[0].string
            for tag, missing_msg in _OPTIONAL_TAGS:
                try:
                    entry[tag] = i(tag)[0].string
                except:
                    addon_log(missing_msg)
            # <cookiejar> is optional but normalised to '' when empty.
            try:
                entry['cookiejar'] = i('cookiejar')[0].string
                if not entry['cookiejar']:
                    entry['cookiejar'] = ''
            except:
                addon_log("Regex: -- Not a cookieJar")
            for tag, missing_msg in _LATE_TAGS:
                try:
                    entry[tag] = i(tag)[0].string
                except:
                    addon_log(missing_msg)
        return urllib.quote(repr(regexs))
    except:
        # FIX: the original logged via an undefined-in-scope global `name`,
        # which could raise NameError out of this error handler.
        addon_log('regex Error: could not parse <regex> element')
        return None
# Copied from lambda's implementation.
def get_ustream(url):
    """Poll a ustream m3u8 url (up to 50 tries, 2s apart) until variant
    playlists appear; return the url when ready, None otherwise."""
    try:
        for _attempt in range(1, 51):
            body = getUrl(url)
            if "EXT-X-STREAM-INF" in body:
                return url
            if "EXTM3U" not in body:
                # Not an m3u8 at all: give up immediately.
                return
            xbmc.sleep(2000)
        return
    except:
        return
def getRegexParsed(regexs, url,cookieJar=None,forCookieJarOnly=False,recursiveCall=False,cachedPages={}, rawPost=False, cookie_jar_file=None):#0,1,2 = URL, regexOnly, CookieJarOnly
    """Resolve every $doregex[name] placeholder in `url` using the regex
    definitions produced by parse_regex.

    regexs           -- url-quoted repr(dict) on the first call (it is
                        eval'd back); already a dict on recursive calls
    url              -- template containing $doregex[...] placeholders
    cookieJar        -- cookielib jar shared across nested resolutions
    forCookieJarOnly -- only build/refresh the cookie jar, return it
    recursiveCall    -- internal: nested placeholder resolution
    cachedPages      -- NOTE(review): mutable default shared across calls;
                        it acts as a page cache keyed by url — confirm the
                        cross-call sharing is intentional
    rawPost          -- internal: value is being built for a raw POST body

    Returns (resolved_url, setresolved) on the top-level call, just the
    url string on recursive calls, the jar when forCookieJarOnly, or None
    when resolution produced an empty url.  For listrepeat regexes it
    returns (listrepeat, matches, m, regexs) instead.
    """
    if not recursiveCall:
        # NOTE(review): eval of the quoted repr — data comes from the
        # source xml, so treat sources as trusted input.
        regexs = eval(urllib.unquote(regexs))
    #cachedPages = {}
    #print 'url',url
    doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
#        print 'doRegexs',doRegexs,regexs
    setresolved=True
    for k in doRegexs:
        if k in regexs:
            #print 'processing ' ,k
            m = regexs[k]
            #print m
            cookieJarParam=False
            # Cookie jar bootstrap: the 'cookiejar' value may itself contain
            # $doregex placeholders, and open[file]/save[file] directives.
            if 'cookiejar' in m: # so either create or reuse existing jar
                #print 'cookiejar exists',m['cookiejar']
                cookieJarParam=m['cookiejar']
                if  '$doregex' in cookieJarParam:
                    cookieJar=getRegexParsed(regexs, m['cookiejar'],cookieJar,True, True,cachedPages)
                    cookieJarParam=True
                else:
                    cookieJarParam=True
            #print 'm[cookiejar]',m['cookiejar'],cookieJar
            if cookieJarParam:
                if cookieJar==None:
                    #print 'create cookie jar'
                    cookie_jar_file=None
                    if 'open[' in m['cookiejar']:
                        cookie_jar_file=m['cookiejar'].split('open[')[1].split(']')[0]
#                            print 'cookieJar from file name',cookie_jar_file
                    cookieJar=getCookieJar(cookie_jar_file)
#                        print 'cookieJar from file',cookieJar
                    if cookie_jar_file:
                        saveCookieJar(cookieJar,cookie_jar_file)
                    #import cookielib
                    #cookieJar = cookielib.LWPCookieJar()
                    #print 'cookieJar new',cookieJar
                elif 'save[' in m['cookiejar']:
                    cookie_jar_file=m['cookiejar'].split('save[')[1].split(']')[0]
                    complete_path=os.path.join(profile,cookie_jar_file)
#                        print 'complete_path',complete_path
                    saveCookieJar(cookieJar,cookie_jar_file)
            # Recursively resolve placeholders inside the request fields.
            if  m['page'] and '$doregex' in m['page']:
                pg=getRegexParsed(regexs, m['page'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
                if len(pg)==0:
                    pg='http://regexfailed'
                m['page']=pg
            if 'setcookie' in m and m['setcookie'] and '$doregex' in m['setcookie']:
                m['setcookie']=getRegexParsed(regexs, m['setcookie'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
            if 'appendcookie' in m and m['appendcookie'] and '$doregex' in m['appendcookie']:
                m['appendcookie']=getRegexParsed(regexs, m['appendcookie'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
            if 'post' in m and '$doregex' in m['post']:
                m['post']=getRegexParsed(regexs, m['post'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
#                    print 'post is now',m['post']
            if 'rawpost' in m and '$doregex' in m['rawpost']:
                m['rawpost']=getRegexParsed(regexs, m['rawpost'],cookieJar,recursiveCall=True,cachedPages=cachedPages,rawPost=True)
                #print 'rawpost is now',m['rawpost']
            if 'rawpost' in m and '$epoctime$' in m['rawpost']:
                m['rawpost']=m['rawpost'].replace('$epoctime$',getEpocTime())
            if 'rawpost' in m and '$epoctime2$' in m['rawpost']:
                m['rawpost']=m['rawpost'].replace('$epoctime2$',getEpocTime2())
            link=''
            # Fetch the page (or reuse the cached copy) that the regex
            # will be applied to.
            if m['page'] and m['page'] in cachedPages and not 'ignorecache' in m and forCookieJarOnly==False :
                #print 'using cache page',m['page']
                link = cachedPages[m['page']]
            else:
                if m['page'] and  not m['page']=='' and  m['page'].startswith('http'):
                    if '$epoctime$' in m['page']:
                        m['page']=m['page'].replace('$epoctime$',getEpocTime())
                    if '$epoctime2$' in m['page']:
                        m['page']=m['page'].replace('$epoctime2$',getEpocTime2())
                    #print 'Ingoring Cache',m['page']
                    # "url|name=value&..." appends extra request headers.
                    page_split=m['page'].split('|')
                    pageUrl=page_split[0]
                    header_in_page=None
                    if len(page_split)>1:
                        header_in_page=page_split[1]
#                            if
#                            proxy = urllib2.ProxyHandler({ ('https' ? proxytouse[:5]=="https":"http") : proxytouse})
#                            opener = urllib2.build_opener(proxy)
#                            urllib2.install_opener(opener)
#                        import urllib2
#                        print 'urllib2.getproxies',urllib2.getproxies()
                    current_proxies=urllib2.ProxyHandler(urllib2.getproxies())
                    #print 'getting pageUrl',pageUrl
                    req = urllib2.Request(pageUrl)
                    # Optional per-regex proxy, installed globally for the
                    # fetch and restored after the request below.
                    if 'proxy' in m:
                        proxytouse= m['proxy']
#                            print 'proxytouse',proxytouse
#                            urllib2.getproxies= lambda: {}
                        if pageUrl[:5]=="https":
                            proxy = urllib2.ProxyHandler({ 'https' : proxytouse})
                            #req.set_proxy(proxytouse, 'https')
                        else:
                            proxy = urllib2.ProxyHandler({ 'http'  : proxytouse})
                            #req.set_proxy(proxytouse, 'http')
                        opener = urllib2.build_opener(proxy)
                        urllib2.install_opener(opener)
                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
                    proxytouse=None
                    # Optional request headers from the regex definition.
                    if 'referer' in m:
                        req.add_header('Referer', m['referer'])
                    if 'accept' in m:
                        req.add_header('Accept', m['accept'])
                    if 'agent' in m:
                        req.add_header('User-agent', m['agent'])
                    if 'x-req' in m:
                        req.add_header('X-Requested-With', m['x-req'])
                    if 'x-addr' in m:
                        req.add_header('x-addr', m['x-addr'])
                    if 'x-forward' in m:
                        req.add_header('X-Forwarded-For', m['x-forward'])
                    if 'setcookie' in m:
#                            print 'adding cookie',m['setcookie']
                        req.add_header('Cookie', m['setcookie'])
                    if 'appendcookie' in m:
#                            print 'appending cookie to cookiejar',m['appendcookie']
                        # appendcookie format: "domain:name=value;..."
                        cookiestoApend=m['appendcookie']
                        cookiestoApend=cookiestoApend.split(';')
                        for h in cookiestoApend:
                            n,v=h.split('=')
                            w,n= n.split(':')
                            ck = cookielib.Cookie(version=0, name=n, value=v, port=None, port_specified=False, domain=w, domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
                            cookieJar.set_cookie(ck)
                    if 'origin' in m:
                        req.add_header('Origin', m['origin'])
                    if header_in_page:
                        header_in_page=header_in_page.split('&')
                        for h in header_in_page:
                            n,v=h.split('=')
                            req.add_header(n,v)
                    # Install the opener stack: cookies, optional
                    # no-redirect handler, optional keep-alive handler.
                    if not cookieJar==None:
#                            print 'cookieJarVal',cookieJar
                        cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
                        opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
                        opener = urllib2.install_opener(opener)
#                            print 'noredirect','noredirect' in m
                        if 'noredirect' in m:
                            opener = urllib2.build_opener(cookie_handler,NoRedirection, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
                            opener = urllib2.install_opener(opener)
                    elif 'noredirect' in m:
                        opener = urllib2.build_opener(NoRedirection, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
                        opener = urllib2.install_opener(opener)
                    if 'connection' in m:
#                            print '..........................connection//////.',m['connection']
                        from keepalive import HTTPHandler
                        keepalive_handler = HTTPHandler()
                        opener = urllib2.build_opener(keepalive_handler)
                        urllib2.install_opener(opener)
                    #print 'after cookie jar'
                    post=None
                    # POST body: either "name:value,..." pairs urlencoded,
                    # or a raw body used verbatim.
                    if 'post' in m:
                        postData=m['post']
                        #if '$LiveStreamRecaptcha' in postData:
                        #    (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
                        #    if captcha_challenge:
                        #        postData=postData.replace('$LiveStreamRecaptcha','manual_recaptcha_challenge_field:'+captcha_challenge+',recaptcha_response_field:'+catpcha_word+',id:'+idfield)
                        splitpost=postData.split(',');
                        post={}
                        for p in splitpost:
                            n=p.split(':')[0];
                            v=p.split(':')[1];
                            post[n]=v
                        post = urllib.urlencode(post)
                    if 'rawpost' in m:
                        post=m['rawpost']
                        #if '$LiveStreamRecaptcha' in post:
                        #    (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
                        #    if captcha_challenge:
                        #        post=post.replace('$LiveStreamRecaptcha','&manual_recaptcha_challenge_field='+captcha_challenge+'&recaptcha_response_field='+catpcha_word+'&id='+idfield)
                    link=''
                    try:
                        if post:
                            response = urllib2.urlopen(req,post)
                        else:
                            response = urllib2.urlopen(req)
                        if response.info().get('Content-Encoding') == 'gzip':
                            from StringIO import StringIO
                            import gzip
                            buf = StringIO( response.read())
                            f = gzip.GzipFile(fileobj=buf)
                            link = f.read()
                        else:
                            link=response.read()
                        # Restore the system proxies that were in effect
                        # before the per-regex proxy override.
                        if 'proxy' in m and not current_proxies is None:
                            urllib2.install_opener(urllib2.build_opener(current_proxies))
                        link=javascriptUnEscape(link)
                        #print repr(link)
                        #print link This just print whole webpage in LOG
                        # Optionally expose the response headers to the
                        # regex via the $$HEADERS_...$$ markers.
                        if 'includeheaders' in m:
                            #link+=str(response.headers.get('Set-Cookie'))
                            link+='$$HEADERS_START$$:'
                            for b in response.headers:
                                link+= b+':'+response.headers.get(b)+'\n'
                            link+='$$HEADERS_END$$:'
#                        print link
                        addon_log(link)
                        addon_log(cookieJar )
                        response.close()
                    except:
                        pass
                    cachedPages[m['page']] = link
                    #print link
                    #print 'store link for',m['page'],forCookieJarOnly
                    if forCookieJarOnly:
                        return cookieJar# do nothing
                elif m['page'] and not m['page'].startswith('http'):
                    # Non-http "pages": inline python, or a literal value.
                    if m['page'].startswith('$pyFunction:'):
                        val=doEval(m['page'].split('$pyFunction:')[1],'',cookieJar,m )
                        if forCookieJarOnly:
                            return cookieJar# do nothing
                        link=val
                        link=javascriptUnEscape(link)
                    else:
                        link=m['page']
            # Playback-side markers mean the caller must not setResolved.
            if '$pyFunction:playmedia(' in m['expres'] or 'ActivateWindow' in m['expres'] or '$PLAYERPROXY$=' in url or any(x in url for x in g_ignoreSetResolved):
                setresolved=False
            if '$doregex' in m['expres']:
                m['expres']=getRegexParsed(regexs, m['expres'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
            if not m['expres']=='':
                #print 'doing it ',m['expres']
                # Three expression flavours: captcha prompt, python
                # function, or a plain regex applied to the fetched page.
                if '$LiveStreamCaptcha' in m['expres']:
                    val=askCaptcha(m,link,cookieJar)
                    #print 'url and val',url,val
                    url = url.replace("$doregex[" + k + "]", val)
                elif m['expres'].startswith('$pyFunction:') or '#$pyFunction' in m['expres']:
                    #print 'expeeeeeeeeeeeeeeeeeee',m['expres']
                    val=''
                    if m['expres'].startswith('$pyFunction:'):
                        val=doEval(m['expres'].split('$pyFunction:')[1],link,cookieJar,m)
                    else:
                        val=doEvalFunction(m['expres'],link,cookieJar,m)
                    if 'ActivateWindow' in m['expres']: return
#                        print 'url k val',url,k,val
                    #print 'repr',repr(val)
                    try:
                        url = url.replace(u"$doregex[" + k + "]", val)
                    except: url = url.replace("$doregex[" + k + "]", val.decode("utf-8"))
                else:
                    # listrepeat short-circuits: the caller expands the
                    # matches into multiple list entries itself.
                    if 'listrepeat' in m:
                        listrepeat=m['listrepeat']
                        ret=re.findall(m['expres'],link)
                        return listrepeat,ret, m,regexs
                    val=''
                    if not link=='':
                        #print 'link',link
                        reg = re.compile(m['expres']).search(link)
                        try:
                            val=reg.group(1).strip()
                        except: traceback.print_exc()
                    elif m['page']=='' or m['page']==None:
                        # No page at all: the expression IS the value.
                        val=m['expres']
                    if rawPost:
#                            print 'rawpost'
                        val=urllib.quote_plus(val)
                    if 'htmlunescape' in m:
                        #val=urllib.unquote_plus(val)
                        import HTMLParser
                        val=HTMLParser.HTMLParser().unescape(val)
                    try:
                        url = url.replace("$doregex[" + k + "]", val)
                    except: url = url.replace("$doregex[" + k + "]", val.decode("utf-8"))
                    #print 'ur',url
                    #return val
            else:
                url = url.replace("$doregex[" + k + "]",'')
    # Final template substitutions on the fully-resolved url.
    if '$epoctime$' in url:
        url=url.replace('$epoctime$',getEpocTime())
    if '$epoctime2$' in url:
        url=url.replace('$epoctime2$',getEpocTime2())
    if '$GUID$' in url:
        import uuid
        url=url.replace('$GUID$',str(uuid.uuid1()).upper())
    if '$get_cookies$' in url:
        url=url.replace('$get_cookies$',getCookiesString(cookieJar))
    if recursiveCall: return url
    #print 'final url',repr(url)
    if url=="":
        return
    else:
        return url,setresolved
def getmd5(t):
    """Return the hex MD5 digest of t (a byte string)."""
    import hashlib
    return hashlib.md5(t).hexdigest()
def decrypt_vaughnlive(encrypted):
    """Decode a vaughnlive-encrypted channel string.

    NOTE(review): the actual decoding (splitting on ':' and mapping each
    "0m0"-prefixed number to its character) is commented out, so this
    currently always returns None implicitly and `retVal` is unused —
    confirm callers expect that before re-enabling.
    """
    retVal=""
#     print 'enc',encrypted
    #for val in encrypted.split(':'):
    #    retVal+=chr(int(val.replace("0m0","")))
    #return retVal
def playmedia(media_url):
    """Play media_url with the addon's CustomPlayer and block until playback
    ends.  Uses the module-level `name` global for the list item label.
    Always returns ''."""
    try:
        import CustomPlayer
        xbmc_player = CustomPlayer.MyXBMCPlayer()
        item = xbmcgui.ListItem(label=str(name), iconImage="DefaultVideo.png", thumbnailImage=xbmc.getInfoImage("ListItem.Thumb"), path=media_url)
        xbmc_player.play(media_url, item)
        xbmc.sleep(1000)
        # Poll until the custom player reports playback has finished.
        while xbmc_player.is_active:
            xbmc.sleep(200)
    except:
        traceback.print_exc()
    return ''
def kodiJsonRequest(params):
    """Execute a Kodi JSON-RPC call and return its 'result' payload.

    params -- request dict ('jsonrpc', 'method', 'params', 'id').

    Returns the response's 'result' member, or None on failure; an
    'error' in the response is logged.
    """
    data = json.dumps(params)
    request = xbmc.executeJSONRPC(data)
    try:
        response = json.loads(request)
    except UnicodeDecodeError:
        # Kodi can hand back non-utf8 bytes; retry leniently.
        response = json.loads(request.decode('utf-8', 'ignore'))
    if 'result' in response:
        return response['result']
    # FIX: the original tried to log this from an `except KeyError` that
    # could never fire (and via an undefined `logger`); log explicitly.
    if 'error' in response:
        addon_log("[%s] %s" % (params.get('method'), response['error'].get('message')))
    return None
def setKodiProxy(proxysettings=None):
    """Apply (or, when proxysettings is None, clear) Kodi's global HTTP proxy.

    proxysettings format: "server:port:type[:user@pass]".
    """
    if proxysettings==None:
        # Clear: just switch the proxy off.
        xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.usehttpproxy", "value":false}, "id":1}')
        return
    parts = proxysettings.split(':')
    proxy_server = parts[0]
    proxy_port = parts[1]
    proxy_type = parts[2]
    proxy_user = None
    proxy_pass = None
    # Optional 4th field carries "user@pass" credentials.
    if len(parts) > 3 and '@' in parts[3]:
        proxy_user = parts[3].split('@')[0]
        proxy_pass = parts[3].split('@')[1]
    xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.usehttpproxy", "value":true}, "id":1}')
    xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxytype", "value":' + str(proxy_type) +'}, "id":1}')
    xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxyserver", "value":"' + str(proxy_server) +'"}, "id":1}')
    xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxyport", "value":' + str(proxy_port) +'}, "id":1}')
    if not proxy_user==None:
        xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxyusername", "value":"' + str(proxy_user) +'"}, "id":1}')
        xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxypassword", "value":"' + str(proxy_pass) +'"}, "id":1}')
def getConfiguredProxy():
    """Read Kodi's current proxy configuration.

    Returns "server:port:type" (plus ":user@pass" when credentials are
    set), or None when no proxy is active or it is incomplete.
    """
    def _setting(key):
        # One Settings.GetSettingValue round trip per key.
        return kodiJsonRequest({'jsonrpc': '2.0', "method":"Settings.GetSettingValue", "params":{"setting": key}, 'id': 1})['value']
    proxyActive = _setting("network.usehttpproxy")
    # print 'proxyActive',proxyActive
    proxyType = _setting("network.httpproxytype")
    if proxyActive: # PROXY_HTTP
        proxyURL = _setting("network.httpproxyserver")
        proxyPort = unicode(_setting("network.httpproxyport"))
        proxyUsername = _setting("network.httpproxyusername")
        proxyPassword = _setting("network.httpproxypassword")
        if proxyUsername and proxyPassword and proxyURL and proxyPort:
            return proxyURL + ':' + str(proxyPort) + ':' + str(proxyType) + ':' + proxyUsername + '@' + proxyPassword
        elif proxyURL and proxyPort:
            return proxyURL + ':' + str(proxyPort) + ':' + str(proxyType)
    else:
        return None
def playmediawithproxy(media_url, name, iconImage,proxyip,port, proxyuser=None, proxypass=None): #jairox
    """Temporarily switch Kodi to the given HTTP proxy, play *media_url*
    with CustomPlayer, then restore the previous proxy configuration.

    Blocks until playback finishes. Always returns ''.
    """
    progress = xbmcgui.DialogProgress()
    progress.create('Progress', 'Playing with custom proxy')
    progress.update( 10, "", "setting proxy..", "" )
    proxyset=False
    existing_proxy=''
    #print 'playmediawithproxy'
    try:
        # remember the current proxy so it can be restored afterwards
        existing_proxy=getConfiguredProxy()
        print 'existing_proxy',existing_proxy
        #read and set here
        #jairox
        if not proxyuser == None:
            setKodiProxy( proxyip + ':' + port + ':0:' + proxyuser + '@' + proxypass)
        else:
            setKodiProxy( proxyip + ':' + port + ':0')
        #print 'proxy setting complete', getConfiguredProxy()
        proxyset=True
        progress.update( 80, "", "setting proxy complete, now playing", "" )
        progress.close()
        progress=None
        import CustomPlayer
        player = CustomPlayer.MyXBMCPlayer()
        listitem = xbmcgui.ListItem( label = str(name), iconImage = iconImage, thumbnailImage = xbmc.getInfoImage( "ListItem.Thumb" ), path=media_url )
        player.play( media_url,listitem)
        xbmc.sleep(1000)
        # poll until CustomPlayer reports playback ended
        while player.is_active:
            xbmc.sleep(200)
    except:
        traceback.print_exc()
    if progress:
        progress.close()
    # restore the original proxy only if we actually changed it
    if proxyset:
        # print 'now resetting the proxy back'
        setKodiProxy(existing_proxy)
    # print 'reset here'
    return ''
def get_saw_rtmp(page_value, referer=None):
    """Extract an rtmp spec ('<rtmp> playpath=<p> pageUrl=<u>') from a
    sawlive-style embed page, unpacking any packed javascript first.

    NOTE(review): page_url is only bound when page_value is a URL or when
    a packed eval block is found -- verify callers never pass raw HTML
    without one.
    """
    if referer:
        referer=[('Referer',referer)]
    if page_value.startswith("http"):
        page_url=page_value
        page_value= getUrl(page_value,headers=referer)
    # every packed (p,a,c,k,e,d) eval block on the page
    str_pattern="(eval\(function\(p,a,c,k,e,(?:r|d).*)"
    reg_res=re.compile(str_pattern).findall(page_value)
    r=""
    if reg_res and len(reg_res)>0:
        for v in reg_res:
            r1=get_unpacked(v)
            r2=re_me(r1,'\'(.*?)\'')
            if 'unescape' in r1:
                r1=urllib.unquote(r2)
            r+=r1+'\n'
        # print 'final value is ',r
        # the unpacked js points at the real embed frame; fetch it
        page_url=re_me(r,'src="(.*?)"')
        page_value= getUrl(page_url,headers=referer)
    #print page_value
    rtmp=re_me(page_value,'streamer\'.*?\'(.*?)\'\)')
    playpath=re_me(page_value,'file\',\s\'(.*?)\'')
    return rtmp+' playpath='+playpath +' pageUrl='+page_url
def get_leton_rtmp(page_value, referer=None):
    """Build an rtmp:// URL from an obfuscated leton-style player page.

    The page encodes the server IP as four variables a..d (each the real
    octet multiplied by f); v_part is the stream path. *page_value* may be
    a URL (fetched first, with optional Referer) or the raw page HTML.
    """
    if referer:
        referer = [('Referer', referer)]
    if page_value.startswith("http"):
        page_value = getUrl(page_value, headers=referer)
    str_pattern = r"var a = (.*?);\s*var b = (.*?);\s*var c = (.*?);\s*var d = (.*?);\s*var f = (.*?);\s*var v_part = '(.*?)';"
    a, b, c, d, f, v = re.compile(str_pattern).findall(page_value)[0]
    f = int(f)
    # Each octet is stored pre-multiplied by f. Use floor division so the
    # result stays an int on both Python 2 and 3 -- plain '/' would yield
    # floats (e.g. '10.0') under Python 3 and corrupt the address.
    a = int(a) // f
    b = int(b) // f
    c = int(c) // f
    d = int(d) // f
    return 'rtmp://' + str(a) + '.' + str(b) + '.' + str(c) + '.' + str(d) + v
def createM3uForDash(url, useragent=None):
    """Write a minimal M3U playlist wrapping *url* and return its path.

    The playlist is saved as 'testfile.m3u' in the addon profile directory
    with a byte-range hint appended to the URL. *useragent* is currently
    unused (kept for interface compatibility).
    """
    playlist_path = os.path.join(profile, 'testfile.m3u')
    content = '#EXTM3U'
    content += '\n#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=361816'
    content += '\n' + url + '&bytes=0-200000'  # +'|User-Agent='+useragent
    content += '\n'
    SaveToFile(playlist_path, content)
    #return 'C:/Users/shani/Downloads/test.m3u8'
    return playlist_path
def SaveToFile(file_name, page_data, append=False):
    """Write *page_data* to *file_name* and return ''.

    When *append* is true the data is appended in text mode; otherwise the
    file is (re)created in binary mode. Using 'with' guarantees the handle
    is closed even if the write raises (the original leaked it on error).
    """
    # NOTE(review): append uses text mode 'a' while overwrite uses binary
    # 'wb' -- preserved as-is since existing callers may rely on either.
    mode = 'a' if append else 'wb'
    with open(file_name, mode) as f:
        f.write(page_data)
    return ''
def LoadFile(file_name):
    """Read and return the full contents of *file_name* as bytes.

    'with' ensures the handle is closed even when read() raises (the
    original leaked it on error).
    """
    with open(file_name, 'rb') as f:
        return f.read()
def get_packed_iphonetv_url(page_data):
    """Peel nested lol(geh("base64")) wrappers from an iphonetv page until
    a plain, URL-decoded string remains, and return it."""
    import re,base64,urllib;
    s=page_data
    while 'geh(' in s:
        # strip the outer lol( ... ) wrapper, then decode the quoted payload
        if s.startswith('lol('): s=s[5:-1]
        # print 's is ',s
        s=re.compile('"(.*?)"').findall(s)[0];
        s= base64.b64decode(s);
        s=urllib.unquote(s);
    print s
    return s
def get_ferrari_url(page_data):
    """Warm up a ferrari playlist with a shared X-Playback-Session-Id.

    Fetches *page_data*, requests every http link found in it with the same
    freshly generated session id, and returns the original URL with that id
    appended for the player.
    """
    import uuid
    body = getUrl(page_data)
    session_id = str(uuid.uuid1()).upper()
    warm_headers = [('X-Playback-Session-Id', session_id)]
    for link in re.compile('(http.*)').findall(body):
        try:
            # best-effort priming request; failures are ignored
            getUrl(link, headers=warm_headers)
        except:
            pass
    return page_data + '|&X-Playback-Session-Id=' + session_id
def get_dag_url(page_data):
    """Resolve a dag/total-stream redirector page to a playable URL.

    Fetches the page with the Verismo device user-agent it expects, then
    either recurses via revist_dag (localhost stub pages) or assembles the
    final URL from wmsAuthSign/href fragments, falling back to the raw
    page text when nothing matches.
    """
    # print 'get_dag_url',page_data
    if page_data.startswith('http://dag.total-stream.net'):
        headers=[('User-Agent','Verismo-BlackUI_(2.4.7.5.8.0.34)')]
        page_data=getUrl(page_data,headers=headers);
    if '127.0.0.1' in page_data:
        return revist_dag(page_data)
    elif re_me(page_data, 'wmsAuthSign%3D([^%&]+)') != '':
        final_url = re_me(page_data, '&ver_t=([^&]+)&') + '?wmsAuthSign=' + re_me(page_data, 'wmsAuthSign%3D([^%&]+)') + '==/mp4:' + re_me(page_data, '\\?y=([^&]+)&')
    else:
        final_url = re_me(page_data, 'href="([^"]+)"[^"]+$')
    # fall back to the raw page text when no link could be extracted
    if len(final_url)==0:
        final_url=page_data
    final_url = final_url.replace(' ', '%20')
    return final_url
def re_me(data, re_patten):
    """Return group 1 of the first match of *re_patten* in *data*, or ''."""
    found = re.search(re_patten, data)
    return found.group(1) if found is not None else ''
def revist_dag(page_data):
    """Second-stage dag resolver for localhost stub pages.

    Builds an rtmp 'live=true' spec (optionally with a token) or a
    wmsAuthSign URL; plain HREF results may loop back through get_dag_url.
    Dev/perm hostnames are normalised to their 'flive' equivalents.
    """
    final_url = ''
    if '127.0.0.1' in page_data:
        final_url = re_me(page_data, '&ver_t=([^&]+)&') + ' live=true timeout=15 playpath=' + re_me(page_data, '\\?y=([a-zA-Z0-9-_\\.@]+)')
        if re_me(page_data, 'token=([^&]+)&') != '':
            final_url = final_url + '?token=' + re_me(page_data, 'token=([^&]+)&')
    elif re_me(page_data, 'wmsAuthSign%3D([^%&]+)') != '':
        final_url = re_me(page_data, '&ver_t=([^&]+)&') + '?wmsAuthSign=' + re_me(page_data, 'wmsAuthSign%3D([^%&]+)') + '==/mp4:' + re_me(page_data, '\\?y=([^&]+)&')
    else:
        final_url = re_me(page_data, 'HREF="([^"]+)"')
        # an asx playlist loops back through the first-stage resolver
        if 'dag1.asx' in final_url:
            return get_dag_url(final_url)
    if 'devinlivefs.fplive.net' not in final_url:
        final_url = final_url.replace('devinlive', 'flive')
    if 'permlivefs.fplive.net' not in final_url:
        final_url = final_url.replace('permlive', 'flive')
    return final_url
def get_unwise( str_eval):
    """Evaluate a javascript 'w,i,s,e' argument string and decode it with
    unwise_func. Returns '' when evaluation or decoding fails.

    SECURITY NOTE: exec's script-derived text; only feed trusted input.
    """
    page_value=""
    try:
        # str_eval is the literal argument list of the js unwise() call
        ss="w,i,s,e=("+str_eval+')'
        exec (ss)
        page_value=unwise_func(w,i,s,e)
    except: traceback.print_exc(file=sys.stdout)
    #print 'unpacked',page_value
    return page_value
def unwise_func( w, i, s, e):
    """Python port of the javascript 'unwise' string decoder.

    Interleaves the first five chars of w/i/s into a key (l1lI) and the
    remainder into data (ll1l), then decodes the data two base-36 digits
    at a time, nudging each char code by +/-1 depending on the parity of
    the corresponding key char. Recurses through get_unwise when the
    output is wrapped in another eval(function(w,i,s,e)) layer.
    """
    lIll = 0;
    ll1I = 0;
    Il1l = 0;
    ll1l = [];
    l1lI = [];
    while True:
        if (lIll < 5):
            l1lI.append(w[lIll])
        elif (lIll < len(w)):
            ll1l.append(w[lIll]);
        lIll+=1;
        if (ll1I < 5):
            l1lI.append(i[ll1I])
        elif (ll1I < len(i)):
            ll1l.append(i[ll1I])
        ll1I+=1;
        if (Il1l < 5):
            l1lI.append(s[Il1l])
        elif (Il1l < len(s)):
            ll1l.append(s[Il1l]);
        Il1l+=1;
        # stop once every char of w, i and s has been distributed
        if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
            break;
    lI1l = ''.join(ll1l)#.join('');
    I1lI = ''.join(l1lI)#.join('');
    ll1I = 0;
    l1ll = [];
    # decode two base-36 chars at a time, cycling through the key
    for lIll in range(0,len(ll1l),2):
        #print 'array i',lIll,len(ll1l)
        ll11 = -1;
        if ( ord(I1lI[ll1I]) % 2):
            ll11 = 1;
        #print 'val is ', lI1l[lIll: lIll+2]
        l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
        ll1I+=1;
        if (ll1I >= len(l1lI)):
            ll1I = 0;
    ret=''.join(l1ll)
    if 'eval(function(w,i,s,e)' in ret:
        # print 'STILL GOing'
        # another obfuscation layer: extract its arguments and recurse
        ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
        return get_unwise(ret)
    else:
        # print 'FINISHED'
        return ret
def get_unpacked( page_value, regex_for_text='', iterations=1, total_iteration=1):
    """Fetch (when given a URL) and de-obfuscate P.A.C.K.E.R.-packed js.

    regex_for_text -- optional regex to pull the packed block from the page
    Returns 'NOTPACKED' when the regex finds nothing and 'UNPACKEDFAILED'
    when unpacking raises.
    """
    try:
        reg_data=None
        if page_value.startswith("http"):
            page_value= getUrl(page_value)
        # print 'page_value',page_value
        if regex_for_text and len(regex_for_text)>0:
            try:
                page_value=re.compile(regex_for_text).findall(page_value)[0] #get the js variable
            except: return 'NOTPACKED'
        page_value=unpack(page_value,iterations,total_iteration)
    except:
        page_value='UNPACKEDFAILED'
        traceback.print_exc(file=sys.stdout)
    # print 'unpacked',page_value
    if 'sav1live.tv' in page_value:
        page_value=page_value.replace('sav1live.tv','sawlive.tv') #quick fix some bug somewhere
    # print 'sav1 unpacked',page_value
    return page_value
def unpack(sJavascript,iteration=1, totaliterations=2 ):
    """Unpack Dean-Edwards-style packed javascript.

    Handles two layouts: the 'var _0xcb8a=' array variant (payload and
    keyword table read from the array literal) and the classic
    "rn p}('p',a,c,'k'...)" variant. Recurses up to *totaliterations*
    times for nested packing.
    SECURITY NOTE: exec's script-derived text; only feed trusted input.
    """
    # print 'iteration',iteration
    if sJavascript.startswith('var _0xcb8a='):
        aSplit=sJavascript.split('var _0xcb8a=')
        ss="myarray="+aSplit[1].split("eval(")[0]
        exec(ss)
        a1=62
        c1=int(aSplit[1].split(",62,")[1].split(',')[0])
        p1=myarray[0]
        k1=myarray[3]
        with open('temp file'+str(iteration)+'.js', "wb") as filewriter:
            filewriter.write(str(k1))
        #aa=1/0
    else:
        if "rn p}('" in sJavascript:
            aSplit = sJavascript.split("rn p}('")
        else:
            aSplit = sJavascript.split("rn A}('")
        # print aSplit
        p1,a1,c1,k1=('','0','0','')
        ss="p1,a1,c1,k1=('"+aSplit[1].split(".spli")[0]+')'
        exec(ss)
        k1=k1.split('|')
        aSplit = aSplit[1].split("))'")
    # print ' p array is ',len(aSplit)
    # print len(aSplit )
    #p=str(aSplit[0]+'))')#.replace("\\","")#.replace('\\\\','\\')
    #print aSplit[1]
    #aSplit = aSplit[1].split(",")
    #print aSplit[0]
    #a = int(aSplit[1])
    #c = int(aSplit[2])
    #k = aSplit[3].split(".")[0].replace("'", '').split('|')
    #a=int(a)
    #c=int(c)
    #p=p.replace('\\', '')
    # print 'p val is ',p[0:100],'............',p[-100:],len(p)
    # print 'p1 val is ',p1[0:100],'............',p1[-100:],len(p1)
    #print a,a1
    #print c,a1
    #print 'k val is ',k[-10:],len(k)
    # print 'k1 val is ',k1[-10:],len(k1)
    e = ''
    d = ''#32823
    #sUnpacked = str(__unpack(p, a, c, k, e, d))
    sUnpacked1 = str(__unpack(p1, a1, c1, k1, e, d,iteration))
    #print sUnpacked[:200]+'....'+sUnpacked[-100:], len(sUnpacked)
    # print sUnpacked1[:200]+'....'+sUnpacked1[-100:], len(sUnpacked1)
    #exec('sUnpacked1="'+sUnpacked1+'"')
    if iteration>=totaliterations:
        # print 'final res',sUnpacked1[:200]+'....'+sUnpacked1[-100:], len(sUnpacked1)
        return sUnpacked1#.replace('\\\\', '\\')
    else:
        # print 'final res for this iteration is',iteration
        return unpack(sUnpacked1,iteration+1)#.replace('\\', ''),iteration)#.replace('\\', '');#unpack(sUnpacked.replace('\\', ''))
def __unpack(p, a, c, k, e, d, iteration, v=1):
    """Core of the P.A.C.K.E.R. unpacker.

    Substitutes each base-*a* token in payload *p* with its replacement
    from keyword table *k*, highest index first. v==1 uses a whole-word
    regex substitution; any other value uses the pure-python
    findAndReplaceWord equivalent.
    """
    while c >= 1:
        c -= 1
        if k[c]:
            token = str(__itoaNew(c, a))
            if v == 1:
                # whole-word regex replace (exact but slow on big payloads)
                p = re.sub('\\b' + token + '\\b', k[c], p)
            else:
                p = findAndReplaceWord(p, token, k[c])
    return p
#
#function equivalent to re.sub('\b' + aa +'\b', k[c], p)
def findAndReplaceWord(source_str, word_to_find, replace_with):
    """Replace whole-word occurrences of *word_to_find* in *source_str*.

    Pure-python counterpart of re.sub('\\b'+word+'\\b', replace_with, s):
    an occurrence is replaced only when the character before it (if any)
    is not an identifier character and the character after it is a word
    boundary. When two occurrences are adjacent, the boundary test falls
    back to the word's own first character.
    """
    _IDENT = 'abcdefghijklmnopqrstuvwxyz1234567890_'
    segments = source_str.split(word_to_find)
    if len(segments) <= 1:
        return source_str
    rebuilt = []
    last = len(segments) - 1
    for idx, segment in enumerate(segments):
        rebuilt.append(segment)
        if idx == last:
            continue  # nothing follows the final segment
        nxt = segments[idx + 1]
        # is the character just after this occurrence a word boundary?
        if nxt:
            boundary_after = nxt[0].lower() not in _IDENT
        else:
            # adjacent occurrence: boundary only if the word itself starts
            # with a non-identifier character
            boundary_after = word_to_find[0].lower() not in _IDENT
        # an empty left segment means start-of-string / adjacent match
        boundary_before = (not segment) or (segment[-1].lower() not in _IDENT)
        rebuilt.append(replace_with if (boundary_before and boundary_after) else word_to_find)
    return ''.join(rebuilt)
def __itoa(num, radix):
    """Convert non-negative *num* to its string form in base *radix* (<=36)."""
    # print 'num red',num, radix
    if num == 0:
        return '0'
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    result = ""
    while num > 0:
        result = digits[num % radix] + result
        # BUGFIX: use floor division; plain '/' keeps Python 2 behaviour
        # but turns num into a float on Python 3, breaking the loop/index.
        num //= radix
    return result
def __itoaNew(cc, a):
    """Recursive base-*a* token encoder used by the unpacker.

    Digits 0-35 come from __itoa (base 36); larger digit values map to
    letters via chr(digit + 29) (36 -> 'A', 37 -> 'B', ...).
    """
    prefix = "" if cc < a else __itoaNew(int(cc / a), a)
    digit = cc % a
    suffix = chr(digit + 29) if digit > 35 else str(__itoa(digit, 36))
    return prefix + suffix
def getCookiesString(cookieJar):
    """Serialise a cookie jar into a 'name=value;name=value;' header string.

    Returns whatever was accumulated before any error; '' for an empty or
    None jar.
    """
    cookieString = ""
    try:
        for cookie in cookieJar:
            cookieString += cookie.name + "=" + cookie.value + ";"
    except:
        pass
    #print 'cookieString',cookieString
    return cookieString
def saveCookieJar(cookieJar, COOKIEFILE):
    """Persist *cookieJar* as COOKIEFILE in the addon profile directory,
    silently ignoring any failure (missing dir, unwritable disk, ...)."""
    try:
        target = os.path.join(profile, COOKIEFILE)
        cookieJar.save(target, ignore_discard=True)
    except:
        pass
def getCookieJar(COOKIEFILE):
    """Load an LWP cookie jar from the profile directory.

    Returns a fresh empty jar when COOKIEFILE is falsy, loading fails, or
    the loaded jar contains no cookies.
    """
    jar = None
    if COOKIEFILE:
        try:
            jar = cookielib.LWPCookieJar()
            jar.load(os.path.join(profile, COOKIEFILE), ignore_discard=True)
        except:
            jar = None
    # an empty jar is falsy, so this also replaces successfully loaded
    # but empty jars -- same as the original behaviour
    return jar if jar else cookielib.LWPCookieJar()
def doEval(fun_call,page_data,Cookie_Jar,m):
    """Evaluate a dotted function-call string from the regex definitions.

    Imports the module named before the first '.' from functions_dir, then
    exec's 'ret_val=<fun_call>' with page_data/Cookie_Jar/m in scope.
    Returns the result stringified when possible.
    SECURITY NOTE: exec's addon-supplied text; only run trusted regex files.
    """
    ret_val=''
    #print fun_call
    if functions_dir not in sys.path:
        sys.path.append(functions_dir)
    # print fun_call
    try:
        py_file='import '+fun_call.split('.')[0]
        # print py_file,sys.path
        exec( py_file)
        # print 'done'
    except:
        #print 'error in import'
        traceback.print_exc(file=sys.stdout)
    # print 'ret_val='+fun_call
    exec ('ret_val='+fun_call)
    # print ret_val
    #exec('ret_val=1+1')
    try:
        return str(ret_val)
    except: return ret_val
def doEvalFunction(fun_call,page_data,Cookie_Jar,m):
    """Write *fun_call* (python source defining GetLSProData) into
    LSProdynamicCode.py under functions_dir, import it and execute it.

    Returns the result stringified when possible.
    SECURITY NOTE: executes addon-supplied source code.
    """
    # print 'doEvalFunction'
    ret_val=''
    if functions_dir not in sys.path:
        sys.path.append(functions_dir)
    f=open(functions_dir+"/LSProdynamicCode.py","w")
    f.write(fun_call);
    f.close()
    import LSProdynamicCode
    ret_val=LSProdynamicCode.GetLSProData(page_data,Cookie_Jar,m)
    try:
        return str(ret_val)
    except: return ret_val
def getGoogleRecaptchaResponse(captchakey, cj,type=1): #1 for get, 2 for post, 3 for rawpost
    """Solve a legacy Google reCAPTCHA challenge interactively.

    Downloads the challenge image, shows it in an InputWindow for the user
    to transcribe, and returns the challenge/response pair formatted per
    *type*: 1 or 3 -> url-encoded query fragment, 2 -> 'key:value,...'
    string. Returns '' when no challenge could be obtained.
    """
    # #headers=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')]
    # html_text=getUrl(url,noredir=True, cookieJar=cj,headers=headers)
    # print 'html_text',html_text
    recapChallenge=""
    solution=""
    # cap_reg="recap.*?\?k=(.*?)\""
    # match =re.findall(cap_reg, html_text)
    # print 'match',match
    captcha=False
    captcha_reload_response_chall=None
    solution=None
    if len(captchakey)>0: #new shiny captcha!
        captcha_url=captchakey
        if not captcha_url.startswith('http'):
            captcha_url='http://www.google.com/recaptcha/api/challenge?k='+captcha_url+'&ajax=1'
        # print 'captcha_url',captcha_url
        captcha=True
        cap_chall_reg='challenge.*?\'(.*?)\''
        cap_image_reg='\'(.*?)\''
        captcha_script=getUrl(captcha_url,cookieJar=cj)
        recapChallenge=re.findall(cap_chall_reg, captcha_script)[0]
        # ask the reload endpoint for a fresh image-type challenge
        captcha_reload='http://www.google.com/recaptcha/api/reload?c=';
        captcha_k=captcha_url.split('k=')[1]
        captcha_reload+=recapChallenge+'&k='+captcha_k+'&reason=i&type=image&lang=en'
        captcha_reload_js=getUrl(captcha_reload,cookieJar=cj)
        captcha_reload_response_chall=re.findall(cap_image_reg, captcha_reload_js)[0]
        captcha_image_url='http://www.google.com/recaptcha/api/image?c='+captcha_reload_response_chall
        if not captcha_image_url.startswith("http"):
            captcha_image_url='http://www.google.com/recaptcha/api/'+captcha_image_url
        import random
        # random temp filename so concurrent prompts don't clash
        n=random.randrange(100,1000,5)
        local_captcha = os.path.join(profile,str(n) +"captcha.img" )
        localFile = open(local_captcha, "wb")
        localFile.write(getUrl(captcha_image_url,cookieJar=cj))
        localFile.close()
        solver = InputWindow(captcha=local_captcha)
        solution = solver.get()
        os.remove(local_captcha)
    if captcha_reload_response_chall:
        if type==1:
            return 'recaptcha_challenge_field='+urllib.quote_plus(captcha_reload_response_chall)+'&recaptcha_response_field='+urllib.quote_plus(solution)
        elif type==2:
            return 'recaptcha_challenge_field:'+captcha_reload_response_chall+',recaptcha_response_field:'+solution
        else:
            return 'recaptcha_challenge_field='+urllib.quote_plus(captcha_reload_response_chall)+'&recaptcha_response_field='+urllib.quote_plus(solution)
    else:
        return ''
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None, noredir=False):
    """Fetch *url* with urllib2 and return the response body.

    cookieJar -- cookielib jar shared across requests (may be None)
    post      -- request body; a non-None value makes this a POST
    headers   -- extra (name, value) pairs added after the default Chrome UA
    noredir   -- when True, install the NoRedirection handler so 3xx
                 responses are returned as-is instead of being followed
    """
    cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
    if noredir:
        opener = urllib2.build_opener(NoRedirection,cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
    else:
        opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
    #opener = urllib2.install_opener(opener)
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for h,hv in headers:
            req.add_header(h,hv)
    response = opener.open(req,post,timeout=timeout)
    link=response.read()
    response.close()
    return link;
def get_decode(str, reg=None):
    """Decode an obfuscated string.

    Optionally extracts the payload with regex *reg*, drops the trailing
    character, URL-unquotes, shifts every character code down by the code
    of the last character, then unquotes once more.
    """
    if reg:
        str = re.findall(reg, str)[0]
    s1 = urllib.unquote(str[0: len(str) - 1])
    t = ''
    for i in range(len(s1)):
        # BUGFIX: the shift key must be the *ordinal* of the last char;
        # subtracting the character itself raised TypeError (int - str).
        t += chr(ord(s1[i]) - ord(s1[len(s1) - 1]))
    t = urllib.unquote(t)
    # print t
    return t
def javascriptUnEscape(str):
    """Expand every unescape('...') payload found in *str* in place and
    return the resulting text."""
    escaped_chunks = re.findall('unescape\(\'(.*?)\'', str)
    # print 'js',js
    if escaped_chunks:
        for chunk in escaped_chunks:
            str = str.replace(chunk, urllib.unquote(chunk))
    return str
# Monotonically increasing counter used to give each downloaded captcha
# image a unique temp filename (see askCaptcha / askCaptchaNew).
iid=0
def askCaptcha(m, html_page, cookieJar):
    """Prompt the user to solve a site captcha and return the typed answer.

    Pulls the image-URL regex out of the $LiveStreamCaptcha[...] marker in
    m['expres'], applies it to *html_page*, downloads the image (honouring
    referer/agent/cookie overrides in *m*) and shows it in an InputWindow.
    """
    global iid
    iid += 1
    expre = m['expres']
    page_url = m['page']
    captcha_regex = re.compile('\$LiveStreamCaptcha\[([^\]]*)\]').findall(expre)[0]
    captcha_url = re.compile(captcha_regex).findall(html_page)[0]
    # print expre,captcha_regex,captcha_url
    if not captcha_url.startswith("http"):
        # make a relative captcha path absolute against the page's host
        page_ = 'http://' + "".join(page_url.split('/')[2:3])
        if captcha_url.startswith("/"):
            captcha_url = page_ + captcha_url
        else:
            captcha_url = page_ + '/' + captcha_url
    local_captcha = os.path.join(profile, str(iid) + "captcha.jpg")
    localFile = open(local_captcha, "wb")
    # print ' c capurl',captcha_url
    req = urllib2.Request(captcha_url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
    if 'referer' in m:
        req.add_header('Referer', m['referer'])
    if 'agent' in m:
        req.add_header('User-agent', m['agent'])
    if 'setcookie' in m:
        # print 'adding cookie',m['setcookie']
        req.add_header('Cookie', m['setcookie'])
    # BUGFIX: the image was fetched twice (a stray extra urlopen before the
    # real one); fetch it once and save the bytes.
    response = urllib2.urlopen(req)
    localFile.write(response.read())
    response.close()
    localFile.close()
    solver = InputWindow(captcha=local_captcha)
    solution = solver.get()
    return solution
def askCaptchaNew(imageregex,html_page,cookieJar,m):
    """Download a captcha image (URL taken directly from *html_page*, via
    *imageregex*, or via the oneplay helper) and return the user's typed
    answer from an InputWindow prompt.
    """
    global iid
    iid+=1
    if not imageregex=='':
        if html_page.startswith("http"):
            page_=getUrl(html_page,cookieJar=cookieJar)
        else:
            page_=html_page
        # NOTE(review): the regex is applied to html_page, not the fetched
        # page_ -- looks suspicious when html_page is a URL; confirm intent.
        captcha_url=re.compile(imageregex).findall(html_page)[0]
    else:
        captcha_url=html_page
    if 'oneplay.tv/embed' in html_page:
        import oneplay
        page_=getUrl(html_page,cookieJar=cookieJar)
        captcha_url=oneplay.getCaptchaUrl(page_)
    local_captcha = os.path.join(profile, str(iid)+"captcha.jpg" )
    localFile = open(local_captcha, "wb")
    # print ' c capurl',captcha_url
    req = urllib2.Request(captcha_url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
    if 'referer' in m:
        req.add_header('Referer', m['referer'])
    if 'agent' in m:
        req.add_header('User-agent', m['agent'])
    if 'accept' in m:
        req.add_header('Accept', m['accept'])
    if 'setcookie' in m:
        # print 'adding cookie',m['setcookie']
        req.add_header('Cookie', m['setcookie'])
    #cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
    #opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
    #opener = urllib2.install_opener(opener)
    #urllib2.urlopen(req)
    response = urllib2.urlopen(req)
    localFile.write(response.read())
    response.close()
    localFile.close()
    solver = InputWindow(captcha=local_captcha)
    solution = solver.get()
    return solution
#########################################################
# Function : TakeInput                                  #
#########################################################
# Parameter :                                           #
#                                                       #
#    name       suggested name for export               #
#    headname   heading shown on the keyboard dialog    #
#                                                       #
# Returns :                                             #
#                                                       #
#    name       name of export excluding any extension  #
#                                                       #
#########################################################
def TakeInput(name, headname):
    """Prompt the user with Kodi's on-screen keyboard and return the text.

    *name* pre-fills the input field, *headname* is the dialog heading.
    """
    kb = xbmc.Keyboard('default', 'heading', True)
    kb.setDefault(name)
    kb.setHeading(headname)
    kb.setHiddenInput(False)
    # BUGFIX: the keyboard was never shown -- getText() returned the
    # default without user interaction. doModal() blocks until dismissed.
    kb.doModal()
    return kb.getText()
#########################################################
class InputWindow(xbmcgui.WindowDialog):
    """Modal dialog that shows a captcha image and collects the answer
    through Kodi's on-screen keyboard."""
    def __init__(self, *args, **kwargs):
        # path of the downloaded captcha image to display
        self.cptloc = kwargs.get('captcha')
        self.img = xbmcgui.ControlImage(335,30,624,60,self.cptloc)
        self.addControl(self.img)
        self.kbd = xbmc.Keyboard()
    def get(self):
        """Show the dialog and return the typed text, or False if cancelled."""
        self.show()
        time.sleep(2)
        self.kbd.doModal()
        if (self.kbd.isConfirmed()):
            text = self.kbd.getText()
            self.close()
            return text
        self.close()
        return False
def getEpocTime():
    """Return the current epoch time in whole milliseconds, as a string."""
    import time
    millis = int(time.time() * 1000)
    return str(millis)
def getEpocTime2():
    """Return the current epoch time in whole seconds, as a string."""
    import time
    seconds = int(time.time())
    return str(seconds)
def get_params():
    """Parse the plugin query string (sys.argv[2]) into a dict of key/value
    pairs.

    NOTE(review): returns an empty *list* (not a dict) when there is no
    query string, and the trailing-'/' trim below operates on a variable
    that is never used afterwards -- kept as-is because callers may rely
    on the current behaviour.
    """
    param=[]
    paramstring=sys.argv[2]
    if len(paramstring)>=2:
        params=sys.argv[2]
        cleanedparams=params.replace('?','')
        if (params[len(params)-1]=='/'):
            params=params[0:len(params)-2]
        pairsofparams=cleanedparams.split('&')
        param={}
        for i in range(len(pairsofparams)):
            splitparams={}
            splitparams=pairsofparams[i].split('=')
            if (len(splitparams))==2:
                param[splitparams[0]]=splitparams[1]
    return param
def getFavorites():
    """Render every entry of the favorites JSON file as a Kodi list item.

    Entries are lists: (name, url, iconimage[, fanart[, mode[, playlist
    [, regexs]]]]); mode 0 items become playable links, anything else a
    folder. Missing trailing fields fall back to sensible defaults.
    """
    items = json.loads(open(favorites).read())
    total = len(items)
    for i in items:
        name = i[0]
        url = i[1]
        iconimage = i[2]
        try:
            fanArt = i[3]
            if fanArt == None:
                raise
        except:
            # older entries lack fanart: fall back to thumb or addon art
            if addon.getSetting('use_thumb') == "true":
                fanArt = iconimage
            else:
                fanArt = fanart
        try: playlist = i[5]
        except: playlist = None
        try: regexs = i[6]
        except: regexs = None
        if i[4] == 0:
            addLink(url,name,iconimage,fanArt,'','','','fav',playlist,regexs,total)
        else:
            addDir(name,url,i[4],iconimage,fanart,'','','','','fav')
def addFavorite(name, url, iconimage, fanart, mode, playlist=None, regexs=None):
    """Append one favorite entry to the JSON favorites file.

    Each entry is stored as a 7-item tuple (name, url, iconimage, fanart,
    mode, playlist, regexs), matching what getFavorites() reads back at
    i[5]/i[6]. The file is created on first use.
    """
    try:
        # names coming from listings may be unicode; store them as utf-8
        name = name.encode('utf-8', 'ignore')
    except:
        pass
    if os.path.exists(favorites) == False:
        addon_log('Making Favorites File')
        data = []
    else:
        addon_log('Appending Favorites')
        data = json.loads(open(favorites).read())
    # BUGFIX: the append path previously stored only a 5-tuple, silently
    # dropping playlist and regexs that getFavorites() expects.
    data.append((name, url, iconimage, fanart, mode, playlist, regexs))
    a = open(favorites, "w")
    a.write(json.dumps(data))
    a.close()
def rmFavorite(name):
    """Delete the first favorite whose name matches *name*, rewrite the
    favorites file, and refresh the Kodi container."""
    data = json.loads(open(favorites).read())
    for index, entry in enumerate(data):
        if entry[0] == name:
            del data[index]
            handle = open(favorites, "w")
            handle.write(json.dumps(data))
            handle.close()
            break
    xbmc.executebuiltin("XBMC.Container.Refresh")
def urlsolver(url):
    """Resolve a hoster page URL to a direct media URL via urlresolver.

    When the resolver returns multiple qualities: prefer HD, else the last
    SD seen; 1080p wins only when enabled in settings. Falls back to the
    original *url* (with a notification) when the host is unsupported.
    """
    import urlresolver
    host = urlresolver.HostedMediaFile(url)
    if host:
        resolver = urlresolver.resolve(url)
        resolved = resolver
        if isinstance(resolved,list):
            for k in resolved:
                quality = addon.getSetting('quality')
                if k['quality'] == 'HD' :
                    resolver = k['url']
                    break
                elif k['quality'] == 'SD' :
                    resolver = k['url']
                elif k['quality'] == '1080p' and addon.getSetting('1080pquality') == 'true' :
                    resolver = k['url']
                    break
        else:
            resolver = resolved
    else:
        xbmc.executebuiltin("XBMC.Notification(pancas,Urlresolver donot support this domain. - ,5000)")
        resolver=url
    return resolver
def play_playlist(name, mu_playlist,queueVideo=None):
    """Play or queue a multi-source playlist.

    Three modes: ask the user to pick one source (setting enabled), load
    every source into the video playlist and start it, or -- when
    *queueVideo* is set -- just append *mu_playlist* as a single item.
    Entries may be plain URLs, '&mode=19' urlresolver links, or
    '$doregex[...]&regexs=' definitions resolved via getRegexParsed.
    """
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    if addon.getSetting('ask_playlist_items') == 'true' and not queueVideo :
        import urlparse
        names = []
        # label each source by its hostname, falling back to the item name
        for i in mu_playlist:
            d_name=urlparse.urlparse(i).netloc
            if d_name == '':
                names.append(name)
            else:
                names.append(d_name)
        dialog = xbmcgui.Dialog()
        index = dialog.select('Choose a video source', names)
        if index >= 0:
            if "&mode=19" in mu_playlist[index]:
                #playsetresolved (urlsolver(mu_playlist[index].replace('&mode=19','')),name,iconimage,True)
                xbmc.Player().play(urlsolver(mu_playlist[index].replace('&mode=19','').replace(';','')))
            elif "$doregex" in mu_playlist[index] :
                # print mu_playlist[index]
                sepate = mu_playlist[index].split('&regexs=')
                # print sepate
                url,setresolved = getRegexParsed(sepate[1], sepate[0])
                url2 = url.replace(';','')
                xbmc.Player().play(url2)
            else:
                url = mu_playlist[index]
                xbmc.Player().play(url)
    elif not queueVideo:
        #playlist = xbmc.PlayList(1) # 1 means video
        playlist.clear()
        item = 0
        for i in mu_playlist:
            item += 1
            info = xbmcgui.ListItem('%s) %s' %(str(item),name))
            # Don't do this as regex parsed might take longer
            try:
                if "$doregex" in i:
                    sepate = i.split('&regexs=')
                    # print sepate
                    url,setresolved = getRegexParsed(sepate[1], sepate[0])
                elif "&mode=19" in i:
                    url = urlsolver(i.replace('&mode=19','').replace(';',''))
                if url:
                    playlist.add(url, info)
                else:
                    raise
            except Exception:
                # resolution failed: queue the raw entry instead
                playlist.add(i, info)
                pass #xbmc.Player().play(url)
        xbmc.executebuiltin('playlist.playoffset(video,0)')
    else:
        listitem = xbmcgui.ListItem(name)
        playlist.add(mu_playlist, listitem)
def download_file(name, url):
    """Download *url* via the downloader module into the configured save
    location, then offer to register the finished file as a source."""
    # prompt for a save location first if none is configured
    if addon.getSetting('save_location') == "":
        xbmc.executebuiltin("XBMC.Notification('pancas','Choose a location to save files.',15000,"+icon+")")
        addon.openSettings()
    params = {'url': url, 'download_path': addon.getSetting('save_location')}
    downloader.download(name, params)
    dialog = xbmcgui.Dialog()
    ret = dialog.yesno('pancas', 'Do you want to add this file as a source?')
    if ret:
        addSource(os.path.join(addon.getSetting('save_location'), name))
def _search(url,name):
    """Let the user pick one of the known search-capable addons and open
    its search dialog via pluginquerybyJSON."""
    # print url,name
    pluginsearchurls = ['plugin://plugin.video.genesis/?action=shows_search',\
    'plugin://plugin.video.genesis/?action=movies_search',\
    'plugin://plugin.video.salts/?mode=search&section=Movies',\
    'plugin://plugin.video.salts/?mode=search&section=TV',\
    'plugin://plugin.video.muchmovies.hd/?action=movies_search',\
    'plugin://plugin.video.viooz.co/?action=root_search',\
    'plugin://plugin.video.ororotv/?action=shows_search',\
    'plugin://plugin.video.yifymovies.hd/?action=movies_search',\
    'plugin://plugin.video.cartoonhdtwo/?description&fanart&iconimage&mode=3&name=Search&url=url',\
    'plugin://plugin.video.youtube/kodion/search/list/',\
    'plugin://plugin.video.dailymotion_com/?mode=search&url',\
    'plugin://plugin.video.vimeo/kodion/search/list/'\
    ]
    # display labels, index-aligned with pluginsearchurls above
    names = ['Gensis TV','Genesis Movie','Salt movie','salt TV','Muchmovies','viooz','ORoroTV',\
             'Yifymovies','cartoonHD','Youtube','DailyMotion','Vimeo']
    dialog = xbmcgui.Dialog()
    index = dialog.select('Choose a video source', names)
    if index >= 0:
        url = pluginsearchurls[index]
        # print 'url',url
        pluginquerybyJSON(url)
def addDir(name,url,mode,iconimage,fanart,description,genre,date,credits,showcontext=False,regexs=None,reg_url=None,allinfo={}):
    """Add a folder item to the Kodi directory listing.

    Builds the plugin callback URL, sets video info labels (either the
    individual fields or the full *allinfo* dict), and attaches context
    menu entries (parental block toggle, source/download/favorites actions)
    depending on *showcontext*.
    """
    u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&fanart="+urllib.quote_plus(fanart)
    ok=True
    if date == '':
        date = None
    else:
        description += '\n\nDate: %s' %date
    liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    if len(allinfo) <1 :
        liz.setInfo(type="Video", infoLabels={ "Title": name, "Plot": description, "Genre": genre, "dateadded": date, "credits": credits })
    else:
        liz.setInfo(type="Video", infoLabels= allinfo)
    liz.setProperty("Fanart_Image", fanart)
    if showcontext:
        contextMenu = []
        parentalblock =addon.getSetting('parentalblocked')
        parentalblock= parentalblock=="true"
        parentalblockedpin =addon.getSetting('parentalblockedpin')
        # print 'parentalblockedpin',parentalblockedpin
        if len(parentalblockedpin)>0:
            if parentalblock:
                contextMenu.append(('Disable Parental Block','XBMC.RunPlugin(%s?mode=55&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
            else:
                contextMenu.append(('Enable Parental Block','XBMC.RunPlugin(%s?mode=56&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
        if showcontext == 'source':
            if name in str(ba):
                contextMenu.append(('Remove from ba','XBMC.RunPlugin(%s?mode=8&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
        elif showcontext == 'download':
            contextMenu.append(('Download','XBMC.RunPlugin(%s?url=%s&mode=9&name=%s)'
                                %(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
        elif showcontext == 'fav':
            contextMenu.append(('Remove from pancas Favorites','XBMC.RunPlugin(%s?mode=6&name=%s)'
                                %(sys.argv[0], urllib.quote_plus(name))))
        if showcontext == '!!update':
            fav_params2 = (
                '%s?url=%s&mode=17&regexs=%s'
                %(sys.argv[0], urllib.quote_plus(reg_url), regexs)
                )
            contextMenu.append(('[COLOR yellow]!!update[/COLOR]','XBMC.RunPlugin(%s)' %fav_params2))
        if not name in FAV:
            contextMenu.append(('Add to pancas Favorites','XBMC.RunPlugin(%s?mode=5&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=%s)'
                                %(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage), urllib.quote_plus(fanart), mode)))
        liz.addContextMenuItems(contextMenu)
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
    return ok
def ytdl_download(url,title,media_type='video'):
    """Download a video (or just its audio) through the youtubedl module.

    With an empty *url*, falls back to whatever Kodi is currently playing:
    shows the download manager if a download is already running, otherwise
    downloads the playing file (stripping any |User-Agent= suffix).
    """
    # play in xbmc while playing go back to contextMenu(c) to "!!Download!!"
    # Trial yasceen: seperate |User-Agent=
    import youtubedl
    if not url == '':
        if media_type== 'audio':
            youtubedl.single_YD(url,download=True,audio=True)
        else:
            youtubedl.single_YD(url,download=True)
    elif xbmc.Player().isPlaying() == True :
        import YDStreamExtractor
        if YDStreamExtractor.isDownloading() == True:
            YDStreamExtractor.manageDownloads()
        else:
            xbmc_url = xbmc.Player().getPlayingFile()
            xbmc_url = xbmc_url.split('|User-Agent=')[0]
            info = {'url':xbmc_url,'title':title,'media_type':media_type}
            youtubedl.single_YD('',download=True,dl_info=info)
    else:
        xbmc.executebuiltin("XBMC.Notification(DOWNLOAD,First Play [COLOR yellow]WHILE playing download[/COLOR] ,10000)")
## Lunatixz PseudoTV feature
def ascii(string):
    """Force *string* to a pure-ascii byte string (Python 2 semantics),
    dropping characters that cannot be encoded. Non-strings pass through."""
    if isinstance(string, basestring):
        if isinstance(string, unicode):
            string = string.encode('ascii', 'ignore')
    return string
def uni(string, encoding = 'utf-8'):
    """Force *string* to unicode (Python 2 semantics), decoding byte
    strings with *encoding* and dropping undecodable characters.
    Non-strings pass through unchanged."""
    if isinstance(string, basestring):
        if not isinstance(string, unicode):
            string = unicode(string, encoding, 'ignore')
    return string
def removeNonAscii(s):
    """Strip every character with code point >= 128 from *s*."""
    return "".join(ch for ch in s if ord(ch) < 128)
def sendJSON( command):
    """Send a JSON-RPC command to Kodi, retrying as ascii when the unicode
    form cannot be encoded; returns the response as unicode."""
    data = ''
    try:
        data = xbmc.executeJSONRPC(uni(command))
    except UnicodeEncodeError:
        data = xbmc.executeJSONRPC(ascii(command))
    return uni(data)
def pluginquerybyJSON(url,give_me_result=None,playlist=False):
    """List (or return) another plugin's directory via Files.GetDirectory.

    give_me_result -- when truthy, return the parsed JSON-RPC response
                      instead of rendering items
    playlist       -- when truthy, queue playable files instead of listing
    """
    if 'audio' in url:
        json_query = uni('{"jsonrpc":"2.0","method":"Files.GetDirectory","params": {"directory":"%s","media":"video", "properties": ["title", "album", "artist", "duration","thumbnail", "year"]}, "id": 1}') %url
    else:
        json_query = uni('{"jsonrpc":"2.0","method":"Files.GetDirectory","params":{"directory":"%s","media":"video","properties":[ "plot","playcount","director", "genre","votes","duration","trailer","premiered","thumbnail","title","year","dateadded","fanart","rating","season","episode","studio","mpaa"]},"id":1}') %url
    json_folder_detail = json.loads(sendJSON(json_query))
    #print json_folder_detail
    if give_me_result:
        return json_folder_detail
    if json_folder_detail.has_key('error'):
        return
    else:
        for i in json_folder_detail['result']['files'] :
            meta ={}
            url = i['file']
            name = removeNonAscii(i['label'])
            thumbnail = removeNonAscii(i['thumbnail'])
            fanart = removeNonAscii(i['fanart'])
            # keep only meaningful metadata fields for the list item
            meta = dict((k,v) for k, v in i.iteritems() if not v == '0' or not v == -1 or v == '')
            meta.pop("file", None)
            if i['filetype'] == 'file':
                if playlist:
                    play_playlist(name,url,queueVideo='1')
                    continue
                else:
                    addLink(url,name,thumbnail,fanart,'','','','',None,'',total=len(json_folder_detail['result']['files']),allinfo=meta)
                    #xbmc.executebuiltin("Container.SetViewMode(500)")
                if i['type'] and i['type'] == 'tvshow' :
                    xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
                elif i['episode'] > 0 :
                    xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
            else:
                addDir(name,url,53,thumbnail,fanart,'','','','',allinfo=meta)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))
def addLink(url,name,iconimage,fanart,description,genre,date,showcontext,playlist,regexs,total,setCookie="",allinfo={}):
#print 'url,name',url,name
contextMenu =[]
parentalblock =addon.getSetting('parentalblocked')
parentalblock= parentalblock=="true"
parentalblockedpin =addon.getSetting('parentalblockedpin')
# print 'parentalblockedpin',parentalblockedpin
if len(parentalblockedpin)>0:
if parentalblock:
contextMenu.append(('Disable Parental Block','XBMC.RunPlugin(%s?mode=55&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
else:
contextMenu.append(('Enable Parental Block','XBMC.RunPlugin(%s?mode=56&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
try:
name = name.encode('utf-8')
except: pass
ok = True
isFolder=False
if regexs:
mode = '17'
if 'listrepeat' in regexs:
isFolder=True
# print 'setting as folder in link'
contextMenu.append(('[COLOR white]!!Download Currently Playing!![/COLOR]','XBMC.RunPlugin(%s?url=%s&mode=21&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
elif (any(x in url for x in resolve_url) and url.startswith('http')) or url.endswith('&mode=19'):
url=url.replace('&mode=19','')
mode = '19'
contextMenu.append(('[COLOR white]!!Download Currently Playing!![/COLOR]','XBMC.RunPlugin(%s?url=%s&mode=21&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
elif url.endswith('&mode=18'):
url=url.replace('&mode=18','')
mode = '18'
contextMenu.append(('[COLOR white]!!Download!![/COLOR]','XBMC.RunPlugin(%s?url=%s&mode=23&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
if addon.getSetting('dlaudioonly') == 'true':
contextMenu.append(('!!Download [COLOR seablue]Audio!![/COLOR]','XBMC.RunPlugin(%s?url=%s&mode=24&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
elif url.startswith('magnet:?xt='):
if '&' in url and not '&' in url :
url = url.replace('&','&')
url = 'plugin://plugin.video.pulsar/play?uri=' + url
mode = '12'
else:
mode = '12'
contextMenu.append(('[COLOR white]!!Download Currently Playing!![/COLOR]','XBMC.RunPlugin(%s?url=%s&mode=21&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
if 'plugin://plugin.video.youtube/play/?video_id=' in url:
yt_audio_url = url.replace('plugin://plugin.video.youtube/play/?video_id=','https://www.youtube.com/watch?v=')
contextMenu.append(('!!Download [COLOR blue]Audio!![/COLOR]','XBMC.RunPlugin(%s?url=%s&mode=24&name=%s)'
%(sys.argv[0], urllib.quote_plus(yt_audio_url), urllib.quote_plus(name))))
u=sys.argv[0]+"?"
play_list = False
if playlist:
if addon.getSetting('add_playlist') == "false":
u += "url="+urllib.quote_plus(url)+"&mode="+mode
else:
u += "mode=13&name=%s&playlist=%s" %(urllib.quote_plus(name), urllib.quote_plus(str(playlist).replace(',','||')))
name = name + '[COLOR magenta] (' + str(len(playlist)) + ' items )[/COLOR]'
play_list = True
else:
u += "url="+urllib.quote_plus(url)+"&mode="+mode
if regexs:
u += "®exs="+regexs
if not setCookie == '':
u += "&setCookie="+urllib.quote_plus(setCookie)
if date == '':
date = None
else:
description += '\n\nDate: %s' %date
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
if len(allinfo) <1:
liz.setInfo(type="Video", infoLabels={ "Title": name, "Plot": description, "Genre": genre, "dateadded": date })
else:
liz.setInfo(type="Video", infoLabels=allinfo)
liz.setProperty("Fanart_Image", fanart)
if (not play_list) and not any(x in url for x in g_ignoreSetResolved) and not '$PLAYERPROXY$=' in url:# (not url.startswith('plugin://plugin.video.f4mTester')):
if regexs:
#print urllib.unquote_plus(regexs)
if '$pyFunction:playmedia(' not in urllib.unquote_plus(regexs) and 'notplayable' not in urllib.unquote_plus(regexs) and 'listrepeat' not in urllib.unquote_plus(regexs) :
#print 'setting isplayable',url, urllib.unquote_plus(regexs),url
liz.setProperty('IsPlayable', 'true')
else:
liz.setProperty('IsPlayable', 'true')
else:
addon_log( 'NOT setting isplayable'+url)
if showcontext:
#contextMenu = []
if showcontext == 'fav':
contextMenu.append(
('Remove from pancas Favorites','XBMC.RunPlugin(%s?mode=6&name=%s)'
%(sys.argv[0], urllib.quote_plus(name)))
)
elif not name in FAV:
try:
fav_params = (
'%s?mode=5&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=0'
%(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage), urllib.quote_plus(fanart))
)
except:
fav_params = (
'%s?mode=5&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=0'
%(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage.encode("utf-8")), urllib.quote_plus(fanart.encode("utf-8")))
)
if playlist:
fav_params += 'playlist='+urllib.quote_plus(str(playlist).replace(',','||'))
if regexs:
fav_params += "®exs="+regexs
contextMenu.append(('Add to pancas Favorites','XBMC.RunPlugin(%s)' %fav_params))
liz.addContextMenuItems(contextMenu)
if not playlist is None:
if addon.getSetting('add_playlist') == "false":
playlist_name = name.split(') ')[1]
contextMenu_ = [
('Play '+playlist_name+' PlayList','XBMC.RunPlugin(%s?mode=13&name=%s&playlist=%s)'
%(sys.argv[0], urllib.quote_plus(playlist_name), urllib.quote_plus(str(playlist).replace(',','||'))))
]
liz.addContextMenuItems(contextMenu_)
#print 'adding',name
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,totalItems=total,isFolder=isFolder)
#print 'added',name
return ok
def playsetresolved(url, name, iconimage, setresolved=True, reg=None):
    """Hand a resolved media URL back to Kodi, or fall back to direct play.

    When ``setresolved`` is false the URL is treated as another plugin
    invocation and simply re-run.  Otherwise a playable ListItem is built
    and either resolved through ``xbmcplugin.setResolvedUrl`` or, for
    '$$LSDirect$$'-tagged / 'notplayable' items, played directly.
    """
    print(url)
    if not setresolved:
        # The url is a plugin path rather than a media stream.
        xbmc.executebuiltin('XBMC.RunPlugin(' + url + ')')
        return
    # Direct-play markers disable setResolvedUrl in favour of xbmc.Player.
    use_resolved = True
    if '$$LSDirect$$' in url:
        url = url.replace('$$LSDirect$$', '')
        use_resolved = False
    if reg and 'notplayable' in reg:
        use_resolved = False
    item = xbmcgui.ListItem(name, iconImage=iconimage)
    item.setInfo(type='Video', infoLabels={'Title': name})
    item.setProperty("IsPlayable", "true")
    item.setPath(url)
    if use_resolved:
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
    else:
        xbmc.Player().play(url)
## Thanks to daschacka, an epg scraper for http://i.teleboy.ch/programm/station_select.php
## http://forum.xbmc.org/post.php?p=936228&postcount=1076
def getepg(link):
    """Scrape the currently running programme ("Jetzt") from a teleboy.ch page.

    Returns a display suffix of the form " - <title> - <time>".
    """
    page = urllib.urlopen(link)
    html = page.read()
    page.close()
    # Everything after the "Jetzt" (now playing) marker describes the
    # current show; the detail links carry time and title at fixed offsets.
    now_section = html.split("Jetzt")[1]
    entries = now_section.split('programm/detail.php?const_id=')
    # The start time sits 40 characters into the first detail link.
    now_time = entries[1].split('<br /><a href="/')[0][40:]
    # The title sits 17 characters into the second detail link.
    now_title = entries[2].split("</a></p></div>")[0][17:]
    now_title = now_title.encode('utf-8')
    return " - " + now_title + " - " + now_time
def get_epg(url, regex):
    """Fetch *url* and return the first match of *regex*, or None on failure."""
    page = makeRequest(url)
    try:
        return re.findall(regex, page)[0]
    except:
        # No match (or a bad pattern): log the failing regex and fall
        # through to an implicit None.
        addon_log('regex failed')
        addon_log(regex)
        return
##not a generic implemenation as it needs to convert
def d2x(d, root="root",nested=0):
    """Serialise the nested regex-definition dict *d* into an XML string.

    Not a generic dict-to-XML converter: at the top level every key is
    renamed to the tag 'regex' (the format the regex parser expects), list
    values become repeated sibling elements, nested dicts recurse without
    their own root, and scalar values are escaped/utf-8 encoded.
    """
    # Tag helpers.  NOTE: ml deliberately closes over the loop variable
    # 'key' (late binding) -- it must only be called inside the loop below,
    # after 'key' has been (re)assigned for the current entry.
    op = lambda tag: '<' + tag + '>'
    cl = lambda tag: '</' + tag + '>\n'
    ml = lambda v,xml: xml + op(key) + str(v) + cl(key)
    xml = op(root) + '\n' if root else ""
    for key,vl in d.iteritems():
        vtype = type(vl)
        if nested==0: key='regex' #enforcing all top level tags to be named as regex
        if vtype is list:
            # One <key>...</key> element per list item, each escaped.
            for v in vl:
                v=escape(v)
                xml = ml(v,xml)
        if vtype is dict:
            # Nested dict: recurse with root=None so only the children emit.
            xml = ml('\n' + d2x(vl,None,nested+1),xml)
        if vtype is not list and vtype is not dict:
            if not vl is None: vl=escape(vl)
            #print repr(vl)
            if vl is None:
                # None is emitted literally as the string 'None'.
                xml = ml(vl,xml)
            else:
                #xml = ml(escape(vl.encode("utf-8")),xml)
                xml = ml(vl.encode("utf-8"),xml)
    xml += cl(root) if root else ""
    return xml
# Present every directory produced by this plugin as a movie listing.
xbmcplugin.setContent(int(sys.argv[1]), 'movies')

# Register the sort methods the listing supports.  The original code
# repeated the same try/except block four times; a loop removes the
# duplication.  Each registration stays individually guarded so a Kodi
# build lacking one of the constants (or rejecting the call) does not
# abort the whole plugin -- the bare except mirrors the original tolerance.
for _sort_method_name in ('SORT_METHOD_UNSORTED', 'SORT_METHOD_LABEL',
                          'SORT_METHOD_DATE', 'SORT_METHOD_GENRE'):
    try:
        xbmcplugin.addSortMethod(int(sys.argv[1]),
                                 getattr(xbmcplugin, _sort_method_name))
    except:
        pass
# ---------------------------------------------------------------------------
# Decode the plugin invocation parameters.  Every parameter is optional, so
# each lookup is individually guarded: a missing (or malformed) key simply
# leaves the default in place.  (Fixed: 'playlist' was initialised twice.)
# ---------------------------------------------------------------------------
params=get_params()
url=None
name=None
mode=None
playlist=None
iconimage=None
fanart=FANART
fav_mode=None
regexs=None
try:
    url=urllib.unquote_plus(params["url"]).decode('utf-8')
except:
    pass
try:
    name=urllib.unquote_plus(params["name"])
except:
    pass
try:
    iconimage=urllib.unquote_plus(params["iconimage"])
except:
    pass
try:
    fanart=urllib.unquote_plus(params["fanart"])
except:
    pass
try:
    mode=int(params["mode"])
except:
    pass
try:
    # Playlists are serialised with ',' replaced by '||'; restore and eval.
    # NOTE(review): eval() on URL-supplied data is dangerous.  The payload
    # normally comes from this add-on's own listings, but a literal parser
    # (ast.literal_eval) would be safer -- flagged, not changed.
    playlist=eval(urllib.unquote_plus(params["playlist"]).replace('||',','))
except:
    pass
try:
    fav_mode=int(params["fav_mode"])
except:
    pass
try:
    regexs=params["regexs"]
except:
    pass
playitem=''
try:
    playitem=urllib.unquote_plus(params["playitem"])
except:
    pass
addon_log("Mode: "+str(mode))
# NOTE(review): the two lines below read a bundled file and decode it with
# base64 twice into 'ba'.  Obfuscated payload loading like this is a
# security red flag, and 'ba' is not consumed anywhere in the visible
# dispatch -- verify what (if anything) uses it before trusting this add-on.
ab = os.path.join(home, 'utube.py')
ba = open(ab).read().decode('base64').decode('base64')
if not url is None:
    addon_log("URL: "+str(url.encode('utf-8')))
    addon_log("Name: "+str(name))
# A 'playitem' parameter carries a pre-rendered listing item: parse it with
# the soup helper and force regex-playback mode (117).
if not playitem =='':
    s=getSoup('',data=playitem)
    name,url,regexs=getItems(s,None,dontLink=True)
    mode=117
# No mode at all means the add-on was opened at its root listing.
if mode==None:
    addon_log("getsources")
    getsources()
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==1:
addon_log("getData")
data=None
if regexs:
data=getRegexParsed(regexs, url)
url=''
#create xml here
getData(url,fanart,data)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==2:
addon_log("getChannelItems")
getChannelItems(name,url,fanart)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==3:
addon_log("getSubChannelItems")
getSubChannelItems(name,url,fanart)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==4:
addon_log("getFavorites")
getFavorites()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==5:
addon_log("addFavorite")
try:
name = name.split('\\ ')[1]
except:
pass
try:
name = name.split(' - ')[0]
except:
pass
addFavorite(name,url,iconimage,fanart,fav_mode)
elif mode==6:
addon_log("rmFavorite")
try:
name = name.split('\\ ')[1]
except:
pass
try:
name = name.split(' - ')[0]
except:
pass
rmFavorite(name)
elif mode==7:
addon_log("addSource")
addSource(url)
elif mode==8:
addon_log("rmSource")
rmSource(name)
elif mode==9:
addon_log("download_file")
download_file(name, url)
elif mode==10:
addon_log("getCommunityba")
getCommunityba()
elif mode==11:
addon_log("addSource")
addSource(url)
elif mode==12:
addon_log("setResolvedUrl")
if not url.startswith("plugin://plugin") or not any(x in url for x in g_ignoreSetResolved):#not url.startswith("plugin://plugin.video.f4mTester") :
setres=True
if '$$LSDirect$$' in url:
url=url.replace('$$LSDirect$$','')
setres=False
item = xbmcgui.ListItem(path=url)
if not setres:
xbmc.Player().play(url)
else:
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
else:
# print 'Not setting setResolvedUrl'
xbmc.executebuiltin('XBMC.RunPlugin('+url+')')
elif mode==13:
addon_log("play_playlist")
play_playlist(name, playlist)
elif mode==14:
addon_log("get_xml_database")
get_xml_database(url)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==15:
addon_log("browse_xml_database")
get_xml_database(url, True)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==16:
addon_log("browse_community")
getCommunityba(url,browse=True)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==17 or mode==117:
addon_log("getRegexParsed")
data=None
if regexs and 'listrepeat' in urllib.unquote_plus(regexs):
listrepeat,ret,m,regexs =getRegexParsed(regexs, url)
#print listrepeat,ret,m,regexs
d=''
# print 'm is' , m
# print 'regexs',regexs
regexname=m['name']
existing_list=regexs.pop(regexname)
# print 'final regexs',regexs,regexname
url=''
import copy
ln=''
rnumber=0
for obj in ret:
try:
rnumber+=1
newcopy=copy.deepcopy(regexs)
# print 'newcopy',newcopy, len(newcopy)
listrepeatT=listrepeat
i=0
for i in range(len(obj)):
# print 'i is ',i, len(obj), len(newcopy)
if len(newcopy)>0:
for the_keyO, the_valueO in newcopy.iteritems():
if the_valueO is not None:
for the_key, the_value in the_valueO.iteritems():
if the_value is not None:
# print 'key and val',the_key, the_value
# print 'aa'
# print '[' + regexname+'.param'+str(i+1) + ']'
# print repr(obj[i])
if type(the_value) is dict:
for the_keyl, the_valuel in the_value.iteritems():
if the_valuel is not None:
val=None
if isinstance(obj,tuple):
try:
val= obj[i].decode('utf-8')
except:
val= obj[i]
else:
try:
val= obj.decode('utf-8')
except:
val= obj
if '[' + regexname+'.param'+str(i+1) + '][DE]' in the_valuel:
the_valuel=the_valuel.replace('[' + regexname+'.param'+str(i+1) + '][DE]', unescape(val))
the_value[the_keyl]=the_valuel.replace('[' + regexname+'.param'+str(i+1) + ']', val)
#print 'first sec',the_value[the_keyl]
else:
val=None
if isinstance(obj,tuple):
try:
val=obj[i].decode('utf-8')
except:
val=obj[i]
else:
try:
val= obj.decode('utf-8')
except:
val= obj
if '[' + regexname+'.param'+str(i+1) + '][DE]' in the_value:
#print 'found DE',the_value.replace('[' + regexname+'.param'+str(i+1) + '][DE]', unescape(val))
the_value=the_value.replace('[' + regexname+'.param'+str(i+1) + '][DE]', unescape(val))
the_valueO[the_key]=the_value.replace('[' + regexname+'.param'+str(i+1) + ']', val)
#print 'second sec val',the_valueO[the_key]
val=None
if isinstance(obj,tuple):
try:
val=obj[i].decode('utf-8')
except:
val=obj[i]
else:
try:
val=obj.decode('utf-8')
except:
val=obj
if '[' + regexname+'.param'+str(i+1) + '][DE]' in listrepeatT:
listrepeatT=listrepeatT.replace('[' + regexname+'.param'+str(i+1) + '][DE]',val)
listrepeatT=listrepeatT.replace('[' + regexname+'.param'+str(i+1) + ']',escape(val))
# print listrepeatT
listrepeatT=listrepeatT.replace('[' + regexname+'.param'+str(0) + ']',str(rnumber))
#newcopy = urllib.quote(repr(newcopy))
# print 'new regex list', repr(newcopy), repr(listrepeatT)
# addLink(listlinkT,listtitleT.encode('utf-8', 'ignore'),listthumbnailT,'','','','',True,None,newcopy, len(ret))
regex_xml=''
# print 'newcopy',newcopy
if len(newcopy)>0:
regex_xml=d2x(newcopy,'lsproroot')
regex_xml=regex_xml.split('<lsproroot>')[1].split('</lsproroot')[0]
#ln+='\n<item>%s\n%s</item>'%(listrepeatT.encode("utf-8"),regex_xml)
try:
ln+='\n<item>%s\n%s</item>'%(listrepeatT,regex_xml)
except: ln+='\n<item>%s\n%s</item>'%(listrepeatT.encode("utf-8"),regex_xml)
except: traceback.print_exc(file=sys.stdout)
# print repr(ln)
# print newcopy
# ln+='</item>'
#print 'ln',ln
addon_log(repr(ln))
getData('','',ln)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
    else:
        # Plain (non-listrepeat) regex: resolve it to a final media URL and
        # a flag saying whether setResolvedUrl should be used.
        url,setresolved = getRegexParsed(regexs, url)
        #print repr(url),setresolved,'imhere'
        if url:
            if '$PLAYERPROXY$=' in url:
                # The resolved URL carries an embedded proxy spec of the
                # form 'ip:port' or 'user:pass@ip:port'.
                url,proxy=url.split('$PLAYERPROXY$=')
                print 'proxy',proxy
                #Jairox mod for proxy auth
                proxyuser = None
                proxypass = None
                if len(proxy) > 0 and '@' in proxy:
                    # NOTE(review): this parsing assumes neither user nor
                    # password contains ':' or '@' -- confirm the upstream
                    # format before relying on it.
                    proxy = proxy.split(':')
                    proxyuser = proxy[0]
                    proxypass = proxy[1].split('@')[0]
                    proxyip = proxy[1].split('@')[1]
                    port = proxy[2]
                else:
                    proxyip,port=proxy.split(':')
                playmediawithproxy(url,name,iconimage,proxyip,port, proxyuser,proxypass) #jairox
            else:
                playsetresolved(url,name,iconimage,setresolved,regexs)
        else:
            xbmc.executebuiltin("XBMC.Notification(pancas,Failed to extract regex. - "+"this"+",4000,"+icon+")")
elif mode==18:
addon_log("youtubedl")
try:
import youtubedl
except Exception:
xbmc.executebuiltin("XBMC.Notification(pancas,Please [COLOR yellow]install Youtube-dl[/COLOR] module ,10000,"")")
stream_url=youtubedl.single_YD(url)
playsetresolved(stream_url,name,iconimage)
elif mode==19:
temp=url.encode("utf-8")
if temp.find("openload") == -1:
addon_log("Genesiscommonresolvers")
tempww = request3(url)
##playsetresolved (tempww,name,iconimage,True)
playsetresolved (urlsolver(url),name,iconimage,True)
else:
addon_log("Cena Openload")
tempww = request3(url)
playsetresolved (tempww,name,iconimage,True)
elif mode==21:
addon_log("download current file using youtube-dl service")
ytdl_download('',name,'video')
elif mode==23:
addon_log("get info then download")
ytdl_download(url,name,'video')
elif mode==24:
addon_log("Audio only youtube download")
ytdl_download(url,name,'audio')
elif mode==25:
addon_log("Searchin Other plugins")
_search(url,name)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==55:
addon_log("enabled lock")
parentalblockedpin =addon.getSetting('parentalblockedpin')
keyboard = xbmc.Keyboard('','Enter Pin')
keyboard.doModal()
if not (keyboard.isConfirmed() == False):
newStr = keyboard.getText()
if newStr==parentalblockedpin:
addon.setSetting('parentalblocked', "false")
xbmc.executebuiltin("XBMC.Notification(pancas,Parental Block Disabled,5000,"+icon+")")
else:
xbmc.executebuiltin("XBMC.Notification(pancas,Wrong Pin??,5000,"+icon+")")
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==56:
addon_log("disable lock")
addon.setSetting('parentalblocked', "true")
xbmc.executebuiltin("XBMC.Notification(pancas,Parental block enabled,5000,"+icon+")")
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==53:
addon_log("Requesting JSON-RPC Items")
pluginquerybyJSON(url)
#xbmcplugin.endOfDirectory(int(sys.argv[1]))
if not viewmode==None:
print 'setting view mode'
xbmc.executebuiltin("Container.SetViewMode(%s)"%viewmode)
| gpl-2.0 |
todaychi/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/pipchecker.py | 35 | 11303 | import os
import pip
import sys
import json
from distutils.version import LooseVersion
from django.core.management.base import NoArgsCommand
from django_extensions.management.color import color_style
from optparse import make_option
from pip.req import parse_requirements
from django_extensions.management.utils import signalcommand
try:
from urllib.parse import urlparse
from urllib.error import HTTPError
from urllib.request import Request, urlopen
from xmlrpc.client import ServerProxy
except ImportError:
# Python 2
from urlparse import urlparse
from urllib2 import HTTPError, Request, urlopen
from xmlrpclib import ServerProxy
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
class Command(NoArgsCommand):
    """Management command that reports out-of-date pinned requirements.

    Requirements are read from -r/--requirement files (default:
    requirements.txt or requirements/*.txt), then checked in three passes:
    pypi-pinned packages, github-frozen packages, and everything else.
    """
    option_list = NoArgsCommand.option_list + (
        make_option(
            "-t", "--github-api-token", action="store", dest="github_api_token",
            help="A github api authentication token."
        ),
        make_option(
            "-r", "--requirement", action="append", dest="requirements",
            default=[], metavar="FILENAME",
            help="Check all the packages listed in the given requirements file. "
            "This option can be used multiple times."
        ),
        make_option(
            "-n", "--newer", action="store_true", dest="show_newer",
            help="Also show when newer version then available is installed."
        ),
    )
    help = "Scan pip requirement files for out-of-date packages."

    @signalcommand
    def handle_noargs(self, **options):
        """Collect requirement files, parse them, then run the three checks."""
        self.style = color_style()
        self.options = options
        if options["requirements"]:
            req_files = options["requirements"]
        elif os.path.exists("requirements.txt"):
            req_files = ["requirements.txt"]
        elif os.path.exists("requirements"):
            # Fall back to every *.txt file inside a requirements/ directory.
            req_files = ["requirements/{0}".format(f) for f in os.listdir("requirements")
                         if os.path.isfile(os.path.join("requirements", f)) and
                         f.lower().endswith(".txt")]
        else:
            sys.exit("requirements not found")
        self.reqs = {}
        for filename in req_files:
            # parse_requirements expects an options object with these two
            # attributes; a throwaway stand-in is enough.
            class Object(object):
                pass
            mockoptions = Object()
            mockoptions.default_vcs = "git"
            mockoptions.skip_requirements_regex = None
            for req in parse_requirements(filename, options=mockoptions):
                self.reqs[req.name] = {
                    "pip_req": req,
                    "url": req.url,
                }
        if options["github_api_token"]:
            self.github_api_token = options["github_api_token"]
        elif os.environ.get("GITHUB_API_TOKEN"):
            self.github_api_token = os.environ.get("GITHUB_API_TOKEN")
        else:
            self.github_api_token = None  # only 50 requests per hour
        self.check_pypi()
        if HAS_REQUESTS:
            self.check_github()
        else:
            print(self.style.ERROR("Cannot check github urls. The requests library is not installed. ( pip install requests )"))
        self.check_other()

    def _urlopen_as_json(self, url, headers=None):
        """Shortcut returning the url's response body parsed as JSON."""
        req = Request(url, headers=headers)
        return json.loads(urlopen(req).read())

    def check_pypi(self):
        """
        If the requirement is frozen to pypi, check for a new version.

        Each handled requirement is removed from self.reqs so the later
        passes only see what this one could not classify.
        """
        # Attach the locally installed distribution (if any) to each req.
        for dist in pip.get_installed_distributions():
            name = dist.project_name
            if name in self.reqs.keys():
                self.reqs[name]["dist"] = dist
        pypi = ServerProxy("http://pypi.python.org/pypi")
        for name, req in list(self.reqs.items()):
            if req["url"]:
                continue  # skipping github packages.
            elif "dist" in req:
                dist = req["dist"]
                dist_version = LooseVersion(dist.version)
                available = pypi.package_releases(req["pip_req"].url_name)
                try:
                    available_version = LooseVersion(available[0])
                except IndexError:
                    available_version = None
                if not available_version:
                    msg = self.style.WARN("release is not on pypi (check capitalization and/or --extra-index-url)")
                elif self.options['show_newer'] and dist_version > available_version:
                    msg = self.style.INFO("{0} available (newer installed)".format(available_version))
                elif available_version > dist_version:
                    msg = self.style.INFO("{0} available".format(available_version))
                else:
                    # Up-to-date packages are silently dropped (nothing is
                    # printed for them); the msg assignment is unused here.
                    msg = "up to date"
                    del self.reqs[name]
                    continue
                pkg_info = self.style.BOLD("{dist.project_name} {dist.version}".format(dist=dist))
            else:
                msg = "not installed"
                pkg_info = name
            print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
            del self.reqs[name]

    def check_github(self):
        """
        If the requirement is frozen to a github url, check for new commits.

        API Tokens
        ----------
        For more than 50 github api calls per hour, pipchecker requires
        authentication with the github api by setting the environment
        variable ``GITHUB_API_TOKEN`` or setting the command flag
        ``--github-api-token='mytoken'``.

        To create a github api token for use at the command line::

            curl -u 'rizumu' -d '{"scopes":["repo"], "note":"pipchecker"}' https://api.github.com/authorizations

        For more info on github api tokens:

            https://help.github.com/articles/creating-an-oauth-token-for-command-line-use
            http://developer.github.com/v3/oauth/#oauth-authorizations-api

        Requirement Format
        ------------------
        Pipchecker gets the sha of the frozen repo and checks if it is
        found at the head of any branch. If it is not found then
        the requirement is considered to be out of date.

        Therefore, freezing at the commit hash will provide the expected
        results, but if freezing at a branch or tag name, pipchecker will
        not be able to determine with certainty if the repo is out of date.

        Freeze at the commit hash (sha)::

            git+git://github.com/django/django.git@393c268e725f5b229ecb554f3fac02cfc250d2df#egg=Django

        Freeze with a branch name::

            git+git://github.com/django/django.git@master#egg=Django

        Freeze with a tag::

            git+git://github.com/django/django.git@1.5b2#egg=Django

        Do not freeze::

            git+git://github.com/django/django.git#egg=Django
        """
        for name, req in list(self.reqs.items()):
            req_url = req["url"]
            if not req_url:
                continue
            if req_url.startswith("git") and "github.com/" not in req_url:
                continue
            if req_url.endswith(".tar.gz") or req_url.endswith(".tar.bz2") or req_url.endswith(".zip"):
                continue
            headers = {
                "content-type": "application/json",
            }
            if self.github_api_token:
                headers["Authorization"] = "token {0}".format(self.github_api_token)
            try:
                user, repo = urlparse(req_url).path.split("#")[0].strip("/").rstrip("/").split("/")
            except (ValueError, IndexError) as e:
                print(self.style.ERROR("\nFailed to parse %r: %s\n" % (req_url, e)))
                continue
            # Probe the API once per requirement to surface auth/rate-limit
            # problems early and abort the whole pass on them.
            try:
                #test_auth = self._urlopen_as_json("https://api.github.com/django/", headers=headers)
                test_auth = requests.get("https://api.github.com/django/", headers=headers).json()
            except HTTPError as e:
                print("\n%s\n" % str(e))
                return
            if "message" in test_auth and test_auth["message"] == "Bad credentials":
                print(self.style.ERROR("\nGithub API: Bad credentials. Aborting!\n"))
                return
            elif "message" in test_auth and test_auth["message"].startswith("API Rate Limit Exceeded"):
                print(self.style.ERROR("\nGithub API: Rate Limit Exceeded. Aborting!\n"))
                return
            # Extract the frozen sha (if any) from '<repo>.git@<sha>' or
            # '<repo>@<sha>' style urls.
            frozen_commit_sha = None
            if ".git" in repo:
                repo_name, frozen_commit_full = repo.split(".git")
                if frozen_commit_full.startswith("@"):
                    frozen_commit_sha = frozen_commit_full[1:]
            elif "@" in repo:
                repo_name, frozen_commit_sha = repo.split("@")
            if frozen_commit_sha is None:
                msg = self.style.ERROR("repo is not frozen")
            if frozen_commit_sha:
                branch_url = "https://api.github.com/repos/{0}/{1}/branches".format(user, repo_name)
                #branch_data = self._urlopen_as_json(branch_url, headers=headers)
                branch_data = requests.get(branch_url, headers=headers).json()
                frozen_commit_url = "https://api.github.com/repos/{0}/{1}/commits/{2}".format(
                    user, repo_name, frozen_commit_sha
                )
                #frozen_commit_data = self._urlopen_as_json(frozen_commit_url, headers=headers)
                frozen_commit_data = requests.get(frozen_commit_url, headers=headers).json()
                if "message" in frozen_commit_data and frozen_commit_data["message"] == "Not Found":
                    msg = self.style.ERROR("{0} not found in {1}. Repo may be private.".format(frozen_commit_sha[:10], name))
                elif frozen_commit_sha in [branch["commit"]["sha"] for branch in branch_data]:
                    msg = self.style.BOLD("up to date")
                else:
                    msg = self.style.INFO("{0} is not the head of any branch".format(frozen_commit_data["sha"][:10]))
            if "dist" in req:
                pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
            elif frozen_commit_sha is None:
                pkg_info = name
            else:
                pkg_info = "{0} {1}".format(name, frozen_commit_sha[:10])
            print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
            del self.reqs[name]

    def check_other(self):
        """
        If the requirement is frozen somewhere other than pypi or github, skip.

        If you have a private pypi or use --extra-index-url, consider contributing
        support here.
        """
        if self.reqs:
            print(self.style.ERROR("\nOnly pypi and github based requirements are supported:"))
            for name, req in self.reqs.items():
                if "dist" in req:
                    pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
                elif "url" in req:
                    pkg_info = "{url}".format(url=req["url"])
                else:
                    pkg_info = "unknown package"
                print(self.style.BOLD("{pkg_info:40} is not a pypi or github requirement".format(pkg_info=pkg_info)))
| apache-2.0 |
pchauncey/ansible | lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py | 10 | 11276 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms.
- Metrics you wish to alarm on must already exist.
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of times in which the metric is evaluated before final calculation
required: false
unit:
description:
- The threshold's unit of measurement
required: false
choices:
- 'Seconds'
- 'Microseconds'
- 'Milliseconds'
- 'Bytes'
- 'Kilobytes'
- 'Megabytes'
- 'Gigabytes'
- 'Terabytes'
- 'Bits'
- 'Kilobits'
- 'Megabits'
- 'Gigabits'
- 'Terabits'
- 'Percent'
- 'Count'
- 'Bytes/Second'
- 'Kilobytes/Second'
- 'Megabytes/Second'
- 'Gigabytes/Second'
- 'Terabytes/Second'
- 'Bits/Second'
- 'Kilobits/Second'
- 'Megabits/Second'
- 'Gigabits/Second'
- 'Terabits/Second'
- 'Count/Second'
- 'None'
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
- A list of the names action(s) taken when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import MetricAlarm
from boto.exception import BotoServerError, NoAuthHandlerFound
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def create_metric_alarm(connection, module):
    """Create a CloudWatch metric alarm, or update an existing one in place.

    Exits the module via ``module.exit_json`` with ``changed`` plus the full
    alarm state, or via ``module.fail_json`` on a boto server error.

    :param connection: boto CloudWatch connection.
    :param module: AnsibleModule carrying the alarm parameters.
    """
    name = module.params.get('name')
    metric = module.params.get('metric')
    namespace = module.params.get('namespace')
    statistic = module.params.get('statistic')
    comparison = module.params.get('comparison')
    threshold = module.params.get('threshold')
    period = module.params.get('period')
    evaluation_periods = module.params.get('evaluation_periods')
    unit = module.params.get('unit')
    description = module.params.get('description')
    dimensions = module.params.get('dimensions')
    alarm_actions = module.params.get('alarm_actions')
    insufficient_data_actions = module.params.get('insufficient_data_actions')
    ok_actions = module.params.get('ok_actions')

    alarms = connection.describe_alarms(alarm_names=[name])

    if not alarms:
        # No alarm with this name yet: create it from scratch.
        alm = MetricAlarm(
            name=name,
            metric=metric,
            namespace=namespace,
            statistic=statistic,
            comparison=comparison,
            threshold=threshold,
            period=period,
            evaluation_periods=evaluation_periods,
            unit=unit,
            description=description,
            dimensions=dimensions,
            alarm_actions=alarm_actions,
            insufficient_data_actions=insufficient_data_actions,
            ok_actions=ok_actions
        )
        try:
            connection.create_alarm(alm)
            changed = True
            # Re-read so the exit payload reflects the stored alarm.
            alarms = connection.describe_alarms(alarm_names=[name])
        except BotoServerError as e:
            module.fail_json(msg=str(e))
    else:
        # Alarm exists: diff each scalar attribute against the requested
        # parameters and update the boto object in place.
        alarm = alarms[0]
        changed = False
        for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
            if getattr(alarm, attr) != module.params.get(attr):
                changed = True
                setattr(alarm, attr, module.params.get(attr))
        #this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
        comparison = alarm.comparison
        comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
        alarm.comparison = comparisons[comparison]

        # Dimensions: normalise requested values to lists (CloudWatch stores
        # them as lists) before comparing against the live alarm.
        dim1 = module.params.get('dimensions')
        dim2 = alarm.dimensions

        for keys in dim1:
            if not isinstance(dim1[keys], list):
                dim1[keys] = [dim1[keys]]
            if keys not in dim2 or dim1[keys] != dim2[keys]:
                changed=True
                setattr(alarm, 'dimensions', dim1)

        for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
            action = module.params.get(attr) or []
            # Boto and/or ansible may provide same elements in lists but in different order.
            # Compare on sets since they do not need any order.
            if set(getattr(alarm, attr)) != set(action):
                changed = True
                # NOTE(review): this assigns the raw parameter, so an omitted
                # action list (None) overwrites the attribute with None rather
                # than [] -- confirm boto tolerates that before relying on it.
                setattr(alarm, attr, module.params.get(attr))

        try:
            if changed:
                # create_alarm on an existing name performs an update.
                connection.create_alarm(alarm)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
    result = alarms[0]
    module.exit_json(changed=changed, name=result.name,
                     actions_enabled=result.actions_enabled,
                     alarm_actions=result.alarm_actions,
                     alarm_arn=result.alarm_arn,
                     comparison=result.comparison,
                     description=result.description,
                     dimensions=result.dimensions,
                     evaluation_periods=result.evaluation_periods,
                     insufficient_data_actions=result.insufficient_data_actions,
                     last_updated=result.last_updated,
                     metric=result.metric,
                     namespace=result.namespace,
                     ok_actions=result.ok_actions,
                     period=result.period,
                     state_reason=result.state_reason,
                     state_value=result.state_value,
                     statistic=result.statistic,
                     threshold=result.threshold,
                     unit=result.unit)
def delete_metric_alarm(connection, module):
    """Delete the named CloudWatch alarm if it exists.

    Exits the module with ``changed=True`` after removing a matching alarm,
    or ``changed=False`` when nothing matched; fails on a boto server error.
    """
    alarm_name = module.params.get('name')
    existing = connection.describe_alarms(alarm_names=[alarm_name])
    if not existing:
        # Nothing to remove: report an unchanged state.
        module.exit_json(changed=False)
    else:
        try:
            connection.delete_alarms([alarm_name])
            module.exit_json(changed=True)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
def main():
    """Entry point: build the argument spec, connect to CloudWatch, dispatch.

    ``state=present`` creates/updates the alarm, ``state=absent`` deletes it.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            metric=dict(type='str'),
            namespace=dict(type='str'),
            statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
            comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
            threshold=dict(type='float'),
            period=dict(type='int'),
            unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes',
                                           'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second',
                                           'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
                                           'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
            evaluation_periods=dict(type='int'),
            description=dict(type='str'),
            dimensions=dict(type='dict', default={}),
            alarm_actions=dict(type='list'),
            insufficient_data_actions=dict(type='list'),
            ok_actions=dict(type='list'),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
        except (NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        # CloudWatch is regional; there is no sensible default to fall back to.
        module.fail_json(msg="region must be specified")

    if state == 'present':
        create_metric_alarm(connection, module)
    elif state == 'absent':
        delete_metric_alarm(connection, module)
| gpl-3.0 |
leonsooi/pymel | tests/TestPymel.py | 8 | 1122 | #!/usr/bin/env mayapy
import unittest
import os
"""
This module is for integrating pymel tests into a larger unittest framework.
If you just wish to test pymel, use pymel_test instead.
"""
import sys
import inspect
# Guard flag: set to True whenever a prerequisite (nose, pymel_test) is
# missing, so the test class is only defined when it can actually run.
gCantRun = False
try:
    import nose
except ImportError:
    gCantRun = True
    print('** nose module required for this test **')

if not gCantRun:
    # Directory containing this file, resolved via a throwaway lambda so it
    # works even when __file__ is unavailable.
    thisDir = os.path.dirname(inspect.getsourcefile( lambda:None ))
    try:
        import pymel_test
    except ImportError:
        # pymel_test may only be importable once this directory is on the
        # path; retry after appending it.
        sys.path.append(thisDir)
        try:
            import pymel_test
        except ImportError:
            gCantRun = True
            import traceback
            print('** error importing pymel_test: **')
            traceback.print_exc()

if not gCantRun:
    class TestPymel(unittest.TestCase):
        """Adapter that runs the whole pymel test suite as one unittest case."""
        # Repository root: the parent of the directory holding this file.
        pymelDir = os.path.dirname(thisDir)
        def testPymel(self):
            pymel_test.nose_test(pymelDir=self.pymelDir)

if __name__ == '__main__':
    #from pymel import core
    suite = unittest.TestLoader().loadTestsFromTestCase(TestPymel)
    unittest.TextTestRunner(verbosity=2).run(suite)
divya-csekar/flask-microblog-server | flask/Lib/site-packages/werkzeug/testapp.py | 294 | 9398 | # -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import werkzeug
from textwrap import wrap
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.utils import escape
import base64
logo = Response(base64.b64decode(
'''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
='''), mimetype='image/png')
TEMPLATE = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(http://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
'''
def iter_sys_path():
    """Yield ``(display_path, is_invalid, was_expanded)`` for each entry of
    ``sys.path``.

    ``display_path`` abbreviates the user's home directory to ``~`` on POSIX,
    ``is_invalid`` is true when the resolved entry is not a directory, and
    ``was_expanded`` is true when the entry was relative and had to be
    resolved against the current working directory.
    """
    if os.name == 'posix':
        home = os.path.expanduser('~')
        def shorten(entry):
            if entry.startswith(home):
                return '~' + entry[len(home):]
            return entry
    else:
        # Non-POSIX platforms: show the path unchanged.
        def shorten(entry):
            return entry

    base = os.path.abspath(os.getcwd())
    for raw in sys.path:
        resolved = os.path.join(base, raw or os.path.curdir)
        display = shorten(os.path.normpath(resolved))
        yield display, not os.path.isdir(resolved), resolved != raw
def render_testapp(req):
    """Render the WSGI information page as a UTF-8 encoded HTML byte string.

    Collects three data sets -- installed eggs (via ``pkg_resources`` when
    available), the request's WSGI environment, and the ``sys.path`` entries
    -- and interpolates them into the module-level ``TEMPLATE``.
    """
    try:
        import pkg_resources
    except ImportError:
        # setuptools is not installed; show an empty egg list.
        eggs = ()
    else:
        eggs = sorted(pkg_resources.working_set,
                      key=lambda x: x.project_name.lower())
    python_eggs = []
    for egg in eggs:
        try:
            version = egg.version
        except (ValueError, AttributeError):
            # Some distributions expose no parseable version.
            version = 'unknown'
        python_eggs.append('<li>%s <small>[%s]</small>' % (
            escape(egg.project_name),
            escape(version)
        ))
    wsgi_env = []
    # Sort environ keys case-insensitively via their repr for stable output.
    sorted_environ = sorted(req.environ.items(),
                            key=lambda x: repr(x[0]).lower())
    for key, value in sorted_environ:
        # Values are repr()'d and soft-wrapped so long entries don't break
        # the table layout; everything is HTML-escaped.
        wsgi_env.append('<tr><th>%s<td><code>%s</code>' % (
            escape(str(key)),
            ' '.join(wrap(escape(repr(value))))
        ))
    sys_path = []
    for item, virtual, expanded in iter_sys_path():
        # CSS classes mark invalid ("virtual") and expanded-relative ("exp")
        # entries; see the <style> block in TEMPLATE.
        class_ = []
        if virtual:
            class_.append('virtual')
        if expanded:
            class_.append('exp')
        sys_path.append('<li%s>%s' % (
            class_ and ' class="%s"' % ' '.join(class_) or '',
            escape(item)
        ))
    return (TEMPLATE % {
        'python_version': '<br>'.join(escape(sys.version).splitlines()),
        'platform': escape(sys.platform),
        'os': escape(os.name),
        'api_version': sys.api_version,
        'byteorder': sys.byteorder,
        'werkzeug_version': werkzeug.__version__,
        'python_eggs': '\n'.join(python_eggs),
        'wsgi_env': '\n'.join(wsgi_env),
        'sys_path': '\n'.join(sys_path)
    }).encode('utf-8')
def test_app(environ, start_response):
    """Simple test application that dumps the environment. You can use
    it to check if Werkzeug is working properly:

    .. sourcecode:: pycon

        >>> from werkzeug.serving import run_simple
        >>> from werkzeug.testapp import test_app
        >>> run_simple('localhost', 3000, test_app)
         * Running on http://localhost:3000/

    The application displays important information from the WSGI environment,
    the Python interpreter and the installed libraries.
    """
    # populate_request=False keeps the request off environ['werkzeug.request'].
    req = Request(environ, populate_request=False)
    if req.args.get('resource') == 'logo':
        # Serve the embedded PNG logo (module-level ``logo`` response).
        response = logo
    else:
        response = Response(render_testapp(req), mimetype='text/html')
    return response(environ, start_response)
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('localhost', 5000, test_app, use_reloader=True)
| bsd-3-clause |
eayunstack/nova | nova/virt/xenapi/network_utils.py | 86 | 1926 | # Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of network
records and their attributes like bridges, PIFs, QoS, as well as
their lookup functions.
"""
from nova import exception
from nova.i18n import _
def find_network_with_name_label(session, name_label):
    """Return the unique network whose name_label matches, or None.

    Raises NovaException when more than one network carries the label.
    """
    matches = session.network.get_by_name_label(name_label)
    if not matches:
        return None
    if len(matches) > 1:
        raise exception.NovaException(
            _('Found non-unique network for name_label %s') %
            name_label)
    return matches[0]
def find_network_with_bridge(session, bridge):
    """Return the network on which the bridge is attached, if found.

    The bridge is defined in the nova db and can be found either in the
    'bridge' or 'name_label' fields of the XenAPI network record.

    :raises NovaException: when zero or more than one network matches.
    """
    expr = ('field "name__label" = "%s" or field "bridge" = "%s"' %
            (bridge, bridge))
    networks = session.network.get_all_records_where(expr)
    if len(networks) == 1:
        # ``networks`` is a dict keyed by network ref.  ``networks.keys()[0]``
        # breaks on Python 3 where dict views are not indexable; listing the
        # dict works identically on Python 2 and 3.
        return list(networks)[0]
    elif len(networks) > 1:
        raise exception.NovaException(
            _('Found non-unique network for bridge %s') % bridge)
    else:
        raise exception.NovaException(
            _('Found no network for bridge %s') % bridge)
| apache-2.0 |
sasukeh/neutron | neutron/tests/unit/quota/test_resource_registry.py | 15 | 7717 | # Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron import context
from neutron.quota import resource
from neutron.quota import resource_registry
from neutron.tests import base
from neutron.tests.unit import quota as test_quota
class TestResourceRegistry(base.DietTestCase):
    """Unit tests for ResourceRegistry tracked-resource registration."""
    def setUp(self):
        super(TestResourceRegistry, self).setUp()
        self.registry = resource_registry.ResourceRegistry.get_instance()
        # clean up the registry at every test
        self.registry.unregister_resources()
    def test_set_tracked_resource_new_resource(self):
        self.registry.set_tracked_resource('meh', test_quota.MehModel)
        self.assertEqual(test_quota.MehModel,
                         self.registry._tracked_resource_mappings['meh'])
    def test_set_tracked_resource_existing_with_override(self):
        # Reuse the previous test to seed the registry with 'meh'.
        self.test_set_tracked_resource_new_resource()
        self.registry.set_tracked_resource('meh', test_quota.OtherMehModel,
                                           override=True)
        # Override is set to True, the model class should change
        self.assertEqual(test_quota.OtherMehModel,
                         self.registry._tracked_resource_mappings['meh'])
    def test_set_tracked_resource_existing_no_override(self):
        self.test_set_tracked_resource_new_resource()
        self.registry.set_tracked_resource('meh', test_quota.OtherMehModel)
        # Override is set to false, the model class should not change
        self.assertEqual(test_quota.MehModel,
                         self.registry._tracked_resource_mappings['meh'])
    def _test_register_resource_by_name(self, resource_name, expected_type):
        # Shared helper: register a resource and verify the instance type.
        self.assertNotIn(resource_name, self.registry._resources)
        self.registry.register_resource_by_name(resource_name)
        self.assertIn(resource_name, self.registry._resources)
        self.assertIsInstance(self.registry.get_resource(resource_name),
                              expected_type)
    def test_register_resource_by_name_tracked(self):
        self.test_set_tracked_resource_new_resource()
        self._test_register_resource_by_name('meh', resource.TrackedResource)
    def test_register_resource_by_name_not_tracked(self):
        self._test_register_resource_by_name('meh', resource.CountableResource)
    def test_register_resource_by_name_with_tracking_disabled_by_config(self):
        cfg.CONF.set_override('track_quota_usage', False,
                              group='QUOTAS')
        # DietTestCase does not automatically cleans configuration overrides
        self.addCleanup(cfg.CONF.reset)
        self.registry.set_tracked_resource('meh', test_quota.MehModel)
        # With tracking disabled the mapping must not be recorded ...
        self.assertNotIn(
            'meh', self.registry._tracked_resource_mappings)
        # ... and registration falls back to a countable resource.
        self._test_register_resource_by_name('meh', resource.CountableResource)
class TestAuxiliaryFunctions(base.DietTestCase):
    """Tests for module-level helpers: resync_resource / set_resources_dirty."""
    def setUp(self):
        super(TestAuxiliaryFunctions, self).setUp()
        self.registry = resource_registry.ResourceRegistry.get_instance()
        # clean up the registry at every test
        self.registry.unregister_resources()
    def test_resync_tracking_disabled(self):
        cfg.CONF.set_override('track_quota_usage', False,
                              group='QUOTAS')
        # DietTestCase does not automatically cleans configuration overrides
        self.addCleanup(cfg.CONF.reset)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.resync') as mock_resync:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id')
            # Tracking disabled -> resync must be a no-op.
            self.assertEqual(0, mock_resync.call_count)
    def test_resync_tracked_resource(self):
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.resync') as mock_resync:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id')
            mock_resync.assert_called_once_with(mock.ANY, 'tenant_id')
    def test_resync_non_tracked_resource(self):
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.resync') as mock_resync:
            # No set_tracked_resource call: 'meh' is countable, not tracked.
            self.registry.register_resource_by_name('meh')
            resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id')
            self.assertEqual(0, mock_resync.call_count)
    def test_set_resources_dirty_invoked_with_tracking_disabled(self):
        cfg.CONF.set_override('track_quota_usage', False,
                              group='QUOTAS')
        # DietTestCase does not automatically cleans configuration overrides
        self.addCleanup(cfg.CONF.reset)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.mark_dirty') as mock_mark_dirty:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            resource_registry.set_resources_dirty(mock.ANY)
            self.assertEqual(0, mock_mark_dirty.call_count)
    def test_set_resources_dirty_no_dirty_resource(self):
        ctx = context.Context('user_id', 'tenant_id',
                              is_admin=False, is_advsvc=False)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.mark_dirty') as mock_mark_dirty:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            res = self.registry.get_resource('meh')
            # This ensures dirty is false
            res._dirty_tenants.clear()
            resource_registry.set_resources_dirty(ctx)
            self.assertEqual(0, mock_mark_dirty.call_count)
    def test_set_resources_dirty_no_tracked_resource(self):
        ctx = context.Context('user_id', 'tenant_id',
                              is_admin=False, is_advsvc=False)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.mark_dirty') as mock_mark_dirty:
            self.registry.register_resource_by_name('meh')
            resource_registry.set_resources_dirty(ctx)
            self.assertEqual(0, mock_mark_dirty.call_count)
    def test_set_resources_dirty(self):
        ctx = context.Context('user_id', 'tenant_id',
                              is_admin=False, is_advsvc=False)
        with mock.patch('neutron.quota.resource.'
                        'TrackedResource.mark_dirty') as mock_mark_dirty:
            self.registry.set_tracked_resource('meh', test_quota.MehModel)
            self.registry.register_resource_by_name('meh')
            res = self.registry.get_resource('meh')
            # This ensures dirty is true
            res._dirty_tenants.add('tenant_id')
            resource_registry.set_resources_dirty(ctx)
            mock_mark_dirty.assert_called_once_with(ctx)
| apache-2.0 |
Dapid/scipy | scipy/special/tests/test_lambertw.py | 30 | 3946 | #
# Tests for the lambertw function,
# Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il
# Distributed under the same license as SciPy itself.
#
# [1] mpmath source code, Subversion revision 992
# http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_almost_equal
from scipy.special import lambertw
from numpy import nan, inf, pi, e, isnan, log, r_, array, complex_
from scipy.special._testutils import FuncData
def test_values():
    """Spot-check lambertw against reference values (adapted from mpmath).

    Each row of ``data`` is ``(z, branch_k, expected_w)``; FuncData checks
    ``lambertw(z, k)`` against the expected value to tight tolerances.
    """
    assert_(isnan(lambertw(nan)))
    # Branch-1 behaviour at the infinities (real part inf, fixed imag part).
    assert_equal(lambertw(inf,1).real, inf)
    assert_equal(lambertw(inf,1).imag, 2*pi)
    assert_equal(lambertw(-inf,1).real, inf)
    assert_equal(lambertw(-inf,1).imag, 3*pi)
    # Branch argument defaults to the principal branch k=0.
    assert_equal(lambertw(1.), lambertw(1., 0))
    data = [
        (0,0, 0),
        (0+0j,0, 0),
        (inf,0, inf),
        (0,-1, -inf),
        (0,1, -inf),
        (0,3, -inf),
        (e,0, 1),
        (1,0, 0.567143290409783873),
        (-pi/2,0, 1j*pi/2),
        (-log(2)/2,0, -log(2)),
        (0.25,0, 0.203888354702240164),
        (-0.25,0, -0.357402956181388903),
        (-1./10000,0, -0.000100010001500266719),
        (-0.25,-1, -2.15329236411034965),
        (0.25,-1, -3.00899800997004620-4.07652978899159763j),
        (-0.25,-1, -2.15329236411034965),
        (0.25,1, -3.00899800997004620+4.07652978899159763j),
        (-0.25,1, -3.48973228422959210+7.41405453009603664j),
        (-4,0, 0.67881197132094523+1.91195078174339937j),
        (-4,1, -0.66743107129800988+7.76827456802783084j),
        (-4,-1, 0.67881197132094523-1.91195078174339937j),
        (1000,0, 5.24960285240159623),
        (1000,1, 4.91492239981054535+5.44652615979447070j),
        (1000,-1, 4.91492239981054535-5.44652615979447070j),
        (1000,5, 3.5010625305312892+29.9614548941181328j),
        (3+4j,0, 1.281561806123775878+0.533095222020971071j),
        (-0.4+0.4j,0, -0.10396515323290657+0.61899273315171632j),
        (3+4j,1, -0.11691092896595324+5.61888039871282334j),
        (3+4j,-1, 0.25856740686699742-3.85211668616143559j),
        (-0.5,-1, -0.794023632344689368-0.770111750510379110j),
        (-1./10000,1, -11.82350837248724344+6.80546081842002101j),
        (-1./10000,-1, -11.6671145325663544),
        (-1./10000,-2, -11.82350837248724344-6.80546081842002101j),
        (-1./100000,4, -14.9186890769540539+26.1856750178782046j),
        (-1./100000,5, -15.0931437726379218666+32.5525721210262290086j),
        ((2+1j)/10,0, 0.173704503762911669+0.071781336752835511j),
        ((2+1j)/10,1, -3.21746028349820063+4.56175438896292539j),
        ((2+1j)/10,-1, -3.03781405002993088-3.53946629633505737j),
        ((2+1j)/10,4, -4.6878509692773249+23.8313630697683291j),
        (-(2+1j)/10,0, -0.226933772515757933-0.164986470020154580j),
        (-(2+1j)/10,1, -2.43569517046110001+0.76974067544756289j),
        (-(2+1j)/10,-1, -3.54858738151989450-6.91627921869943589j),
        (-(2+1j)/10,4, -4.5500846928118151+20.6672982215434637j),
        (pi,0, 1.073658194796149172092178407024821347547745350410314531),
        # Former bug in generated branch,
        (-0.5+0.002j,0, -0.78917138132659918344 + 0.76743539379990327749j),
        (-0.5-0.002j,0, -0.78917138132659918344 - 0.76743539379990327749j),
        (-0.448+0.4j,0, -0.11855133765652382241 + 0.66570534313583423116j),
        (-0.448-0.4j,0, -0.11855133765652382241 - 0.66570534313583423116j),
    ]
    data = array(data, dtype=complex_)
    def w(x, y):
        # FuncData passes the branch index as a complex column; take its
        # real part as an integer branch number.
        return lambertw(x, y.real.astype(int))
    # Silence expected overflow/invalid warnings from extreme inputs.
    olderr = np.seterr(all='ignore')
    try:
        FuncData(w, data, (0,1), 2, rtol=1e-10, atol=1e-13).check()
    finally:
        np.seterr(**olderr)
def test_ufunc():
    # lambertw must broadcast elementwise over array inputs like a ufunc.
    inputs = r_[0., e, 1.]
    expected = r_[0., 1., 0.567143290409783873]
    assert_array_almost_equal(lambertw(inputs), expected)
| bsd-3-clause |
goodwinnk/intellij-community | python/lib/Lib/site-packages/django/views/generic/create_update.py | 245 | 8935 | from django.forms.models import ModelFormMetaclass, ModelForm
from django.template import RequestContext, loader
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.xheaders import populate_xheaders
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.utils.translation import ugettext
from django.contrib.auth.views import redirect_to_login
from django.views.generic import GenericViewError
from django.contrib import messages
import warnings
# Warn once at import time that every view in this module is deprecated.
warnings.warn(
    'Function-based generic views have been deprecated; use class-based views instead.',
    PendingDeprecationWarning
)
def apply_extra_context(extra_context, context):
    """
    Adds items from extra_context dict to context. If a value in extra_context
    is callable, then it is called and the result is added to context.
    """
    # .items() instead of the Python-2-only .iteritems(): iteration order and
    # results are identical on Python 2, and the helper now also runs on
    # Python 3 where iteritems() no longer exists.
    for key, value in extra_context.items():
        if callable(value):
            context[key] = value()
        else:
            context[key] = value
def get_model_and_form_class(model, form_class):
    """
    Returns a model and form class based on the model and form_class
    parameters that were passed to the generic view.

    If ``form_class`` is given then its associated model will be returned along
    with ``form_class`` itself. Otherwise, if ``model`` is given, ``model``
    itself will be returned along with a ``ModelForm`` class created from
    ``model``.

    Raises ``GenericViewError`` when neither argument is provided.
    """
    if form_class:
        # The form already knows its model via its Meta options.
        return form_class._meta.model, form_class
    if model:
        # The inner Meta class fails if model = model is used for some reason.
        tmp_model = model
        # TODO: we should be able to construct a ModelForm without creating
        # and passing in a temporary inner class.
        class Meta:
            model = tmp_model
        class_name = model.__name__ + 'Form'
        # Build the ModelForm subclass dynamically, e.g. "ArticleForm".
        form_class = ModelFormMetaclass(class_name, (ModelForm,), {'Meta': Meta})
        return model, form_class
    raise GenericViewError("Generic view must be called with either a model or"
                           " form_class argument.")
def redirect(post_save_redirect, obj):
    """
    Build the post-save redirect for ``create_object`` / ``update_object``.

    ``post_save_redirect``, when given, is a format string interpolated with
    ``obj``'s instance dict (so field names may appear as ``%(field)s``
    placeholders).  When it is falsy, the redirect target falls back to
    ``obj.get_absolute_url()``; if neither is available the view is
    misconfigured and ``ImproperlyConfigured`` is raised.
    """
    if post_save_redirect:
        return HttpResponseRedirect(post_save_redirect % obj.__dict__)
    if hasattr(obj, 'get_absolute_url'):
        return HttpResponseRedirect(obj.get_absolute_url())
    raise ImproperlyConfigured(
        "No URL to redirect to. Either pass a post_save_redirect"
        " parameter to the generic view or define a get_absolute_url"
        " method on the Model.")
def lookup_object(model, object_id, slug, slug_field):
    """
    Return the ``model`` object with the passed ``object_id``. If
    ``object_id`` is None, then return the object whose ``slug_field``
    equals the passed ``slug``.

    Raises ``GenericViewError`` when neither an object_id nor a
    slug/slug_field pair is supplied, and ``Http404`` when no matching
    object exists.
    """
    lookup_kwargs = {}
    if object_id:
        # Look up by primary key, whatever the pk field is actually named.
        lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
    elif slug and slug_field:
        lookup_kwargs['%s__exact' % slug_field] = slug
    else:
        raise GenericViewError(
            "Generic view must be called with either an object_id or a"
            " slug/slug_field.")
    try:
        return model.objects.get(**lookup_kwargs)
    except ObjectDoesNotExist:
        raise Http404("No %s found for %s"
                      % (model._meta.verbose_name, lookup_kwargs))
def create_object(request, model=None, template_name=None,
        template_loader=loader, extra_context=None, post_save_redirect=None,
        login_required=False, context_processors=None, form_class=None):
    """
    Generic object-creation function.

    Templates: ``<app_label>/<model_name>_form.html``
    Context:
        form
            the form for the object

    On a valid POST the object is saved, a success message is queued and the
    response redirects via ``redirect()``; otherwise the (possibly bound)
    form is re-rendered.
    """
    if extra_context is None: extra_context = {}
    if login_required and not request.user.is_authenticated():
        return redirect_to_login(request.path)
    model, form_class = get_model_and_form_class(model, form_class)
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES)
        if form.is_valid():
            new_object = form.save()
            msg = ugettext("The %(verbose_name)s was created successfully.") %\
                {"verbose_name": model._meta.verbose_name}
            messages.success(request, msg, fail_silently=True)
            return redirect(post_save_redirect, new_object)
    else:
        # GET: render an unbound form.
        form = form_class()
    # Create the template, context, response
    if not template_name:
        template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'form': form,
    }, context_processors)
    apply_extra_context(extra_context, c)
    return HttpResponse(t.render(c))
def update_object(request, model=None, object_id=None, slug=None,
        slug_field='slug', template_name=None, template_loader=loader,
        extra_context=None, post_save_redirect=None, login_required=False,
        context_processors=None, template_object_name='object',
        form_class=None):
    """
    Generic object-update function.

    Templates: ``<app_label>/<model_name>_form.html``
    Context:
        form
            the form for the object
        object
            the original object being edited

    Raises ``Http404`` (via ``lookup_object``) when the target object does
    not exist.
    """
    if extra_context is None: extra_context = {}
    if login_required and not request.user.is_authenticated():
        return redirect_to_login(request.path)
    model, form_class = get_model_and_form_class(model, form_class)
    obj = lookup_object(model, object_id, slug, slug_field)
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=obj)
        if form.is_valid():
            obj = form.save()
            msg = ugettext("The %(verbose_name)s was updated successfully.") %\
                {"verbose_name": model._meta.verbose_name}
            messages.success(request, msg, fail_silently=True)
            return redirect(post_save_redirect, obj)
    else:
        # GET: render a form pre-populated from the existing object.
        form = form_class(instance=obj)
    if not template_name:
        template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'form': form,
        template_object_name: obj,
    }, context_processors)
    apply_extra_context(extra_context, c)
    response = HttpResponse(t.render(c))
    # Expose the object's pk in X-Object-* response headers for middleware.
    populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
    return response
def delete_object(request, model, post_delete_redirect, object_id=None,
        slug=None, slug_field='slug', template_name=None,
        template_loader=loader, extra_context=None, login_required=False,
        context_processors=None, template_object_name='object'):
    """
    Generic object-delete function.

    The given template will be used to confirm deletion if this view is
    fetched using GET; for safety, deletion will only be performed if this
    view is POSTed.

    Templates: ``<app_label>/<model_name>_confirm_delete.html``
    Context:
        object
            the original object being deleted
    """
    if extra_context is None: extra_context = {}
    if login_required and not request.user.is_authenticated():
        return redirect_to_login(request.path)
    obj = lookup_object(model, object_id, slug, slug_field)
    if request.method == 'POST':
        obj.delete()
        msg = ugettext("The %(verbose_name)s was deleted.") %\
            {"verbose_name": model._meta.verbose_name}
        messages.success(request, msg, fail_silently=True)
        return HttpResponseRedirect(post_delete_redirect)
    else:
        # GET: render the confirmation page instead of deleting.
        if not template_name:
            template_name = "%s/%s_confirm_delete.html" % (model._meta.app_label, model._meta.object_name.lower())
        t = template_loader.get_template(template_name)
        c = RequestContext(request, {
            template_object_name: obj,
        }, context_processors)
        apply_extra_context(extra_context, c)
        response = HttpResponse(t.render(c))
        # Expose the object's pk in X-Object-* response headers for middleware.
        populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
        return response
| apache-2.0 |
kamalx/edx-platform | cms/envs/dev_with_worker.py | 127 | 1180 | """
This config file follows the dev enviroment, but adds the
requirement of a celery worker running in the background to process
celery tasks.
The worker can be executed using:
django_admin.py celery worker
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from dev import *
################################# CELERY ######################################
# Requires a separate celery worker
CELERY_ALWAYS_EAGER = False
# Use django db as the broker and result store
BROKER_URL = 'django://'
INSTALLED_APPS += ('djcelery.transport', )
CELERY_RESULT_BACKEND = 'database'
# How often (seconds) the django-kombu transport polls the database for
# new messages.
DJKOMBU_POLLING_INTERVAL = 1.0
# Disable transaction management because we are using a worker. Views
# that request a task and wait for the result will deadlock otherwise.
MIDDLEWARE_CLASSES = tuple(
    c for c in MIDDLEWARE_CLASSES
    if c != 'django.middleware.transaction.TransactionMiddleware')
# Note: other alternatives for disabling transactions don't work in 1.4
# https://code.djangoproject.com/ticket/2304
# https://code.djangoproject.com/ticket/16039
| agpl-3.0 |
pablodanielrey/python | dhcp/dhcp-event.py | 1 | 1622 | #!/usr/bin/env python
import psycopg2
import os, sys
# Database host; the empty string makes psycopg2 connect via the local
# Unix-domain socket.  NOTE(review): DB credentials are hard-coded below --
# confirm whether they should come from configuration instead.
host = ''
#ClientIP, ClientMac, host-decl-name
if (len(sys.argv) > 1):
    # Arguments are supplied by the dhcpd event hooks:
    #   argv[1] = event name (commit/release/expiry)
    #   argv[2] = client IP, argv[3] = client MAC, argv[4] = host-decl-name
    command = sys.argv[1]
    clientIP = sys.argv[2]
    clientMac = sys.argv[3]
    hostname = sys.argv[4]
    if command == "commit":
        # Append a human-readable audit line for the new lease.
        f = open("/tmp/leases",'a')
        s = "Leased: %s to %s\n" % (clientIP, hostname)
        f.write(s)
        f.flush()
        f.close()
        # Replace any previous lease row for this IP with the new one
        # (parameterized queries, so no SQL-injection risk from the hook args).
        db = psycopg2.connect(host=host, user="dhcp", password="dhcp", dbname="dhcp")
        cursor = db.cursor()
        cursor.execute("delete from leases where ip = %s",[clientIP])
        cursor.execute("INSERT INTO leases (ip,mac,hostname) VALUES (%s,%s,%s)", [clientIP,clientMac,hostname])
        # pp.pprint(cursor.__dict__)
        cursor.close()
        db.commit()
        db.close()
    elif command == "release":
        # Append a human-readable audit line for the released lease.
        f = open("/tmp/leases",'a')
        s = "Released: %s from %s\n" % (clientIP, hostname)
        f.write(s)
        f.flush()
        f.close()
        # NOTE(review): this deletes from 'records' while "commit" writes to
        # 'leases' -- confirm the differing table names are intentional.
        db = psycopg2.connect(host=host, user="dhcp", password="dhcp", dbname="dhcp")
        cursor = db.cursor()
        cursor.execute("DELETE FROM records WHERE content = %s AND name = %s",[clientIP,hostname])
        #pp.pprint(cursor.__dict__)
        db.commit()
        db.close()
    elif command == "expiry":
        # Same handling as "release", duplicated per event type.
        f = open("/tmp/leases",'a')
        s = "Expired: %s from %s\n" % (clientIP, hostname)
        f.write(s)
        f.flush()
        f.close()
        db = psycopg2.connect(host=host, user="dhcp", password="dhcp", dbname="dhcp")
        cursor = db.cursor()
        cursor.execute("DELETE FROM records WHERE content = %s AND name = %s",[clientIP,hostname])
        #pp.pprint(cursor.__dict__)
        db.commit()
        db.close()
| gpl-3.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.2/django/contrib/messages/storage/cookie.py | 65 | 5862 | import hmac
from django.conf import settings
from django.contrib.messages import constants
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import CompatCookie
from django.utils import simplejson as json
from django.utils.hashcompat import sha_hmac
class MessageEncoder(json.JSONEncoder):
    """
    Compactly serializes instances of the ``Message`` class as JSON.

    A message becomes ``[message_key, level, message(, extra_tags)]``; the
    trailing element is emitted only when ``extra_tags`` is truthy.
    """
    message_key = '__json_message'

    def default(self, obj):
        if not isinstance(obj, Message):
            # Anything else is handed to the stock encoder.
            return super(MessageEncoder, self).default(obj)
        encoded = [self.message_key, obj.level, obj.message]
        if obj.extra_tags:
            encoded.append(obj.extra_tags)
        return encoded
class MessageDecoder(json.JSONDecoder):
    """
    Decodes JSON that includes serialized ``Message`` instances.

    Lists tagged with ``MessageEncoder.message_key`` are rebuilt into
    ``Message`` objects; other containers are walked recursively.
    """

    def process_messages(self, obj):
        if isinstance(obj, list) and obj:
            if obj[0] == MessageEncoder.message_key:
                # Tagged list -> reconstruct the Message it encodes.
                return Message(*obj[1:])
            return [self.process_messages(child) for child in obj]
        if isinstance(obj, dict):
            return dict((key, self.process_messages(value))
                        for key, value in obj.iteritems())
        # Scalars (and empty lists) pass through unchanged.
        return obj

    def decode(self, s, **kwargs):
        decoded = super(MessageDecoder, self).decode(s, **kwargs)
        return self.process_messages(decoded)
class CookieStorage(BaseStorage):
    """
    Stores messages in a cookie.
    """
    cookie_name = 'messages'
    # We should be able to store 4K in a cookie, but Internet Explorer
    # imposes 4K as the *total* limit for a domain. To allow other
    # cookies, we go for 3/4 of 4K.
    max_cookie_size = 3072
    # Sentinel appended to the message list when not all messages fit
    # in the cookie; _get strips it and reports partial retrieval.
    not_finished = '__messagesnotfinished__'

    def _get(self, *args, **kwargs):
        """
        Retrieves a list of messages from the messages cookie.  If the
        not_finished sentinel value is found at the end of the message list,
        remove it and return a result indicating that not all messages were
        retrieved by this storage.
        """
        data = self.request.COOKIES.get(self.cookie_name)
        messages = self._decode(data)
        all_retrieved = not (messages and messages[-1] == self.not_finished)
        if messages and not all_retrieved:
            # remove the sentinel value
            messages.pop()
        return messages, all_retrieved

    def _update_cookie(self, encoded_data, response):
        """
        Either sets the cookie with the encoded data if there is any data to
        store, or deletes the cookie.
        """
        if encoded_data:
            response.set_cookie(self.cookie_name, encoded_data)
        else:
            response.delete_cookie(self.cookie_name)

    def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
        """
        Stores the messages to a cookie, returning a list of any messages which
        could not be stored.

        If the encoded data is larger than ``max_cookie_size``, removes
        messages until the data fits (these are the messages which are
        returned), and add the not_finished sentinel value to indicate as much.
        """
        unstored_messages = []
        encoded_data = self._encode(messages)
        if self.max_cookie_size:
            # data is going to be stored eventually by CompatCookie, which
            # adds it's own overhead, which we must account for.
            cookie = CompatCookie()  # create outside the loop

            def stored_length(val):
                # Length of the value as it will actually appear after
                # cookie-encoding (quoting etc. inflate the raw value).
                return len(cookie.value_encode(val)[1])

            while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
                if remove_oldest:
                    unstored_messages.append(messages.pop(0))
                else:
                    unstored_messages.insert(0, messages.pop())
                # Re-encode with the sentinel so _get can tell the list was
                # truncated.
                encoded_data = self._encode(messages + [self.not_finished],
                                            encode_empty=unstored_messages)
        self._update_cookie(encoded_data, response)
        return unstored_messages

    def _hash(self, value):
        """
        Creates an HMAC/SHA1 hash based on the value and the project setting's
        SECRET_KEY, modified to make it unique for the present purpose.
        """
        # Prefixing SECRET_KEY namespaces the hash to this app, so hashes
        # produced here cannot be reused by other SECRET_KEY-based signers.
        key = 'django.contrib.messages' + settings.SECRET_KEY
        return hmac.new(key, value, sha_hmac).hexdigest()

    def _encode(self, messages, encode_empty=False):
        """
        Returns an encoded version of the messages list which can be stored as
        plain text.

        Since the data will be retrieved from the client-side, the encoded data
        also contains a hash to ensure that the data was not tampered with.

        Returns ``"<hash>$<json>"``, or None for an empty list unless
        ``encode_empty`` is true.
        """
        if messages or encode_empty:
            encoder = MessageEncoder(separators=(',', ':'))
            value = encoder.encode(messages)
            return '%s$%s' % (self._hash(value), value)

    def _decode(self, data):
        """
        Safely decodes a encoded text stream back into a list of messages.

        If the encoded text stream contained an invalid hash or was in an
        invalid format, ``None`` is returned.
        """
        if not data:
            return None
        bits = data.split('$', 1)
        if len(bits) == 2:
            hash, value = bits
            if hash == self._hash(value):
                try:
                    # If we get here (and the JSON decode works), everything is
                    # good. In any other case, drop back and return None.
                    return json.loads(value, cls=MessageDecoder)
                except ValueError:
                    pass
        # Mark the data as used (so it gets removed) since something was wrong
        # with the data.
        self.used = True
        return None
| lgpl-3.0 |
abacuspix/NFV_project | Mastering Flask_Code Bundle/chapter_7/webapp/extensions.py | 4 | 2194 | from flask import flash, redirect, url_for, session
from flask.ext.bcrypt import Bcrypt
from flask.ext.openid import OpenID
from flask_oauth import OAuth
from flask.ext.login import LoginManager
from flask.ext.principal import Principal, Permission, RoleNeed
# Shared, unbound extension instances. NOTE(review): these appear to be
# initialised against the application object elsewhere (app-factory
# pattern) -- confirm against the module that calls init_app.
bcrypt = Bcrypt()
oid = OpenID()
oauth = OAuth()
principals = Principal()

# Role-based permissions used to gate access to views.
admin_permission = Permission(RoleNeed('admin'))
poster_permission = Permission(RoleNeed('poster'))
default_permission = Permission(RoleNeed('default'))

login_manager = LoginManager()
# Anonymous users are redirected to the blueprint login view; "strong"
# session protection invalidates sessions whose identifier changes.
login_manager.login_view = "main.login"
login_manager.session_protection = "strong"
login_manager.login_message = "Please login to access this page"
login_manager.login_message_category = "info"
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: look up the user record by primary key."""
    # Imported locally to avoid a circular import with the models module.
    from models import User
    return User.query.get(userid)
@oid.after_login
def create_or_login(resp):
    """
    OpenID post-login callback: store the username in the session,
    creating the local user record on first login.
    """
    # Imported locally to avoid a circular import with the models module.
    from models import db, User
    # Prefer the most descriptive identifier the OpenID provider returned.
    username = resp.fullname or resp.nickname or resp.email
    if not username:
        flash('Invalid login. Please try again.', 'danger')
        return redirect(url_for('main.login'))
    user = User.query.filter_by(username=username).first()
    # Create the account on first OpenID login.
    if user is None:
        user = User(username)
        db.session.add(user)
        db.session.commit()
    session['username'] = username
    return redirect(url_for('blog.home'))
# Facebook OAuth client. consumer_key/consumer_secret are left empty here
# and must be filled in with the application's credentials.
facebook = oauth.remote_app(
    'facebook',
    base_url='https://graph.facebook.com/',
    request_token_url=None,  # no request-token step for this provider
    access_token_url='/oauth/access_token',
    authorize_url='https://www.facebook.com/dialog/oauth',
    consumer_key='',
    consumer_secret='',
    request_token_params={'scope': 'email'}  # request access to the email field
)
# Twitter OAuth client. consumer_key/consumer_secret are left empty here
# and must be filled in with the application's credentials.
twitter = oauth.remote_app(
    'twitter',
    base_url='https://api.twitter.com/1.1/',
    request_token_url='https://api.twitter.com/oauth/request_token',
    access_token_url='https://api.twitter.com/oauth/access_token',
    authorize_url='https://api.twitter.com/oauth/authenticate',
    consumer_key='',
    consumer_secret=''
)
@facebook.tokengetter
def get_facebook_oauth_token():
    """Return the Facebook OAuth token stored in the session, if any."""
    return session.get('facebook_oauth_token')
@twitter.tokengetter
def get_twitter_oauth_token():
    """Return the Twitter OAuth token stored in the session, if any."""
    return session.get('twitter_oauth_token')
| mit |
kuiwei/kuiwei | lms/djangoapps/instructor/tests/test_hint_manager.py | 30 | 9267 | import json
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from mock import patch, MagicMock
from courseware.models import XModuleUserStateSummaryField
from courseware.tests.factories import UserStateSummaryFactory
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
import instructor.hint_manager as view
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class HintManagerTest(ModuleStoreTestCase):
    """
    Tests for the staff-facing crowdsourced-hint management view
    (instructor.hint_manager): access control, hint listing, deletion,
    vote changes, hint submission and moderation approval.
    """

    def setUp(self):
        """
        Makes a course, which will be the same for all tests.
        Set up mako middleware, which is necessary for template rendering to happen.
        """
        self.course = CourseFactory.create(org='Me', number='19.002', display_name='test_course')
        self.url = '/courses/Me/19.002/test_course/hint_manager'
        self.user = UserFactory.create(username='robot', email='robot@edx.org', password='test', is_staff=True)
        self.c = Client()
        self.c.login(username='robot', password='test')
        self.course_id = self.course.id
        self.problem_id = self.course_id.make_usage_key('crowdsource_hinter', 'crowdsource_hinter_001')
        # Seed state: hints[answer][hint_pk] = [hint text, vote count]
        UserStateSummaryFactory.create(field_name='hints',
                                       usage_id=self.problem_id,
                                       value=json.dumps({'1.0': {'1': ['Hint 1', 2],
                                                                 '3': ['Hint 3', 12]},
                                                         '2.0': {'4': ['Hint 4', 3]}
                                                         }))
        UserStateSummaryFactory.create(field_name='mod_queue',
                                       usage_id=self.problem_id,
                                       value=json.dumps({'2.0': {'2': ['Hint 2', 1]}}))
        UserStateSummaryFactory.create(field_name='hint_pk',
                                       usage_id=self.problem_id,
                                       value=5)
        # Mock out location_to_problem_name, which ordinarily accesses the modulestore.
        # (I can't figure out how to get fake structures into the modulestore.)
        view.location_to_problem_name = lambda course_id, loc: "Test problem"

    def test_student_block(self):
        """
        Makes sure that students cannot see the hint management view.
        """
        c = Client()
        UserFactory.create(username='student', email='student@edx.org', password='test')
        c.login(username='student', password='test')
        out = c.get(self.url)
        print out
        self.assertTrue('Sorry, but students are not allowed to access the hint manager!' in out.content)

    def test_staff_access(self):
        """
        Makes sure that staff can access the hint management view.
        """
        out = self.c.get(self.url)
        print out
        self.assertTrue('Hints Awaiting Moderation' in out.content)

    def test_invalid_field_access(self):
        """
        Makes sure that field names other than 'mod_queue' and 'hints' are
        rejected.
        """
        out = self.c.post(self.url, {'op': 'delete hints', 'field': 'all your private data'})
        print out
        self.assertTrue('an invalid field was accessed' in out.content)

    def test_switchfields(self):
        """
        Checks that the op: 'switch fields' POST request works.
        """
        out = self.c.post(self.url, {'op': 'switch fields', 'field': 'mod_queue'})
        print out
        self.assertTrue('Hint 2' in out.content)

    def test_gethints(self):
        """
        Checks that gethints returns the right data.
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'mod_queue'})
        out = view.get_hints(post, self.course_id, 'mod_queue')
        print out
        self.assertTrue(out['other_field'] == 'hints')
        expected = {self.problem_id: [(u'2.0', {u'2': [u'Hint 2', 1]})]}
        self.assertTrue(out['all_hints'] == expected)

    def test_gethints_other(self):
        """
        Same as above, with hints instead of mod_queue
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'hints'})
        out = view.get_hints(post, self.course_id, 'hints')
        print out
        self.assertTrue(out['other_field'] == 'mod_queue')
        expected = {self.problem_id: [('1.0', {'1': ['Hint 1', 2],
                                               '3': ['Hint 3', 12]}),
                                      ('2.0', {'4': ['Hint 4', 3]})
                                      ]}
        self.assertTrue(out['all_hints'] == expected)

    def test_deletehints(self):
        """
        Checks that delete_hints deletes the right stuff.
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'hints',
                                       'op': 'delete hints',
                                       1: [self.problem_id.to_deprecated_string(), '1.0', '1']})
        view.delete_hints(post, self.course_id, 'hints')
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value
        self.assertTrue('1' not in json.loads(problem_hints)['1.0'])

    def test_changevotes(self):
        """
        Checks that vote changing works.
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'hints',
                                       'op': 'change votes',
                                       1: [self.problem_id.to_deprecated_string(), '1.0', '1', 5]})
        view.change_votes(post, self.course_id, 'hints')
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value
        # hints[answer][hint_pk (string)] = [hint text, vote count]
        print json.loads(problem_hints)['1.0']['1']
        self.assertTrue(json.loads(problem_hints)['1.0']['1'][1] == 5)

    def test_addhint(self):
        """
        Check that instructors can add new hints.
        """
        # Because add_hint accesses the xmodule, this test requires a bunch
        # of monkey patching.
        hinter = MagicMock()
        hinter.validate_answer = lambda string: True
        request = RequestFactory()
        post = request.post(self.url, {'field': 'mod_queue',
                                       'op': 'add hint',
                                       'problem': self.problem_id.to_deprecated_string(),
                                       'answer': '3.14',
                                       'hint': 'This is a new hint.'})
        post.user = 'fake user'
        with patch('courseware.module_render.get_module', MagicMock(return_value=hinter)):
            with patch('courseware.model_data.FieldDataCache', MagicMock(return_value=None)):
                view.add_hint(post, self.course_id, 'mod_queue')
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value
        self.assertTrue('3.14' in json.loads(problem_hints))

    def test_addbadhint(self):
        """
        Check that instructors cannot add hints with unparsable answers.
        """
        # Patching.
        hinter = MagicMock()
        hinter.validate_answer = lambda string: False
        request = RequestFactory()
        post = request.post(self.url, {'field': 'mod_queue',
                                       'op': 'add hint',
                                       'problem': self.problem_id.to_deprecated_string(),
                                       'answer': 'fish',
                                       'hint': 'This is a new hint.'})
        post.user = 'fake user'
        with patch('courseware.module_render.get_module', MagicMock(return_value=hinter)):
            with patch('courseware.model_data.FieldDataCache', MagicMock(return_value=None)):
                view.add_hint(post, self.course_id, 'mod_queue')
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value
        self.assertTrue('fish' not in json.loads(problem_hints))

    def test_approve(self):
        """
        Check that instructors can approve hints. (Move them
        from the mod_queue to the hints.)
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'mod_queue',
                                       'op': 'approve',
                                       1: [self.problem_id.to_deprecated_string(), '2.0', '2']})
        view.approve(post, self.course_id, 'mod_queue')
        # Approved hint must be gone from the queue...
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value
        self.assertTrue('2.0' not in json.loads(problem_hints) or len(json.loads(problem_hints)['2.0']) == 0)
        # ...and present in the public hints with its vote count intact.
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value
        self.assertTrue(json.loads(problem_hints)['2.0']['2'] == ['Hint 2', 1])
        self.assertTrue(len(json.loads(problem_hints)['2.0']) == 2)
| agpl-3.0 |
odoousers2014/odoo_addons-2 | clv_person/wkf/__init__.py | 2 | 1426 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import clv_person_wkf
| agpl-3.0 |
AunShiLord/sympy | sympy/physics/quantum/tests/test_fermion.py | 113 | 1129 | from sympy.physics.quantum import Dagger, AntiCommutator, qapply
from sympy.physics.quantum.fermion import FermionOp
from sympy.physics.quantum.fermion import FermionFockKet, FermionFockBra
def test_fermionoperator():
    """Construction, comparison and anticommutators of FermionOp."""
    a = FermionOp('c')
    b = FermionOp('d')

    # Type bookkeeping: a dagger of a FermionOp is still a FermionOp.
    assert isinstance(a, FermionOp)
    assert isinstance(Dagger(a), FermionOp)

    # Annihilation vs. creation flag.
    assert a.is_annihilation
    assert not Dagger(a).is_annihilation

    # Operators compare by name and by annihilation/creation flag.
    assert FermionOp("c") == FermionOp("c")
    assert FermionOp("c") != FermionOp("d")
    assert FermionOp("c", True) != FermionOp("c", False)

    # {c, c+} = 1; distinct modes keep their symbolic product form.
    assert AntiCommutator(a, Dagger(a)).doit() == 1
    assert AntiCommutator(a, Dagger(b)).doit() == a * Dagger(b) + Dagger(b) * a
def test_fermion_states():
    """Action of a fermionic annihilation operator and its adjoint on Fock states."""
    op = FermionOp("c")

    # Fock states: orthonormal occupation basis.
    assert (FermionFockBra(0) * FermionFockKet(1)).doit() == 0
    assert (FermionFockBra(1) * FermionFockKet(1)).doit() == 1

    # Annihilation lowers occupation; it kills the vacuum.
    assert qapply(op * FermionFockKet(1)) == FermionFockKet(0)
    assert qapply(op * FermionFockKet(0)) == 0

    # Creation raises occupation; double occupation vanishes.
    assert qapply(Dagger(op) * FermionFockKet(0)) == FermionFockKet(1)
    assert qapply(Dagger(op) * FermionFockKet(1)) == 0
| bsd-3-clause |
rustychris/stompy | stompy/io/local/usgs_nwis.py | 1 | 9091 | import datetime
import os
import logging
import re
import six
from six.moves import cPickle
import numpy as np
import xarray as xr
import pandas as pd
import requests
log=logging.getLogger('usgs_nwis')
from ... import utils
from .. import rdb
from .common import periods
try:
import seawater
except ImportError:
seawater=None
def nwis_dataset_collection(stations, *a, **k):
    """
    Fetch from multiple stations, glue together to a combined dataset.
    The rest of the options are the same as for nwis_dataset().
    Stations for which no data was found are omitted in the results.
    """
    per_site = []
    for site in stations:
        fetched = nwis_dataset(site, *a, **k)
        if fetched is None:
            continue  # no data for this station -- leave it out
        fetched['site'] = ('site',), [site]
        per_site.append(fetched)

    # And now glue those all together, but no filling of gaps yet.
    # As cases of missing data come up, this will have to get smarter about
    # padding individual sites.
    if not per_site:
        # Annoying, but if no stations exist, just return None
        return None

    combined = xr.concat(per_site, dim='site')
    # Close the per-station datasets to free up file descriptors.
    for fetched in per_site:
        fetched.close()
    return combined
def nwis_dataset(station,start_date,end_date,products,
                 days_per_request='M',frequency='realtime',
                 cache_dir=None,clip=True,cache_only=False,
                 cache_no_data=False):
    """
    Retrieval script for USGS waterdata.usgs.gov

    Retrieve one or more data products from a single station.
    station: string or numeric identifier for COOPS station.
    products: list of integers identifying the variable to retrieve.  See
    usgs_parm_codes.tsv in the directory above this directory.
    start_date,end_date: period to retrieve, as python datetime, matplotlib datenum,
    or numpy datetime64.
    days_per_request: batch the requests to fetch smaller chunks at a time.
    if this is an integer, then chunks will start with start_date, then start_date+days_per_request,
    etc.
      if this is a string, it is interpreted as the frequency argument to pandas.PeriodIndex.
    so 'M' will request month-aligned chunks.  this has the advantage that requests for different
    start dates will still be aligned to integer periods, and can reuse cached data.

    cache_dir: if specified, save each chunk as a netcdf file in this directory,
      with filenames that include the gage, period and products.  The directory must already
      exist.

    clip: if True, then even if more data was fetched, return only the period requested.

    frequency: defaults to "realtime" which should correspond to the original
      sample frequency.  Alternatively, "daily" which access daily average values.

    cache_only: only read from cache, not attempting to fetch any new data.

    cache_no_data: periods which successfully download but contain no data are recorded
      as empty files.  Otherwise it is assumed that there may be a transient error, and
      nothing is written to cache.  Do not use this for real-time retrievals, since it may
      cache no-data results from the future.

    returns an xarray dataset.

    Note that names of variables are inferred from parameter codes where possible,
    but this is not 100% accurate with respect to the descriptions provided in the rdb,
    notably "Discharge, cubic feet per second" may be reported as
    "stream_flow_mean_daily"
    """
    start_date=utils.to_dt64(start_date)
    end_date=utils.to_dt64(end_date)

    params=dict(site_no=station,
                format='rdb')

    # One cb_<code>=on parameter per requested product.
    for prod in products:
        params['cb_%05d'%prod]='on'

    # Only for small requests of recent data:
    #   base_url="https://waterdata.usgs.gov/nwis/uv"
    # Otherwise it redirects to here:
    if frequency=='realtime':
        base_url="https://nwis.waterdata.usgs.gov/usa/nwis/uv/"
    elif frequency=='daily':
        base_url="https://waterdata.usgs.gov/nwis/dv"
    else:
        raise Exception("Unknown frequency: %s"%(frequency))
    params['period']=''

    # generator for dicing up the request period
    datasets=[]  # one xr.Dataset per successfully-fetched chunk

    last_url=None

    for interval_start,interval_end in periods(start_date,end_date,days_per_request):
        params['begin_date']=utils.to_datetime(interval_start).strftime('%Y-%m-%d')
        params['end_date'] =utils.to_datetime(interval_end).strftime('%Y-%m-%d')

        # This is the base name for caching, but also a shorthand for reporting
        # issues with the user, since it already encapsulates most of the
        # relevant info in a single tidy string.
        base_fn="%s_%s_%s_%s.nc"%(station,
                                  "-".join(["%d"%p for p in products]),
                                  params['begin_date'],
                                  params['end_date'])

        if cache_dir is not None:
            cache_fn=os.path.join(cache_dir,base_fn)
        else:
            cache_fn=None

        if (cache_fn is not None) and os.path.exists(cache_fn):
            log.info("Cached %s -- %s"%(interval_start,interval_end))
            if os.path.getsize(cache_fn)==0:
                # Cached no-data result (zero-byte marker written below).
                log.warning(" cache for %s -- %s says no-data"%(interval_start,interval_end))
                continue
            ds=xr.open_dataset(cache_fn)
        elif cache_only:
            log.info("Cache only - no data for %s -- %s"%(interval_start,interval_end))
            continue
        else:
            log.info("Fetching %s"%(base_fn))
            # Retry transient network failures up to 3 times.
            sesh = requests.Session()
            sesh.mount('https://', requests.adapters.HTTPAdapter(max_retries=3))
            req=sesh.get(base_url,params=params)
            data=req.text
            ds=rdb.rdb_to_dataset(text=data)
            if ds is None: # There was no data there HERE - would like to have an option to record no data
                log.warning(" %s: no data found for this period"%base_fn)
                if (cache_fn is not None) and cache_no_data:
                    # Zero-byte file marks a known-empty period for next time.
                    log.warning(" %s: making zero-byte cache file"%base_fn)
                    with open(cache_fn,'wb') as fp: pass
                continue
            ds.attrs['url']=req.url
            if cache_fn is not None:
                ds.to_netcdf(cache_fn)

        # USGS returns data inclusive of the requested date range - leading to some overlap
        if len(datasets):
            ds=ds.isel(time=ds.time>datasets[-1].time[-1])
        datasets.append(ds)

    if len(datasets)==0:
        # could try to construct zero-length dataset, but that sounds like a pain
        # at the moment.
        log.warning(" no data for station %s for any periods!"%station)
        return None

    if len(datasets)>1:
        # it's possible that not all variables appear in all datasets
        # dataset=xr.concat( datasets, dim='time')
        dataset=datasets[0]
        for other in datasets[1:]:
            dataset=dataset.combine_first(other)
        for stale in datasets:
            stale.close() # maybe free up FDs?
    else:
        dataset=datasets[0]

    if clip:
        # Trim to exactly the requested window, [start_date, end_date).
        time_sel=(dataset.time.values>=start_date) & (dataset.time.values<end_date)
        dataset=dataset.isel(time=time_sel)

    dataset.load() # force read into memory before closing files
    for d in datasets:
        d.close()

    # Promote metadata variables to coordinates when present.
    for meta in ['datenum','tz_cd']:
        if meta in dataset.data_vars:
            dataset=dataset.set_coords(meta)
    return dataset
def add_salinity(ds):
    """
    For each specific-conductance variable in ds, derive a matching
    EOS-80 salinity variable in place.  Requires the seawater package.
    """
    assert seawater is not None
    for name in ds.data_vars:
        if not name.startswith('specific_conductance'):
            continue
        salt_name = name.replace('specific_conductance', 'salinity')
        if salt_name in ds:
            continue  # already present -- leave it alone
        print("%s => %s" % (name, salt_name))
        # USGS adjusts conductance to 25 degC; no pressure effects.
        ratio = ds[name].values / 1000. / seawater.constants.c3515
        salt = seawater.eos80.salt(ratio, 25.0, 0)
        ds[salt_name] = ds[name].dims, salt
def station_metadata(station,cache_dir=None):
    """
    Look up latitude/longitude for a USGS station by scraping the NWIS
    inventory page, pickling the result in cache_dir when given.
    Returns dict(lat=...,lon=...) in decimal degrees.
    """
    if cache_dir is not None:
        cache_fn=os.path.join(cache_dir,"meta-%s.pkl"%station)

        # Serve from the pickle cache when it exists.
        if os.path.exists(cache_fn):
            with open(cache_fn,'rb') as fp:
                meta=cPickle.load(fp)
            return meta

    url="https://waterdata.usgs.gov/nwis/inventory?agency_code=USGS&site_no=%s"%station

    resp=requests.get(url)

    # Pull DMS-formatted coordinates out of the HTML; the character class
    # also matches HTML entities (e.g. the degree symbol).
    m=re.search(r"Latitude\s+([.0-9&#;']+\")",resp.text)
    lat=m.group(1)
    m=re.search(r"Longitude\s+([.0-9&#;']+\")",resp.text)
    lon=m.group(1)

    def dms_to_dd(s):
        # Convert a degrees-minutes-seconds string to decimal degrees.
        s=s.replace('°',' ').replace('"',' ').replace("'"," ").strip()
        d,m,s =[float(p) for p in s.split()]
        return d + m/60. + s/3600.

    lat=dms_to_dd(lat)
    # no mention of west longitude, but can assume it is west.
    lon=-dms_to_dd(lon)
    meta=dict(lat=lat,lon=lon)

    if cache_dir is not None:
        with open(cache_fn,'wb') as fp:
            cPickle.dump(meta,fp)
    return meta
| mit |
chrismeyersfsu/ansible | lib/ansible/module_utils/json_utils.py | 18 | 3257 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
try:
import json
except ImportError:
import simplejson as json
# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any
# changes are propagated there.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
| gpl-3.0 |
parlar/calls2xls | external/CrossMap/usr/lib64/python2.7/site-packages/bx/intervals/operations/subtract.py | 7 | 3271 | #!/usr/bin/env python
"""
Subtract one set of genomic intervals from another (base-by-base or whole
intervals). The returned GenomicIntervals will be in the order
of the first set of intervals passed in, with the corresponding
meta-data.
"""
import traceback
import fileinput
from warnings import warn
from bx.intervals.io import *
from bx.intervals.operations import *
def subtract(readers, mincols=1, upstream_pad=0, downstream_pad=0, pieces=True, lens={}, comments=True):
    """
    Subtract the union of readers[1:] from readers[0], yielding the
    surviving GenomicIntervals (split base-by-base when ``pieces`` is
    true) in the order of readers[0], preserving its meta-data.

    mincols: minimum overlap (in bases) before subtraction applies.
    lens: dict of chromosome lengths used to initialize the bitsets.
    comments: when true, pass Comment records through to the output.
    """
    # Read all but first into bitsets and union to one (if confused, read DeMorgan's...)
    primary = readers[0]
    union = readers[1:]
    # Handle any ValueError, IndexError and OverflowError exceptions that may
    # be thrown when the bitsets are being created by skipping the problem lines
    union[0] = BitsetSafeReaderWrapper(union[0], lens=lens)
    bitsets = union[0].binned_bitsets(upstream_pad=upstream_pad, downstream_pad=downstream_pad, lens=lens)
    union = union[1:]
    for andset in union:
        bitset2 = andset.binned_bitsets(upstream_pad=upstream_pad, downstream_pad=downstream_pad, lens=lens)
        for chrom in bitset2:
            if chrom not in bitsets:
                bitsets[chrom] = bitset2[chrom]
            else:
                # Union in place: bitsets[chrom] |= bitset2[chrom]
                bitsets[chrom].ior(bitset2[chrom])
    # Read remaining intervals and subtract
    for interval in primary:
        if isinstance(interval, Header):
            yield interval
        if isinstance(interval, Comment) and comments:
            yield interval
        elif isinstance(interval, GenomicInterval):
            chrom = interval.chrom
            if chrom not in bitsets:
                # Nothing to subtract on this chromosome.
                yield interval
            else:
                start = int(interval.start)
                end = int(interval.end)
                if start > end:
                    warn("Interval start after end!")
                out_intervals = []
                # Find the intervals that meet the criteria (for the three sensible
                # permutations of reverse and pieces)
                try:
                    if bitsets[chrom].count_range(start, end - start) >= mincols:
                        if pieces:
                            out_intervals = bits_clear_in_range(bitsets[chrom], start, end)
                        else:
                            out_intervals = [(start, end)]
                    # Write the intervals
                    for start, end in out_intervals:
                        new_interval = interval.copy()
                        new_interval.start = start
                        new_interval.end = end
                        yield new_interval
                # Modernized from the Python-2-only "except IndexError, e:" form.
                except IndexError as e:
                    try:
                        # This will work only if primary is a NiceReaderWrapper
                        primary.skipped += 1
                        # no reason to stuff an entire bad file into memory
                        if primary.skipped < 10:
                            primary.skipped_lines.append((primary.linenum, primary.current_line, str(e)))
                    # Narrowed from a bare "except:" so that system-exiting
                    # exceptions (KeyboardInterrupt etc.) are not swallowed.
                    except Exception:
                        pass
                    continue
| mit |
ddna1021/spark | python/pyspark/mllib/tree.py | 24 | 24125 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import sys
import random
from pyspark import SparkContext, RDD, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc, JavaModelWrapper
from pyspark.mllib.linalg import _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['DecisionTreeModel', 'DecisionTree', 'RandomForestModel',
'RandomForest', 'GradientBoostedTreesModel', 'GradientBoostedTrees']
class TreeEnsembleModel(JavaModelWrapper, JavaSaveable):
    """TreeEnsembleModel

    Common Python wrapper around a JVM-side tree ensemble model; all
    operations are delegated to the underlying Java model via ``call``.

    .. versionadded:: 1.3.0
    """
    @since("1.3.0")
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points using
        the model trained.

        .. note:: In Python, predict cannot currently be used within an RDD
            transformation or action.
            Call predict directly on the RDD instead.
        """
        if isinstance(x, RDD):
            # Convert each element to an MLlib vector before shipping the
            # whole RDD to the JVM.
            return self.call("predict", x.map(_convert_to_vector))
        else:
            return self.call("predict", _convert_to_vector(x))

    @since("1.3.0")
    def numTrees(self):
        """
        Get number of trees in ensemble.
        """
        return self.call("numTrees")

    @since("1.3.0")
    def totalNumNodes(self):
        """
        Get total number of nodes, summed over all trees in the ensemble.
        """
        return self.call("totalNumNodes")

    def __repr__(self):
        """ Summary of model """
        return self._java_model.toString()

    @since("1.3.0")
    def toDebugString(self):
        """ Full model """
        return self._java_model.toDebugString()
class DecisionTreeModel(JavaModelWrapper, JavaSaveable, JavaLoader):
    """
    A decision tree model for classification or regression.

    Wraps a JVM-side model; all operations are delegated through ``call``
    or the Java model object directly.

    .. versionadded:: 1.1.0
    """
    @since("1.1.0")
    def predict(self, x):
        """
        Predict the label of one or more examples.

        .. note:: In Python, predict cannot currently be used within an RDD
            transformation or action.
            Call predict directly on the RDD instead.

        :param x:
          Data point (feature vector), or an RDD of data points (feature
          vectors).
        """
        if isinstance(x, RDD):
            # Convert each element to an MLlib vector before shipping the
            # whole RDD to the JVM.
            return self.call("predict", x.map(_convert_to_vector))
        else:
            return self.call("predict", _convert_to_vector(x))

    @since("1.1.0")
    def numNodes(self):
        """Get number of nodes in tree, including leaf nodes."""
        return self._java_model.numNodes()

    @since("1.1.0")
    def depth(self):
        """
        Get depth of tree (e.g. depth 0 means 1 leaf node, depth 1
        means 1 internal node + 2 leaf nodes).
        """
        return self._java_model.depth()

    def __repr__(self):
        """ summary of model. """
        return self._java_model.toString()

    @since("1.2.0")
    def toDebugString(self):
        """ full model. """
        return self._java_model.toDebugString()

    @classmethod
    def _java_loader_class(cls):
        # Fully-qualified Scala class used by JavaLoader.load().
        return "org.apache.spark.mllib.tree.model.DecisionTreeModel"
class DecisionTree(object):
    """
    Learning algorithm for a decision tree model for classification or
    regression.
    .. versionadded:: 1.1.0
    """
    # Internal trainer shared by trainClassifier/trainRegressor.
    # NOTE(review): the parameter named "type" shadows the builtin of the same
    # name ("classification" or "regression" is passed); kept as-is because
    # renaming it would alter the private signature.
    @classmethod
    def _train(cls, data, type, numClasses, features, impurity="gini", maxDepth=5, maxBins=32,
               minInstancesPerNode=1, minInfoGain=0.0):
        # Cheap sanity check on the first record only; full validation happens
        # on the JVM side in trainDecisionTreeModel.
        first = data.first()
        assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
        model = callMLlibFunc("trainDecisionTreeModel", data, type, numClasses, features,
                              impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
        return DecisionTreeModel(model)
    @classmethod
    @since("1.1.0")
    def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo,
                        impurity="gini", maxDepth=5, maxBins=32, minInstancesPerNode=1,
                        minInfoGain=0.0):
        """
        Train a decision tree model for classification.
        :param data:
          Training data: RDD of LabeledPoint. Labels should take values
          {0, 1, ..., numClasses-1}.
        :param numClasses:
          Number of classes for classification.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param impurity:
          Criterion used for information gain calculation.
          Supported values: "gini" or "entropy".
          (default: "gini")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 5)
        :param maxBins:
          Number of bins used for finding splits at each node.
          (default: 32)
        :param minInstancesPerNode:
          Minimum number of instances required at child nodes to create
          the parent split.
          (default: 1)
        :param minInfoGain:
          Minimum info gain required to create a split.
          (default: 0.0)
        :return:
          DecisionTreeModel.
        Example usage:
        >>> from numpy import array
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import DecisionTree
        >>>
        >>> data = [
        ...     LabeledPoint(0.0, [0.0]),
        ...     LabeledPoint(1.0, [1.0]),
        ...     LabeledPoint(1.0, [2.0]),
        ...     LabeledPoint(1.0, [3.0])
        ... ]
        >>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {})
        >>> print(model)
        DecisionTreeModel classifier of depth 1 with 3 nodes
        >>> print(model.toDebugString())
        DecisionTreeModel classifier of depth 1 with 3 nodes
          If (feature 0 <= 0.5)
           Predict: 0.0
          Else (feature 0 > 0.5)
           Predict: 1.0
        <BLANKLINE>
        >>> model.predict(array([1.0]))
        1.0
        >>> model.predict(array([0.0]))
        0.0
        >>> rdd = sc.parallelize([[1.0], [0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "classification", numClasses, categoricalFeaturesInfo,
                          impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
    @classmethod
    @since("1.1.0")
    def trainRegressor(cls, data, categoricalFeaturesInfo,
                       impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1,
                       minInfoGain=0.0):
        """
        Train a decision tree model for regression.
        :param data:
          Training data: RDD of LabeledPoint. Labels are real numbers.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param impurity:
          Criterion used for information gain calculation.
          The only supported value for regression is "variance".
          (default: "variance")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 5)
        :param maxBins:
          Number of bins used for finding splits at each node.
          (default: 32)
        :param minInstancesPerNode:
          Minimum number of instances required at child nodes to create
          the parent split.
          (default: 1)
        :param minInfoGain:
          Minimum info gain required to create a split.
          (default: 0.0)
        :return:
          DecisionTreeModel.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import DecisionTree
        >>> from pyspark.mllib.linalg import SparseVector
        >>>
        >>> sparse_data = [
        ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
        ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
        ... ]
        >>>
        >>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {})
        >>> model.predict(SparseVector(2, {1: 1.0}))
        1.0
        >>> model.predict(SparseVector(2, {1: 0.0}))
        0.0
        >>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        # numClasses is irrelevant for regression; 0 is passed as a placeholder.
        return cls._train(data, "regression", 0, categoricalFeaturesInfo,
                          impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
@inherit_doc
class RandomForestModel(TreeEnsembleModel, JavaLoader):
    """
    Ensemble model produced by :py:class:`RandomForest`.
    .. versionadded:: 1.2.0
    """
    @classmethod
    def _java_loader_class(cls):
        # Fully qualified Scala class used by JavaLoader.load().
        return "org.apache.spark.mllib.tree.model.RandomForestModel"
class RandomForest(object):
    """
    Learning algorithm for a random forest model for classification or
    regression.
    .. versionadded:: 1.2.0
    """
    # Accepted values for featureSubsetStrategy; checked in _train().
    supportedFeatureSubsetStrategies = ("auto", "all", "sqrt", "log2", "onethird")
    # Internal trainer shared by trainClassifier/trainRegressor.
    @classmethod
    def _train(cls, data, algo, numClasses, categoricalFeaturesInfo, numTrees,
               featureSubsetStrategy, impurity, maxDepth, maxBins, seed):
        # Cheap sanity check on the first record only; full validation happens
        # on the JVM side.
        first = data.first()
        assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
        if featureSubsetStrategy not in cls.supportedFeatureSubsetStrategies:
            raise ValueError("unsupported featureSubsetStrategy: %s" % featureSubsetStrategy)
        if seed is None:
            # Generate the seed Python-side so a concrete value is forwarded
            # to the JVM trainer.
            seed = random.randint(0, 1 << 30)
        model = callMLlibFunc("trainRandomForestModel", data, algo, numClasses,
                              categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
                              maxDepth, maxBins, seed)
        return RandomForestModel(model)
    @classmethod
    @since("1.2.0")
    def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees,
                        featureSubsetStrategy="auto", impurity="gini", maxDepth=4, maxBins=32,
                        seed=None):
        """
        Train a random forest model for binary or multiclass
        classification.
        :param data:
          Training dataset: RDD of LabeledPoint. Labels should take values
          {0, 1, ..., numClasses-1}.
        :param numClasses:
          Number of classes for classification.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param numTrees:
          Number of trees in the random forest.
        :param featureSubsetStrategy:
          Number of features to consider for splits at each node.
          Supported values: "auto", "all", "sqrt", "log2", "onethird".
          If "auto" is set, this parameter is set based on numTrees:
          if numTrees == 1, set to "all";
          if numTrees > 1 (forest) set to "sqrt".
          (default: "auto")
        :param impurity:
          Criterion used for information gain calculation.
          Supported values: "gini" or "entropy".
          (default: "gini")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 4)
        :param maxBins:
          Maximum number of bins used for splitting features.
          (default: 32)
        :param seed:
          Random seed for bootstrapping and choosing feature subsets.
          Set as None to generate seed based on system time.
          (default: None)
        :return:
          RandomForestModel that can be used for prediction.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import RandomForest
        >>>
        >>> data = [
        ...     LabeledPoint(0.0, [0.0]),
        ...     LabeledPoint(0.0, [1.0]),
        ...     LabeledPoint(1.0, [2.0]),
        ...     LabeledPoint(1.0, [3.0])
        ... ]
        >>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42)
        >>> model.numTrees()
        3
        >>> model.totalNumNodes()
        7
        >>> print(model)
        TreeEnsembleModel classifier with 3 trees
        <BLANKLINE>
        >>> print(model.toDebugString())
        TreeEnsembleModel classifier with 3 trees
        <BLANKLINE>
          Tree 0:
            Predict: 1.0
          Tree 1:
            If (feature 0 <= 1.5)
             Predict: 0.0
            Else (feature 0 > 1.5)
             Predict: 1.0
          Tree 2:
            If (feature 0 <= 1.5)
             Predict: 0.0
            Else (feature 0 > 1.5)
             Predict: 1.0
        <BLANKLINE>
        >>> model.predict([2.0])
        1.0
        >>> model.predict([0.0])
        0.0
        >>> rdd = sc.parallelize([[3.0], [1.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "classification", numClasses,
                          categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
                          maxDepth, maxBins, seed)
    @classmethod
    @since("1.2.0")
    def trainRegressor(cls, data, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto",
                       impurity="variance", maxDepth=4, maxBins=32, seed=None):
        """
        Train a random forest model for regression.
        :param data:
          Training dataset: RDD of LabeledPoint. Labels are real numbers.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param numTrees:
          Number of trees in the random forest.
        :param featureSubsetStrategy:
          Number of features to consider for splits at each node.
          Supported values: "auto", "all", "sqrt", "log2", "onethird".
          If "auto" is set, this parameter is set based on numTrees:
          if numTrees == 1, set to "all";
          if numTrees > 1 (forest) set to "onethird" for regression.
          (default: "auto")
        :param impurity:
          Criterion used for information gain calculation.
          The only supported value for regression is "variance".
          (default: "variance")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 4)
        :param maxBins:
          Maximum number of bins used for splitting features.
          (default: 32)
        :param seed:
          Random seed for bootstrapping and choosing feature subsets.
          Set as None to generate seed based on system time.
          (default: None)
        :return:
          RandomForestModel that can be used for prediction.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import RandomForest
        >>> from pyspark.mllib.linalg import SparseVector
        >>>
        >>> sparse_data = [
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
        ... ]
        >>>
        >>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42)
        >>> model.numTrees()
        2
        >>> model.totalNumNodes()
        4
        >>> model.predict(SparseVector(2, {1: 1.0}))
        1.0
        >>> model.predict(SparseVector(2, {0: 1.0}))
        0.5
        >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.5]
        """
        # numClasses is irrelevant for regression; 0 is passed as a placeholder.
        return cls._train(data, "regression", 0, categoricalFeaturesInfo, numTrees,
                          featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
@inherit_doc
class GradientBoostedTreesModel(TreeEnsembleModel, JavaLoader):
    """
    Ensemble model produced by :py:class:`GradientBoostedTrees`.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def _java_loader_class(cls):
        # Fully qualified Scala class used by JavaLoader.load().
        return "org.apache.spark.mllib.tree.model.GradientBoostedTreesModel"
class GradientBoostedTrees(object):
    """
    Learning algorithm for a gradient boosted trees model for
    classification or regression.
    .. versionadded:: 1.3.0
    """
    # Internal trainer shared by trainClassifier/trainRegressor.
    @classmethod
    def _train(cls, data, algo, categoricalFeaturesInfo,
               loss, numIterations, learningRate, maxDepth, maxBins):
        # Cheap sanity check on the first record only; full validation happens
        # on the JVM side.
        first = data.first()
        assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
        model = callMLlibFunc("trainGradientBoostedTreesModel", data, algo, categoricalFeaturesInfo,
                              loss, numIterations, learningRate, maxDepth, maxBins)
        return GradientBoostedTreesModel(model)
    @classmethod
    @since("1.3.0")
    def trainClassifier(cls, data, categoricalFeaturesInfo,
                        loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3,
                        maxBins=32):
        """
        Train a gradient-boosted trees model for classification.
        :param data:
          Training dataset: RDD of LabeledPoint. Labels should take values
          {0, 1}.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param loss:
          Loss function used for minimization during gradient boosting.
          Supported values: "logLoss", "leastSquaresError",
          "leastAbsoluteError".
          (default: "logLoss")
        :param numIterations:
          Number of iterations of boosting.
          (default: 100)
        :param learningRate:
          Learning rate for shrinking the contribution of each estimator.
          The learning rate should be between in the interval (0, 1].
          (default: 0.1)
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 3)
        :param maxBins:
          Maximum number of bins used for splitting features. DecisionTree
          requires maxBins >= max categories.
          (default: 32)
        :return:
          GradientBoostedTreesModel that can be used for prediction.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import GradientBoostedTrees
        >>>
        >>> data = [
        ...     LabeledPoint(0.0, [0.0]),
        ...     LabeledPoint(0.0, [1.0]),
        ...     LabeledPoint(1.0, [2.0]),
        ...     LabeledPoint(1.0, [3.0])
        ... ]
        >>>
        >>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10)
        >>> model.numTrees()
        10
        >>> model.totalNumNodes()
        30
        >>> print(model)  # it already has newline
        TreeEnsembleModel classifier with 10 trees
        <BLANKLINE>
        >>> model.predict([2.0])
        1.0
        >>> model.predict([0.0])
        0.0
        >>> rdd = sc.parallelize([[2.0], [0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "classification", categoricalFeaturesInfo,
                          loss, numIterations, learningRate, maxDepth, maxBins)
    @classmethod
    @since("1.3.0")
    def trainRegressor(cls, data, categoricalFeaturesInfo,
                       loss="leastSquaresError", numIterations=100, learningRate=0.1, maxDepth=3,
                       maxBins=32):
        """
        Train a gradient-boosted trees model for regression.
        :param data:
          Training dataset: RDD of LabeledPoint. Labels are real numbers.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param loss:
          Loss function used for minimization during gradient boosting.
          Supported values: "logLoss", "leastSquaresError",
          "leastAbsoluteError".
          (default: "leastSquaresError")
        :param numIterations:
          Number of iterations of boosting.
          (default: 100)
        :param learningRate:
          Learning rate for shrinking the contribution of each estimator.
          The learning rate should be between in the interval (0, 1].
          (default: 0.1)
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 3)
        :param maxBins:
          Maximum number of bins used for splitting features. DecisionTree
          requires maxBins >= max categories.
          (default: 32)
        :return:
          GradientBoostedTreesModel that can be used for prediction.
        Example usage:
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import GradientBoostedTrees
        >>> from pyspark.mllib.linalg import SparseVector
        >>>
        >>> sparse_data = [
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
        ... ]
        >>>
        >>> data = sc.parallelize(sparse_data)
        >>> model = GradientBoostedTrees.trainRegressor(data, {}, numIterations=10)
        >>> model.numTrees()
        10
        >>> model.totalNumNodes()
        12
        >>> model.predict(SparseVector(2, {1: 1.0}))
        1.0
        >>> model.predict(SparseVector(2, {0: 1.0}))
        0.0
        >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "regression", categoricalFeaturesInfo,
                          loss, numIterations, learningRate, maxDepth, maxBins)
def _test():
    """Run this module's doctests against a local 4-core SparkSession."""
    import doctest
    from pyspark.sql import SparkSession
    test_globals = globals().copy()
    session = (SparkSession.builder
               .master("local[4]")
               .appName("mllib.tree tests")
               .getOrCreate())
    # The doctests reference `sc` directly.
    test_globals['sc'] = session.sparkContext
    failure_count, test_count = doctest.testmod(globs=test_globals,
                                                optionflags=doctest.ELLIPSIS)
    session.stop()
    if failure_count:
        sys.exit(-1)
# Allow running this module directly to execute its doctest suite.
if __name__ == "__main__":
    _test()
| apache-2.0 |
mellis13/moose | python/PresentationBuilder/images/DjangoWikiImage.py | 14 | 2168 | import re, urllib
from ..images import ImageBase
##
# Image handler for DjangoWikiSlide slides: resolves wiki [image:N ...]
# markup into presentation images, pulling per-image settings from the
# parent slide set's image map.
class DjangoWikiImage(ImageBase):
  @staticmethod
  def validParams():
    # No parameters beyond the ImageBase defaults.
    params = ImageBase.validParams()
    return params
  @staticmethod
  def extractName(match):
    # The wiki image id (regex group 1 from match()) doubles as the name.
    return match.group(1).strip()
  ##
  # Constructor
  # @param name The image name (wiki image id)
  # @param params Valid parameters, as produced by validParams()
  def __init__(self, name, params):
    ImageBase.__init__(self, name, params)
    # Get a reference to the image map contained in DjangoSlideSet
    image_settings = self.parent.parent.images[name]
    # Set the name and url parameters, unless already supplied
    if not self.isParamValid('name'):
      self.parameters()['name'] = name
    if not self.isParamValid('url'):
      self.parameters()['url'] = image_settings['url']
    # Apply the settings from the DjangoSlideSet; these arrive as
    # whitespace-separated "key:value" pairs and only known keys are kept.
    if image_settings['settings']:
      print ' '*8 + 'Applying image settings from wiki'
      for pair in image_settings['settings'].split():
        k,v = pair.strip().split(':')
        if k in params:
          params[k] = v
    #for key in params.valid_keys():
    #  print ' '*10, key, '=', params[key]
  ##
  # Performs the regex matching for Django images
  # @param markdown The raw wiki markdown to scan
  # @return A list of dicts, one per unique image reference, with keys
  #         'markdown', 'name', 'caption', 'url', 'settings'
  @staticmethod
  def match(markdown):
    # This list to be output
    output = []
    # A list of ids to avoid outputting the same image twice
    ids = []
    # Captioned form: [image:ID settings] followed by an indented caption line.
    pattern = re.compile(r'\s*\[image:([0-9]*)(.*)\]\s*\n\s{4,}(.*?)\n')
    for m in pattern.finditer(markdown):
      ids.append(m.group(1))
      output.append({'markdown' : m.group(0), \
                     'name' : m.group(1), \
                     'caption' : m.group(3), \
                     'url' : None, \
                     'settings' : m.group(2)})
    # Caption-less form; skip ids already captured by the captioned pattern.
    pattern = re.compile(r'\s*\[image:([0-9]*)(.*)\]\s*\n')
    for m in pattern.finditer(markdown):
      id = m.group(1)
      if id not in ids:
        output.append({'markdown' : m.group(0), \
                       'name' : m.group(1), \
                       'caption' : None, \
                       'url' : None, \
                       'settings' : m.group(2)})
    # Return the list
    return output
| lgpl-2.1 |
bruceyou/NewsBlur | utils/zgrep.py | 1 | 3581 | #!/usr/bin/env python
import os
import time
import select
import subprocess
import sys
from optparse import OptionParser
from requests.exceptions import ConnectionError
sys.path.insert(0, '/srv/newsblur')
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import fabfile
NEWSBLUR_USERNAME = 'sclay'
IGNORE_HOSTS = [
'push',
]
def main(role="app", role2="work", command=None, path=None):
    """Tail (or grep) logs from every host in two fabric roles, forever.

    Reconnects on failure: fast retry after a UnicodeDecodeError, linearly
    growing backoff after a ConnectionError; Ctrl-C ends the loop.
    """
    delay = 1
    while True:
        try:
            streams = create_streams_for_roles(role, role2, command=command, path=path)
            print " --- Loading %s App Log Tails ---" % len(streams)
            read_streams(streams)
        except UnicodeDecodeError: # unexpected end of data
            print " --- Lost connections - Retrying... ---"
            time.sleep(1)
            continue
        except ConnectionError:
            # Each consecutive failure waits one second longer than the last.
            print " --- Retrying in %s seconds... ---" % delay
            time.sleep(delay)
            delay += 1
            continue
        except KeyboardInterrupt:
            print " --- End of Logging ---"
            break
def create_streams_for_roles(role, role2, command=None, path=None):
    """Open one ssh subprocess per unique host in the two given roles.

    Returns a list of subprocess.Popen objects whose stdout carries the
    output of `command path` on the remote host; each process gets a
    ``.name`` attribute holding the short hostname for display. Hosts
    matching IGNORE_HOSTS and duplicate hostnames are skipped.
    """
    streams = list()
    hosts = fabfile.do(split=True)
    found = set()
    if not path:
        path = "/srv/newsblur/logs/newsblur.log"
    if not command:
        command = "tail -f"
    for hostname in (hosts[role] + hosts[role2]):
        # Host entries come in several shapes: dict with name/address keys,
        # a "name:address" string, a (name, address) tuple, or a bare name.
        if isinstance(hostname, dict):
            address = hostname['address']
            hostname = hostname['name']
        elif ':' in hostname:
            hostname, address = hostname.split(':', 1)
        elif isinstance(hostname, tuple):
            hostname, address = hostname[0], hostname[1]
        else:
            address = hostname
        if any(h in hostname for h in IGNORE_HOSTS): continue
        if hostname in found: continue
        if 'ec2' in hostname:
            # EC2 hosts authenticate with the shared ec2.pem key and the
            # default remote user.
            s = subprocess.Popen(["ssh",
                                  "-i", os.path.expanduser(os.path.join(fabfile.env.SECRETS_PATH,
                                                                        "keys/ec2.pem")),
                                  address, "%s %s" % (command, path)], stdout=subprocess.PIPE)
        else:
            # Other hosts log in as NEWSBLUR_USERNAME with the newsblur key.
            s = subprocess.Popen(["ssh", "-l", NEWSBLUR_USERNAME,
                                  "-i", os.path.expanduser(os.path.join(fabfile.env.SECRETS_PATH,
                                                                        "keys/newsblur.key")),
                                  address, "%s %s" % (command, path)], stdout=subprocess.PIPE)
        s.name = hostname
        streams.append(s)
        found.add(hostname)
    return streams
def read_streams(streams):
    """Multiplex the stdout of every ssh stream onto our stdout.

    Blocks in select() until any stream has data, then writes each chunk
    prefixed with the (truncated to 6 chars) host name. A stream that
    reaches EOF is removed from the watch list.
    """
    while True:
        r, _, _ = select.select(
            [stream.stdout.fileno() for stream in streams], [], [])
        for fileno in r:
            for stream in streams:
                if stream.stdout.fileno() != fileno:
                    continue
                data = os.read(fileno, 4096)
                if not data:
                    # EOF: the remote process ended; stop watching it.
                    streams.remove(stream)
                    break
                combination_message = "[%-6s] %s" % (stream.name[:6], data)
                sys.stdout.write(combination_message)
                break
# CLI entry point: -f/--find is the pattern handed to remote zgrep,
# -p/--path the remote log path.
# NOTE(review): `find` is interpolated into the remote shell command
# unescaped — acceptable for trusted operators, but worth confirming.
if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("-f", "--find", dest="find")
    parser.add_option("-p", "--path", dest="path")
    (options, args) = parser.parse_args()
    path = options.path
    find = options.find
    command = "zgrep \"%s\"" % find
    main(role="app", role2="dev", command=command, path=path)
| mit |
shivam1111/odoo | openerp/addons/base/ir/ir_model.py | 148 | 62274 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
    """Selection list of concrete ORM field types as (name, name) pairs.

    Excludes the abstract base ``fields._column``, deprecated columns and
    function fields.
    """
    # Avoid too many nested `if`s below, as RedHat's Python 2.6
    # break on it. See bug 939653.
    return sorted([(k,k) for k,v in fields.__dict__.iteritems()
                   if type(v) == types.TypeType and \
                      issubclass(v, fields._column) and \
                      v != fields._column and \
                      not v._deprecated and \
                      not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
    #pseudo-method used by fields.function in ir.model/ir.model.fields
    # For each record id, build a comma-separated list of the *installed*
    # modules whose XML IDs reference that record.
    module_pool = self.pool["ir.module.module"]
    installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
    installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
    installed_modules = set(x['name'] for x in installed_module_names)
    result = {}
    xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
    for k,v in xml_ids.iteritems():
        # XML IDs look like "module.name"; keep only installed module prefixes.
        result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
    return result
class unknown(models.AbstractModel):
    """
    Abstract model used as a substitute for relational fields with an unknown
    comodel.
    """
    # Registered under the reserved name '_unknown' so dangling relations
    # still resolve to a model in the registry.
    _name = '_unknown'
class ir_model(osv.osv):
    # Registry of all ORM models known to the database; custom ("manual")
    # models created through this model get a real table and registry class.
    _name = 'ir.model'
    _description = "Models"
    _order = 'model'
    # fields.function backend for 'osv_memory': True when the registry class
    # is transient (TransientModel).
    def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
        models = self.browse(cr, uid, ids, context=context)
        res = dict.fromkeys(ids)
        for model in models:
            if model.model in self.pool:
                res[model.id] = self.pool[model.model].is_transient()
            else:
                _logger.error('Missing model %s' % (model.model, ))
        return res
    # fnct_search for 'osv_memory': only '=' / '!=' comparisons are allowed;
    # computes the flag for every model and filters ids by the boolean value.
    def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
        if not domain:
            return []
        __, operator, value = domain[0]
        if operator not in ['=', '!=']:
            raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
        value = bool(value) if operator == '=' else not bool(value)
        all_model_ids = self.search(cr, uid, [], context=context)
        is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
        return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
    # fields.function backend for 'view_ids': ir.ui.view records targeting
    # this model.
    def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
        models = self.browse(cr, uid, ids)
        res = {}
        for model in models:
            res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
        return res
    # fields.function backend for 'inherited_model_ids': ir.model records of
    # the models listed in the registry class's _inherits mapping.
    def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
        res = {}
        for model in self.browse(cr, uid, ids, context=context):
            res[model.id] = []
            inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
            if inherited_models:
                res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
        return res
    _columns = {
        'name': fields.char('Model Description', translate=True, required=True),
        'model': fields.char('Model', required=True, select=1),
        'info': fields.text('Information'),
        'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
        'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
            help="The list of models that extends the current model."),
        'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
        'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
        'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
            fnct_search=_search_osv_memory,
            help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
        'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
        'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
    }
    _defaults = {
        'model': 'x_',
        # 'manual' when created through the UI (context carries 'manual').
        'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
    }
    # Constraint: manual models must be named x_...; all model names are
    # restricted to [a-z_A-Z0-9.].
    def _check_model_name(self, cr, uid, ids, context=None):
        for model in self.browse(cr, uid, ids, context=context):
            if model.state=='manual':
                if not model.model.startswith('x_'):
                    return False
            if not re.match('^[a-z_A-Z0-9.]+$',model.model):
                return False
        return True
    def _model_name_msg(self, cr, uid, ids, context=None):
        return _('The Object name must start with x_ and not contain any special character !')
    _constraints = [
        (_check_model_name, _model_name_msg, ['model']),
    ]
    _sql_constraints = [
        ('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
    ]
    # overridden to allow searching both on model name (model field)
    # and model description (name field)
    def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
        if args is None:
            args = []
        domain = args + ['|', ('model', operator, name), ('name', operator, name)]
        return self.name_get(cr, name_get_uid or uid,
                             super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
                             context=context)
    # Drop the backing relation of each model: a view is dropped as a view,
    # an ordinary table ('r') is dropped with CASCADE.
    def _drop_table(self, cr, uid, ids, context=None):
        for model in self.browse(cr, uid, ids, context):
            model_pool = self.pool[model.model]
            cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
            result = cr.fetchone()
            if result and result[0] == 'v':
                cr.execute('DROP view %s' % (model_pool._table,))
            elif result and result[0] == 'r':
                cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
        return True
    def unlink(self, cr, user, ids, context=None):
        # Prevent manual deletion of module tables
        if context is None: context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not context.get(MODULE_UNINSTALL_FLAG):
            for model in self.browse(cr, user, ids, context):
                if model.state != 'manual':
                    raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
        self._drop_table(cr, user, ids, context)
        res = super(ir_model, self).unlink(cr, user, ids, context)
        if not context.get(MODULE_UNINSTALL_FLAG):
            # only reload pool for normal unlink. For module uninstall the
            # reload is done independently in openerp.modules.loading
            cr.commit() # must be committed before reloading registry in new cursor
            api.Environment.reset()
            RegistryManager.new(cr.dbname)
            RegistryManager.signal_registry_change(cr.dbname)
        return res
    def write(self, cr, user, ids, vals, context=None):
        if context:
            context = dict(context)
            context.pop('__last_update', None)
        # Filter out operations 4 link from field id, because openerp-web
        # always write (4,id,False) even for non dirty items
        if 'field_id' in vals:
            vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
        return super(ir_model,self).write(cr, user, ids, vals, context)
    def create(self, cr, user, vals, context=None):
        if context is None:
            context = {}
        if context and context.get('manual'):
            vals['state']='manual'
        res = super(ir_model,self).create(cr, user, vals, context)
        if vals.get('state','base')=='manual':
            # add model in registry
            self.instanciate(cr, user, vals['model'], context)
            self.pool.setup_models(cr, partial=(not self.pool.ready))
            # update database schema
            model = self.pool[vals['model']]
            ctx = dict(context,
                field_name=vals['name'],
                field_state='manual',
                select=vals.get('select_level', '0'),
                update_custom_fields=True)
            model._auto_init(cr, ctx)
            model._auto_end(cr, ctx) # actually create FKs!
            RegistryManager.signal_registry_change(cr.dbname)
        return res
    # Build and register a bare registry class for a manual (custom) model.
    def instanciate(self, cr, user, model, context=None):
        if isinstance(model, unicode):
            model = model.encode('utf-8')
        class CustomModel(models.Model):
            _name = model
            _module = False
            _custom = True
        CustomModel._build_model(self.pool, cr)
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
    def _size_gt_zero_msg(self, cr, user, ids, context=None):
        # error message used by the size_gt_zero SQL constraint below
        return _('Size of the field can never be less than 0 !')

    _sql_constraints = [
        # a negative size makes no sense for any field type
        ('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
    ]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = model._fields[field.name].relation
cr.execute('DROP table "%s"' % (rel_name))
model._pop_field(field.name)
return True
    def unlink(self, cr, user, ids, context=None):
        """Delete field definitions and their backing database columns.

        Only custom ('manual') fields may be removed interactively; base
        fields can only disappear during a module uninstall (signalled by
        the MODULE_UNINSTALL_FLAG context key).
        """
        # Prevent manual deletion of module columns
        if context is None: context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not context.get(MODULE_UNINSTALL_FLAG) and \
                any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
            raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
        self._drop_column(cr, user, ids, context)
        res = super(ir_model_fields, self).unlink(cr, user, ids, context)
        if not context.get(MODULE_UNINSTALL_FLAG):
            # The field we just deleted might have be inherited, and registry is
            # inconsistent in this case; therefore we reload the registry.
            cr.commit()
            api.Environment.reset()
            RegistryManager.new(cr.dbname)
            RegistryManager.signal_registry_change(cr.dbname)
        return res
    def create(self, cr, user, vals, context=None):
        """Create a field definition.

        For custom ('manual') fields this also re-initializes the target
        model in the registry and syncs the database schema so the new
        column actually exists.
        """
        if 'model_id' in vals:
            model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
            vals['model'] = model_data.model
        if context is None:
            context = {}
        # fields created from the UI are flagged as custom fields
        if context and context.get('manual',False):
            vals['state'] = 'manual'
        if vals.get('ttype', False) == 'selection':
            if not vals.get('selection',False):
                raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
            self._check_selection(cr, user, vals['selection'], context=context)
        res = super(ir_model_fields,self).create(cr, user, vals, context)
        if vals.get('state','base') == 'manual':
            if not vals['name'].startswith('x_'):
                raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
            # relational fields must target an existing model
            if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
                raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
            self.pool.clear_manual_fields()
            if vals['model'] in self.pool:
                model = self.pool[vals['model']]
                if vals['model'].startswith('x_') and vals['name'] == 'x_name':
                    model._rec_name = 'x_name'
                # re-initialize model in registry
                model.__init__(self.pool, cr)
                self.pool.setup_models(cr, partial=(not self.pool.ready))
                # update database schema
                model = self.pool[vals['model']]
                ctx = dict(context,
                    field_name=vals['name'],
                    field_state='manual',
                    select=vals.get('select_level', '0'),
                    update_custom_fields=True)
                model._auto_init(cr, ctx)
                model._auto_end(cr, ctx) # actually create FKs!
                RegistryManager.signal_registry_change(cr.dbname)
        return res
    def write(self, cr, user, ids, vals, context=None):
        """Update field definitions; only custom ('manual') fields may be
        altered this way.

        Handles at most one column rename per call, patches the in-registry
        field objects, then syncs the database schema for affected models.
        """
        if context is None:
            context = {}
        if context and context.get('manual',False):
            vals['state'] = 'manual'

        #For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
        if 'serialization_field_id' in vals or 'name' in vals:
            for field in self.browse(cr, user, ids, context=context):
                if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
                    raise except_orm(_('Error!'),  _('Changing the storing system for field "%s" is not allowed.')%field.name)
                if field.serialization_field_id and (field.name != vals['name']):
                    raise except_orm(_('Error!'),  _('Renaming sparse field "%s" is not allowed')%field.name)

        # if set, *one* column can be renamed here
        column_rename = None

        # field patches {model: {field_name: {prop_name: prop_value, ...}, ...}, ...}
        patches = defaultdict(lambda: defaultdict(dict))

        # static table of properties
        model_props = [ # (our-name, fields.prop, set_fn)
            ('field_description', 'string', tools.ustr),
            ('required', 'required', bool),
            ('readonly', 'readonly', bool),
            ('domain', 'domain', eval),
            ('size', 'size', int),
            ('on_delete', 'ondelete', str),
            ('translate', 'translate', bool),
            ('select_level', 'index', lambda x: bool(int(x))),
            ('selection', 'selection', eval),
        ]

        if vals and ids:
            checked_selection = False # need only check it once, so defer
            for item in self.browse(cr, user, ids, context=context):
                obj = self.pool.get(item.model)
                field = getattr(obj, '_fields', {}).get(item.name)

                if item.state != 'manual':
                    raise except_orm(_('Error!'),
                        _('Properties of base fields cannot be altered in this manner! '
                          'Please modify them through Python code, '
                          'preferably through a custom addon!'))

                if item.ttype == 'selection' and 'selection' in vals \
                        and not checked_selection:
                    self._check_selection(cr, user, vals['selection'], context=context)
                    checked_selection = True

                final_name = item.name
                if 'name' in vals and vals['name'] != item.name:
                    # We need to rename the column
                    if column_rename:
                        raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
                    if vals['name'] in obj._columns:
                        raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
                    if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
                        raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
                    # guard against SQL injection in the ALTER TABLE below
                    if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
                        raise ValueError('Invalid character in column name')
                    column_rename = (obj, (obj._table, item.name, vals['name']))
                    final_name = vals['name']

                if 'model_id' in vals and vals['model_id'] != item.model_id.id:
                    raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))

                if 'ttype' in vals and vals['ttype'] != item.ttype:
                    raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
                                "Please drop it and create it again!"))

                # We don't check the 'state', because it might come from the context
                # (thus be set for multiple fields) and will be ignored anyway.
                if obj is not None and field is not None:
                    # find out which properties (per model) we need to update
                    for field_name, prop_name, func in model_props:
                        if field_name in vals:
                            prop_value = func(vals[field_name])
                            if getattr(field, prop_name) != prop_value:
                                patches[obj][final_name][prop_name] = prop_value

        # These shall never be written (modified)
        for column_name in ('model_id', 'model', 'state'):
            if column_name in vals:
                del vals[column_name]

        res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)

        if column_rename:
            obj, rename = column_rename
            cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % rename)
            # This is VERY risky, but let us have this feature:
            # we want to change the key of field in obj._fields and obj._columns
            field = obj._pop_field(rename[1])
            obj._add_field(rename[2], field)
            self.pool.setup_models(cr, partial=(not self.pool.ready))

        if patches:
            # We have to update _columns of the model(s) and then call their
            # _auto_init to sync the db with the model. Hopefully, since write()
            # was called earlier, they will be in-sync before the _auto_init.
            # Anything we don't update in _columns now will be reset from
            # the model into ir.model.fields (db).
            ctx = dict(context,
                select=vals.get('select_level', '0'),
                update_custom_fields=True,
            )
            for obj, model_patches in patches.iteritems():
                for field_name, field_patches in model_patches.iteritems():
                    # update field properties, and adapt corresponding column
                    field = obj._fields[field_name]
                    attrs = dict(field._attrs, **field_patches)
                    obj._add_field(field_name, field.new(**attrs))

                # update database schema
                self.pool.setup_models(cr, partial=(not self.pool.ready))
                obj._auto_init(cr, ctx)
                obj._auto_end(cr, ctx) # actually create FKs!

        if column_rename or patches:
            RegistryManager.signal_registry_change(cr.dbname)

        return res
class ir_model_constraint(Model):
    """
    This model tracks PostgreSQL foreign keys and constraints used by OpenERP
    models.
    """
    _name = 'ir.model.constraint'
    _columns = {
        'name': fields.char('Constraint', required=True, select=1,
            help="PostgreSQL constraint or foreign key name."),
        'model': fields.many2one('ir.model', string='Model',
            required=True, select=1),
        'module': fields.many2one('ir.module.module', string='Module',
            required=True, select=1),
        'type': fields.char('Constraint Type', required=True, size=1, select=1,
            help="Type of the constraint: `f` for a foreign key, "
                 "`u` for other constraints."),
        'date_update': fields.datetime('Update Date'),
        'date_init': fields.datetime('Initialization Date')
    }

    _sql_constraints = [
        ('module_name_uniq', 'unique(name, module)',
            'Constraints with the same name are unique per module.'),
    ]

    def _module_data_uninstall(self, cr, uid, ids, context=None):
        """
        Delete PostgreSQL foreign keys and constraints tracked by this model.

        A constraint is only dropped when every ir.model.constraint record
        referring to it belongs to the set being uninstalled.
        """
        if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
            raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))

        context = dict(context or {})

        ids_set = set(ids)
        # process in descending id order: most recently created first
        ids.sort()
        ids.reverse()
        for data in self.browse(cr, uid, ids, context):
            model = data.model.model
            model_obj = self.pool[model]
            name = openerp.tools.ustr(data.name)
            typ = data.type

            # double-check we are really going to delete all the owners of this schema element
            cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
            external_ids = [x[0] for x in cr.fetchall()]
            if set(external_ids)-ids_set:
                # as installed modules have defined this element we must not delete it!
                continue

            if typ == 'f':
                # test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
                cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                              WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
                if cr.fetchone():
                    cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
                    _logger.info('Dropped FK CONSTRAINT %s@%s', name, model)

            if typ == 'u':
                # test if constraint exists
                cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                              WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
                if cr.fetchone():
                    cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
                    _logger.info('Dropped CONSTRAINT %s@%s', name, model)

        self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
    """
    This model tracks PostgreSQL tables used to implement OpenERP many2many
    relations.
    """
    _name = 'ir.model.relation'
    _columns = {
        'name': fields.char('Relation Name', required=True, select=1,
            help="PostgreSQL table name implementing a many2many relation."),
        'model': fields.many2one('ir.model', string='Model',
            required=True, select=1),
        'module': fields.many2one('ir.module.module', string='Module',
            required=True, select=1),
        'date_update': fields.datetime('Update Date'),
        'date_init': fields.datetime('Initialization Date')
    }

    def _module_data_uninstall(self, cr, uid, ids, context=None):
        """
        Delete PostgreSQL many2many relations tracked by this model.

        A relation table is only dropped when every ir.model.relation record
        referring to it belongs to the set being uninstalled.
        """
        if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
            raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))

        ids_set = set(ids)
        to_drop_table = []
        # process in descending id order: most recently created first
        ids.sort()
        ids.reverse()
        for data in self.browse(cr, uid, ids, context):
            model = data.model
            name = openerp.tools.ustr(data.name)

            # double-check we are really going to delete all the owners of this schema element
            cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
            external_ids = [x[0] for x in cr.fetchall()]
            if set(external_ids)-ids_set:
                # as installed modules have defined this element we must not delete it!
                continue

            cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
            if cr.fetchone() and not name in to_drop_table:
                to_drop_table.append(name)

        self.unlink(cr, uid, ids, context)

        # drop m2m relation tables
        for table in to_drop_table:
            # NOTE(review): the table name is interpolated unquoted here,
            # unlike the quoted identifiers used in ir_model_constraint above;
            # this relies on m2m table names being plain lowercase identifiers
            # -- confirm before reusing this with arbitrary names.
            cr.execute('DROP TABLE %s CASCADE'% table,)
            _logger.info('Dropped table %s', table)

        cr.commit()
class ir_model_access(osv.osv):
    """Access Control List entries: per-model, optionally per-group, CRUD
    permissions. An entry without a group applies to all users; a mode is
    granted as soon as one matching active entry allows it.
    """
    _name = 'ir.model.access'
    _columns = {
        'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module.'),
        'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
        'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
        'perm_read': fields.boolean('Read Access'),
        'perm_write': fields.boolean('Write Access'),
        'perm_create': fields.boolean('Create Access'),
        'perm_unlink': fields.boolean('Delete Access'),
    }
    _defaults = {
        'active': True,
    }

    def check_groups(self, cr, uid, group):
        """Return True if user `uid` belongs to the group whose fully
        qualified external ID is `group` ("module.xml_id")."""
        grouparr = group.split('.')
        # str.split always returns at least one element, so the historical
        # `if not grouparr` guard was dead code and a dot-less name crashed
        # with IndexError below; require exactly "module.name" instead.
        if len(grouparr) != 2:
            return False
        cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
        return bool(cr.fetchone())

    def check_group(self, cr, uid, model, mode, group_ids):
        """ Check if a specific group has the access mode to the specified model"""
        assert mode in ['read','write','create','unlink'], 'Invalid access mode'

        if isinstance(model, BaseModel):
            assert model._name == 'ir.model', 'Invalid model object'
            # use the technical name ('model' field), consistently with
            # check() below: the query matches against ir_model.model, which
            # the human-readable 'name' field would never equal
            model_name = model.model
        else:
            model_name = model

        if isinstance(group_ids, (int, long)):
            group_ids = [group_ids]
        for group_id in group_ids:
            # first look for an ACL specific to this group...
            cr.execute("SELECT perm_" + mode + " "
                       "  FROM ir_model_access a "
                       "  JOIN ir_model m ON (m.id = a.model_id) "
                       " WHERE m.model = %s AND a.active IS True "
                       "   AND a.group_id = %s", (model_name, group_id)
                       )
            r = cr.fetchone()
            if r is None:
                # ... then fall back on the global (group-less) ACLs
                cr.execute("SELECT perm_" + mode + " "
                           "  FROM ir_model_access a "
                           "  JOIN ir_model m ON (m.id = a.model_id) "
                           " WHERE m.model = %s AND a.active IS True "
                           "   AND a.group_id IS NULL", (model_name, )
                           )
                r = cr.fetchone()

            access = bool(r and r[0])
            if access:
                return True
        # pass no groups -> no access
        return False

    def group_names_with_access(self, cr, model_name, access_mode):
        """Returns the names of visible groups which have been granted ``access_mode`` on
        the model ``model_name``.
        :rtype: list
        """
        assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
        cr.execute('''SELECT
                        c.name, g.name
                      FROM
                        ir_model_access a
                        JOIN ir_model m ON (a.model_id=m.id)
                        JOIN res_groups g ON (a.group_id=g.id)
                        LEFT JOIN ir_module_category c ON (c.id=g.category_id)
                      WHERE
                        m.model=%s AND
                        a.active IS True AND
                        a.perm_''' + access_mode, (model_name,))
        # prefix each group with its module category when it has one
        return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]

    # The context parameter is useful when the method translates error messages.
    # But as the method raises an exception in that case, the key 'lang' might
    # not be really necessary as a cache key, unless the `ormcache_context`
    # decorator catches the exception (it does not at the moment.)
    @tools.ormcache_context(accepted_keys=('lang',))
    def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
        """Return whether user `uid` has `mode` access on `model`.

        With raise_exception (the default), denial raises an AccessError
        carrying a translated, user-oriented message instead of returning
        False.
        """
        if uid == SUPERUSER_ID:
            # User root have all accesses
            # TODO: exclude xml-rpc requests
            return True

        assert mode in ['read','write','create','unlink'], 'Invalid access mode'

        if isinstance(model, BaseModel):
            assert model._name == 'ir.model', 'Invalid model object'
            model_name = model.model
        else:
            model_name = model

        # TransientModel records have no access rights, only an implicit access rule
        if model_name not in self.pool:
            _logger.error('Missing model %s', model_name)
        elif self.pool[model_name].is_transient():
            return True

        # We check if a specific rule exists
        cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
                   '  FROM ir_model_access a '
                   '  JOIN ir_model m ON (m.id = a.model_id) '
                   '  JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
                   ' WHERE m.model = %s '
                   '   AND gu.uid = %s '
                   '   AND a.active IS True '
                   , (model_name, uid,)
                   )
        r = cr.fetchone()[0]

        if r is None:
            # there is no specific rule. We check the generic rule
            cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
                       '  FROM ir_model_access a '
                       '  JOIN ir_model m ON (m.id = a.model_id) '
                       ' WHERE a.group_id IS NULL '
                       '   AND m.model = %s '
                       '   AND a.active IS True '
                       , (model_name,)
                       )
            r = cr.fetchone()[0]

        if not r and raise_exception:
            groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
            msg_heads = {
                # Messages are declared in extenso so they are properly exported in translation terms
                'read': _("Sorry, you are not allowed to access this document."),
                'write':  _("Sorry, you are not allowed to modify this document."),
                'create': _("Sorry, you are not allowed to create this kind of document."),
                'unlink': _("Sorry, you are not allowed to delete this document."),
            }
            if groups:
                msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
                msg_params = (groups, model_name)
            else:
                msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
                msg_params = (model_name,)
            _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
            msg = '%s %s' % (msg_heads[mode], msg_tail)
            raise openerp.exceptions.AccessError(msg % msg_params)

        return bool(r)

    # (model, method-name) pairs whose caches are flushed when ACLs change;
    # shared at class level across all registries
    __cache_clearing_methods = []

    def register_cache_clearing_method(self, model, method):
        """Register a (model, method-name) pair to be invalidated whenever
        ACLs are created/written/deleted."""
        self.__cache_clearing_methods.append((model, method))

    def unregister_cache_clearing_method(self, model, method):
        """Remove a previously registered pair; unknown pairs are ignored."""
        try:
            i = self.__cache_clearing_methods.index((model, method))
            del self.__cache_clearing_methods[i]
        except ValueError:
            pass

    def call_cache_clearing_methods(self, cr):
        """Invalidate this model's ORM cache, the check() ormcache, and every
        registered external cache-clearing callback."""
        self.invalidate_cache(cr, SUPERUSER_ID)
        self.check.clear_cache(self)    # clear the cache of check function
        for model, method in self.__cache_clearing_methods:
            if model in self.pool:
                getattr(self.pool[model], method)()

    #
    # Check rights on actions
    #
    def write(self, cr, uid, ids, values, context=None):
        self.call_cache_clearing_methods(cr)
        res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
        return res

    def create(self, cr, uid, values, context=None):
        self.call_cache_clearing_methods(cr)
        res = super(ir_model_access, self).create(cr, uid, values, context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        self.call_cache_clearing_methods(cr)
        res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
        return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
    def name_get(self, cr, uid, ids, context=None):
        """Display each external ID as the display name of the record it
        points to, falling back on the 'module.name' complete identifier
        when the target model's name_get fails (e.g. model removed).
        """
        bymodel = defaultdict(dict)
        names = {}
        for res in self.browse(cr, uid, ids, context=context):
            bymodel[res.model][res.res_id] = res
            # default: complete external identifier, overridden below on success
            names[res.id] = res.complete_name
        for model, id_map in bymodel.iteritems():
            try:
                ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
            except Exception:
                # best effort: keep the complete_name fallback set above
                pass
            else:
                for r in id_map.itervalues():
                    names[r.id] = ng.get(r.res_id, r.complete_name)
        return [(i, names[i]) for i in ids]
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
    def __init__(self, pool, cr):
        """Set up the shared (module, xml_id) -> (model, res_id) cache."""
        osv.osv.__init__(self, pool, cr)
        # also stored in pool to avoid being discarded along with this osv instance
        if getattr(pool, 'model_data_reference_ids', None) is None:
            self.pool.model_data_reference_ids = {}
        # put loads on the class, in order to share it among all instances
        type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
    # NEW V8 API
    @tools.ormcache(skiparg=3)
    def xmlid_lookup(self, cr, uid, xmlid):
        """Low level xmlid lookup (cached per xmlid).

        :param xmlid: fully qualified external ID "module.name"
        :return: (id, res_model, res_id)
        :raise ValueError: when no record matches, or the entry has no res_id
        """
        module, name = xmlid.split('.', 1)
        ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
        if not ids:
            raise ValueError('External ID not found in the system: %s' % (xmlid))
        # the sql constraints ensure us we have only one result
        res = self.read(cr, uid, ids[0], ['model', 'res_id'])
        if not res['res_id']:
            raise ValueError('External ID not found in the system: %s' % (xmlid))
        return ids[0], res['model'], res['res_id']
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
    def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
        """ Return a browse_record for `xmlid`.

        If the record is not found (or no longer exists), return None --
        unless `raise_if_not_found` is set, in which case a ValueError is
        raised.
        """
        t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
        res_model, res_id = t

        if res_model and res_id:
            record = self.pool[res_model].browse(cr, uid, res_id, context=context)
            if record.exists():
                return record
            # entry exists but the target record was deleted
            if raise_if_not_found:
                raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
        return None
    # OLD API
    def _get_id(self, cr, uid, module, xml_id):
        """Returns the id of the ir.model.data record corresponding to a given
        module and xml_id (cached) or raise a ValueError if not found"""
        return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]

    def get_object_reference(self, cr, uid, module, xml_id):
        """Returns (model, res_id) corresponding to a given module and xml_id
        (cached) or raise ValueError if not found"""
        return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
    def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
        """Returns (model, res_id) corresponding to a given module and xml_id
        (cached), if and only if the user has the necessary access rights to
        see that object; otherwise raise a ValueError if
        raise_on_access_error is True, or return (model, False)."""
        model, res_id = self.get_object_reference(cr, uid, module, xml_id)
        #search on id found in result to check if current user has read access right
        check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
        if check_right:
            return model, res_id
        if raise_on_access_error:
            raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
        return model, False
    def get_object(self, cr, uid, module, xml_id, context=None):
        """ Returns a browsable record for the given module name and xml_id.
        Raises ValueError when not found (raise_if_not_found is always
        passed; the previously documented `raise_exception` parameter does
        not exist).
        """
        return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
    def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
        """Register an already-existing record in self.loads (the shared
        xmlid cache), without modifying it.

        Also registers the _inherits parents under derived xml_ids. Returns
        the record's database id, or False when xml_id is empty or the
        lookup fails.
        """
        if not xml_id:
            return False
        id = False
        try:
            # One step to check the ID is defined and the record actually exists
            record = self.get_object(cr, uid, module, xml_id)
            if record:
                id = record.id
                self.loads[(module,xml_id)] = (model,id)
                for table, inherit_field in self.pool[model]._inherits.iteritems():
                    parent_id = record[inherit_field].id
                    parent_xid = '%s_%s' % (xml_id, table.replace('.', '_'))
                    self.loads[(module, parent_xid)] = (table, parent_id)
        except Exception:
            # deliberate best-effort: a missing/broken reference is simply
            # not registered
            pass
        return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
    def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
        """Create or update the record identified by (module, xml_id).

        Maintains the matching ir.model.data row (and derived rows for
        _inherits parents), honouring the noupdate flag in 'update' mode.
        Returns the database id of the target record and registers it in
        self.loads.
        """
        model_obj = self.pool[model]
        if not context:
            context = {}
        # records created during module install should not display the messages of OpenChatter
        context = dict(context, install_mode=True)
        if xml_id and ('.' in xml_id):
            assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
            module, xml_id = xml_id.split('.')
        action_id = False
        if xml_id:
            # look up an existing ir.model.data entry and the record it targets
            cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
                          FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
                          WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
                          (module, xml_id))
            results = cr.fetchall()
            for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
                # In update mode, do not update a record if it's ir.model.data is flagged as noupdate
                if mode == 'update' and noupdate_imd:
                    return res_id2
                if not real_id2:
                    # dangling entry: the target record is gone; purge it
                    self.clear_caches()
                    cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
                    res_id = False
                else:
                    assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
                        " you can't define a `%s` record with this ID." % (xml_id, real_model, model)
                    res_id,action_id = res_id2,imd_id2

        if action_id and res_id:
            # existing record with an existing ir.model.data entry: plain update
            model_obj.write(cr, uid, [res_id], values, context=context)
            self.write(cr, SUPERUSER_ID, [action_id], {
                'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
                },context=context)
        elif res_id:
            # existing record with no (matching) entry: update and create entries
            model_obj.write(cr, uid, [res_id], values, context=context)
            if xml_id:
                if model_obj._inherits:
                    for table in model_obj._inherits:
                        inherit_id = model_obj.browse(cr, uid,
                                res_id,context=context)[model_obj._inherits[table]]
                        self.create(cr, SUPERUSER_ID, {
                            'name': xml_id + '_' + table.replace('.', '_'),
                            'model': table,
                            'module': module,
                            'res_id': inherit_id.id,
                            'noupdate': noupdate,
                            },context=context)
                self.create(cr, SUPERUSER_ID, {
                    'name': xml_id,
                    'model': model,
                    'module':module,
                    'res_id':res_id,
                    'noupdate': noupdate,
                    },context=context)
        else:
            # no record yet: only create on first install, or on update when
            # an xml_id is provided
            if mode=='init' or (mode=='update' and xml_id):
                res_id = model_obj.create(cr, uid, values, context=context)
                if xml_id:
                    if model_obj._inherits:
                        for table in model_obj._inherits:
                            inherit_id = model_obj.browse(cr, uid,
                                    res_id,context=context)[model_obj._inherits[table]]
                            self.create(cr, SUPERUSER_ID, {
                                'name': xml_id + '_' + table.replace('.', '_'),
                                'model': table,
                                'module': module,
                                'res_id': inherit_id.id,
                                'noupdate': noupdate,
                                },context=context)
                    self.create(cr, SUPERUSER_ID, {
                        'name': xml_id,
                        'model': model,
                        'module': module,
                        'res_id': res_id,
                        'noupdate': noupdate
                        },context=context)
        if xml_id and res_id:
            # register the record (and its _inherits parents) in the shared cache
            self.loads[(module, xml_id)] = (model, res_id)
            for table, inherit_field in model_obj._inherits.iteritems():
                inherit_id = model_obj.read(cr, uid, [res_id],
                        [inherit_field])[0][inherit_field]
                self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
        return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
if not res:
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
elif xml_id:
cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
ir_values_obj.invalidate_cache(cr, uid, ['value'])
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
    """Deletes all the records referenced by the ir.model.data entries
    ``ids`` along with their corresponding database backed (including
    dropping tables, columns, FKs, etc, as long as there is no other
    ir.model.data entry holding a reference to them (which indicates that
    they are still owned by another module).
    Attempts to perform the deletion in an appropriate order to maximize
    the chance of gracefully deleting all records.
    This step is performed as part of the full uninstallation of a module.

    :param cr: database cursor
    :param uid: id of the user performing the uninstall; must be the
        superuser (1) or belong to base.group_system
    :param modules_to_remove: list of module names being uninstalled
    :param context: optional context dict; MODULE_UNINSTALL_FLAG is added
        to it so model/field records may be deleted
    :raises except_orm: when the user lacks administrator rights
    """
    ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
    if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
        raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
    context = dict(context or {})
    context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
    ids_set = set(ids)
    wkf_todo = []
    to_unlink = []
    # Visit entries newest-first (descending id): later records usually
    # depend on earlier ones, so this order maximizes graceful deletion.
    ids.sort()
    ids.reverse()
    for data in self.browse(cr, uid, ids, context):
        model = data.model
        res_id = data.res_id
        pair_to_unlink = (model, res_id)
        if pair_to_unlink not in to_unlink:
            to_unlink.append(pair_to_unlink)
        if model == 'workflow.activity':
            # Special treatment for workflow activities: temporarily revert their
            # incoming transition and trigger an update to force all workflow items
            # to move out before deleting them
            cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
            wkf_todo.extend(cr.fetchall())
            cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
            self.invalidate_cache(cr, uid, context=context)

    # Re-trigger the collected workflow instances so their workitems leave
    # the activities that are about to be deleted. Failures are logged,
    # not fatal: deletion below is best-effort per record anyway.
    for model,res_id in wkf_todo:
        try:
            openerp.workflow.trg_write(uid, model, res_id, cr)
        except Exception:
            _logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)

    def unlink_if_refcount(to_unlink):
        # Delete each (model, res_id) pair, but only when no ir.model.data
        # entry outside the uninstalled modules still references it.
        for model, res_id in to_unlink:
            external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
            if set(external_ids)-ids_set:
                # if other modules have defined this record, we must not delete it
                continue
            if model == 'ir.model.fields':
                # Don't remove the LOG_ACCESS_COLUMNS unless _log_access
                # has been turned off on the model.
                field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
                if not field.exists():
                    # The field record is already gone; drop the orphan
                    # ir.model.data entries pointing at it.
                    _logger.info('Deleting orphan external_ids %s', external_ids)
                    self.unlink(cr, uid, external_ids)
                    continue
                if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
                    continue
                if field.name == 'id':
                    continue
            _logger.info('Deleting %s@%s', res_id, model)
            try:
                # Savepoint so a failed unlink doesn't abort the whole
                # uninstall transaction; failures are logged and skipped.
                cr.execute('SAVEPOINT record_unlink_save')
                self.pool[model].unlink(cr, uid, [res_id], context=context)
            except Exception:
                _logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
                cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
            else:
                cr.execute('RELEASE SAVEPOINT record_unlink_save')

    # Remove non-model records first, then model fields, and finish with models
    unlink_if_refcount((model, res_id) for model, res_id in to_unlink
                            if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
    unlink_if_refcount((model, res_id) for model, res_id in to_unlink
                            if model == 'ir.model.constraint')

    ir_module_module = self.pool['ir.module.module']
    ir_model_constraint = self.pool['ir.model.constraint']
    modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
    constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
    # Drop the SQL constraints owned by these modules before their fields.
    ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)

    unlink_if_refcount((model, res_id) for model, res_id in to_unlink
                            if model == 'ir.model.fields')

    ir_model_relation = self.pool['ir.model.relation']
    relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
    # Drop the many2many relation tables owned by these modules.
    ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)

    unlink_if_refcount((model, res_id) for model, res_id in to_unlink
                            if model == 'ir.model')

    cr.commit()

    self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
    """ Clear records removed from updated module data.
    This method is called at the end of the module loading process.
    It is meant to removed records that are no longer present in the
    updated data. Such records are recognised as the one with an xml id
    and a module in ir_model_data and noupdate set to false, but not
    present in self.loads.

    :param cr: database cursor
    :param uid: current user id
    :param modules: list of module names that were just loaded/updated
    :returns: True when there is nothing to check (no modules, or a
        partial import is in progress); None otherwise
    """
    if not modules or config.get('import_partial'):
        return True

    bad_imd_ids = []
    # MODULE_UNINSTALL_FLAG enables deletion of model/field records.
    context = {MODULE_UNINSTALL_FLAG: True}
    # Descending id order: delete newer (dependent) records first.
    cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
                  WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC
               """, (tuple(modules), False))
    for (id, name, model, res_id, module) in cr.fetchall():
        if (module, name) not in self.loads:
            # The xml id disappeared from the module's data files.
            if model in self.pool:
                _logger.info('Deleting %s@%s (%s.%s)', res_id, model, module, name)
                if self.pool[model].exists(cr, uid, [res_id], context=context):
                    self.pool[model].unlink(cr, uid, [res_id], context=context)
                else:
                    # Target record already gone: the ir.model.data entry
                    # itself is an orphan to be removed directly below.
                    bad_imd_ids.append(id)
    if bad_imd_ids:
        self.unlink(cr, uid, bad_imd_ids, context=context)
    self.loads.clear()
class wizard_model_menu(osv.osv_memory):
    """Transient wizard that creates a menu entry opening a given model."""

    _name = 'wizard.ir.model.menu.create'
    _columns = {
        'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
        'name': fields.char('Menu Name', required=True),
    }

    def menu_create(self, cr, uid, ids, context=None):
        """For each wizard record, create an act_window action on the model
        given by context['model_id'] and hang a menu item under the chosen
        parent menu that triggers it."""
        if not context:
            context = {}
        model_pool = self.pool.get('ir.model')
        action_pool = self.pool.get('ir.actions.act_window')
        menu_pool = self.pool.get('ir.ui.menu')
        for menu in self.browse(cr, uid, ids, context):
            model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
            # Window action showing the model as a tree with a form view.
            action_id = action_pool.create(cr, uid, {
                'name': menu.name,
                'res_model': model.model,
                'view_type': 'form',
                'view_mode': 'tree,form',
            })
            menu_pool.create(cr, uid, {
                'name': menu.name,
                'parent_id': menu.menu_id.id,
                'action': 'ir.actions.act_window,%d' % (action_id,),
                'icon': 'STOCK_INDENT',
            }, context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rubinovitz/project-x | boilerplate/external/wtforms/ext/csrf/form.py | 119 | 1788 | from __future__ import unicode_literals
from wtforms.form import Form
from wtforms.validators import ValidationError
from .fields import CSRFTokenField
class SecureForm(Form):
    """
    Form that enables CSRF processing via subclassing hooks.
    """

    csrf_token = CSRFTokenField()

    def __init__(self, formdata=None, obj=None, prefix='', csrf_context=None, **kwargs):
        """
        :param csrf_context:
            Optional extra data which is passed transparently to your
            CSRF implementation.
        """
        super(SecureForm, self).__init__(formdata, obj, prefix, **kwargs)
        self.csrf_token.current_token = self.generate_csrf_token(csrf_context)

    def generate_csrf_token(self, csrf_context):
        """
        Subclass hook: return the CSRF token for this form.

        The token should be a string that can be re-derived
        deterministically, so that the value generated when rendering the
        form (usually) matches the one generated when the form is posted
        back, for a user using the site normally.

        :param csrf_context:
            A transparent object which can be used as contextual info for
            generating the token.
        """
        raise NotImplementedError()

    def validate_csrf_token(self, field):
        """
        Subclass hook: custom CSRF validation logic.

        The default implementation accepts the submission only when the
        freshly generated token equals the one received in the formdata.
        """
        if field.current_token == field.data:
            return
        raise ValidationError(field.gettext('Invalid CSRF Token'))

    @property
    def data(self):
        """The form's data dict, with the internal csrf_token stripped."""
        form_data = super(SecureForm, self).data
        del form_data['csrf_token']
        return form_data
| lgpl-3.0 |
kaplun/inspire-next | inspirehep/modules/records/json_ref_loader.py | 2 | 4458 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Resource-aware json reference loaders to be used with jsonref."""
from __future__ import absolute_import, division, print_function
import re
from flask import current_app
from jsonref import JsonLoader, JsonRef
from werkzeug.urls import url_parse
import jsonresolver
from jsonresolver.contrib.jsonref import json_loader_factory
from inspirehep.modules.pidstore.utils import get_pid_type_from_endpoint
from inspirehep.utils import record_getter
class AbstractRecordLoader(JsonLoader):
    """Base for resource-aware record loaders.

    A URI pointing at this server's own record endpoints is resolved
    locally through :meth:`get_record`; any other URI falls back to the
    plain HTTP behaviour of :class:`jsonref.JsonLoader`.
    """

    def get_record(self, pid_type, recid):
        raise NotImplementedError()

    def get_remote_json(self, uri, **kwargs):
        requested = url_parse(uri)
        # Add http:// protocol so uri.netloc is correctly parsed.
        server_name = current_app.config.get('SERVER_NAME')
        if not re.match('^https?://', server_name):
            server_name = 'http://{}'.format(server_name)
        local = url_parse(server_name)

        if requested.netloc and requested.netloc != local.netloc:
            # Foreign host: defer to the default HTTP-based resolution.
            return super(AbstractRecordLoader, self).get_remote_json(uri,
                                                                     **kwargs)

        # Expect a path ending in .../<endpoint>/<recid>.
        parts = requested.path.strip('/').split('/')
        if len(parts) < 2:
            current_app.logger.error('Bad JSONref URI: {0}'.format(uri))
            return None

        endpoint = parts[-2]
        recid = parts[-1]
        pid_type = get_pid_type_from_endpoint(endpoint)
        return self.get_record(pid_type, recid)
class ESJsonLoader(AbstractRecordLoader):
    """Resolve resources by retrieving them from Elasticsearch."""

    def get_record(self, pid_type, recid):
        # Missing/unreadable records resolve to None instead of raising.
        try:
            return record_getter.get_es_record(pid_type, recid)
        except record_getter.RecordGetterError:
            return None
class DatabaseJsonLoader(AbstractRecordLoader):
    """Resolve resources by retrieving them from the database."""

    def get_record(self, pid_type, recid):
        # Missing/unreadable records resolve to None instead of raising.
        try:
            return record_getter.get_db_record(pid_type, recid)
        except record_getter.RecordGetterError:
            return None
# Module-level loader singletons, selected by replace_refs(source=...).
es_record_loader = ESJsonLoader()
db_record_loader = DatabaseJsonLoader()

SCHEMA_LOADER_CLS = json_loader_factory(
    jsonresolver.JSONResolver(
        plugins=['invenio_jsonschemas.jsonresolver']
    )
)
"""Used in invenio-jsonschemas to resolve relative $ref."""
def replace_refs(obj, source='db'):
    """Replaces record refs in obj by bypassing HTTP requests.

    Any reference URI that comes from the same server and references a
    resource will be resolved directly either from the database or from
    Elasticsearch.

    :param obj:
        Dict-like object for which '$ref' fields are recursively replaced.
    :param source:
        Which resolver to use. One of:
        * 'db' - resolve from Database
        * 'es' - resolve from Elasticsearch
        * 'http' - force using HTTP
    :returns:
        The same obj structure with the '$ref' fields replaced with the
        object available at the given URI.
    :raises ValueError: if ``source`` names an unknown resolver.
    """
    loaders = {
        'db': db_record_loader,
        'es': es_record_loader,
        'http': None,
    }
    if source not in loaders:
        raise ValueError('source must be one of {}'.format(loaders.keys()))
    return JsonRef.replace_refs(obj, loader=loaders[source], load_on_repr=False)
| gpl-3.0 |
bbozhev/flask-test | flask/lib/python2.7/site-packages/pip/_vendor/progress/helpers.py | 521 | 2854 | # Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
# ANSI/DEC escape sequences that hide and show the terminal cursor.
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
    """Mixin writing in-place progress text on a single line.

    Expects the host class to provide ``self.file``; all output is
    suppressed when that stream is not a TTY.
    """

    # Subclasses may set this True to hide the terminal cursor while active.
    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WriteMixin, self).__init__(**kwargs)
        self._width = 0
        if message:
            self.message = message

        stream = self.file
        if stream.isatty():
            if self.hide_cursor:
                print(HIDE_CURSOR, end='', file=stream)
            print(self.message, end='', file=stream)
            stream.flush()

    def write(self, s):
        """Overwrite the previously written text with ``s`` in place."""
        stream = self.file
        if not stream.isatty():
            return
        # Backspace over the old text, then pad so shorter text fully
        # covers what was there before.
        backspaces = '\b' * self._width
        padded = s.ljust(self._width)
        print(backspaces + padded, end='', file=stream)
        self._width = max(self._width, len(s))
        stream.flush()

    def finish(self):
        """Restore the cursor if it was hidden."""
        if self.file.isatty() and self.hide_cursor:
            print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
    """Mixin that rewrites a whole line of progress output at a time.

    Expects the host class to provide ``self.file``; all output is
    suppressed when that stream is not a TTY.
    """

    # Subclasses may set this True to hide the terminal cursor while active.
    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WritelnMixin, self).__init__(**kwargs)
        if message:
            self.message = message

        if self.file.isatty() and self.hide_cursor:
            print(HIDE_CURSOR, end='', file=self.file)

    def clearln(self):
        """Erase the current line (carriage return + erase-to-EOL)."""
        if not self.file.isatty():
            return
        print('\r\x1b[K', end='', file=self.file)

    def writeln(self, line):
        """Replace the current line with ``line`` (no trailing newline)."""
        if not self.file.isatty():
            return
        self.clearln()
        print(line, end='', file=self.file)
        self.file.flush()

    def finish(self):
        """Terminate the line, restoring the cursor if it was hidden."""
        if not self.file.isatty():
            return
        print(file=self.file)
        if self.hide_cursor:
            print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
    """Registers a signal handler that calls finish on SIGINT"""

    def __init__(self, *args, **kwargs):
        super(SigIntMixin, self).__init__(*args, **kwargs)
        # Installed at construction time; this replaces any previously
        # installed SIGINT handler for the whole process.
        signal(SIGINT, self._sigint_handler)

    def _sigint_handler(self, signum, frame):
        # Let the progress display clean up (e.g. re-show the cursor),
        # then terminate the process.
        self.finish()
        exit(0)
| mit |
mythsmith/veusz | veusz/dialogs/filterdialog.py | 8 | 4398 | # Copyright (C) 2015 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""Dialog for filtering data."""
from __future__ import division, print_function
from .. import qtall as qt4
from .. import document
from ..qtwidgets.datasetbrowser import DatasetBrowser
from .veuszdialog import VeuszDialog
def _(text, disambiguation=None, context="FilterDialog"):
    """Translate text via Qt's translation system, in FilterDialog scope."""
    return qt4.QCoreApplication.translate(context, text, disambiguation)
class FilterDialog(VeuszDialog):
    """Dialog for filtering datasets by a user-entered expression."""

    def __init__(self, parent, doc):
        """Setup dialog."""
        VeuszDialog.__init__(self, parent, "filter.ui")
        self.document = doc

        # Checkable tree of the document's datasets, used to pick which
        # datasets the filter is applied to.
        self.dsbrowser = DatasetBrowser(doc, parent, None, checkable=True)
        grplayout = qt4.QVBoxLayout()
        grplayout.addWidget(self.dsbrowser)
        self.filtergroup.setLayout(grplayout)

        self.buttonBox.button(qt4.QDialogButtonBox.Apply).clicked.connect(
            self.applyClicked)
        self.buttonBox.button(qt4.QDialogButtonBox.Reset).clicked.connect(
            self.resetClicked)

    def updateStatus(self, text):
        """Show message in dialog, cleared again after a few seconds."""
        qt4.QTimer.singleShot(4000, self.statuslabel.clear)
        self.statuslabel.setText(text)

    def applyClicked(self):
        """Do the filtering."""

        # A prefix and/or suffix is required to name the output datasets.
        prefix = self.prefixcombo.currentText().strip()
        suffix = self.suffixcombo.currentText().strip()
        if not prefix and not suffix:
            self.updateStatus(_("Prefix and/or suffix must be entered"))
            return

        expr = self.exprcombo.currentText().strip()
        if not expr:
            self.updateStatus(_("Enter a valid filter expression"))
            return

        tofilter = self.dsbrowser.checkedDatasets()
        if not tofilter:
            self.updateStatus(_("Choose at least one dataset to filter"))
            return

        invert = self.invertcheck.isChecked()
        replaceblanks = self.replaceblankscheck.isChecked()

        # Build the document operation, validate it, then apply it through
        # the document so the change participates in undo/redo.
        op = document.OperationDatasetsFilter(
            expr,
            tofilter,
            prefix=prefix, suffix=suffix,
            invert=invert,
            replaceblanks=replaceblanks)

        ok, log = op.check(self.document)
        if not ok:
            self.updateStatus("\n".join(log))
            return

        self.document.applyOperation(op)
        self.updateStatus(_("Filtered %i datasets") % len(tofilter))

    def resetClicked(self):
        """Reset controls to defaults."""
        for cntrl in self.exprcombo, self.prefixcombo, self.suffixcombo:
            cntrl.setEditText("")
        self.dsbrowser.reset()
        self.invertcheck.setChecked(False)
        self.replaceblankscheck.setChecked(False)
        self.updateStatus(_("Dialog reset"))

    def reEditDialog(self, dataset):
        """Load controls with settings from dataset."""
        gen = dataset.generator
        self.exprcombo.setEditText(gen.inexpr)
        self.prefixcombo.setEditText(gen.prefix)
        self.suffixcombo.setEditText(gen.suffix)
        self.invertcheck.setChecked(gen.invert)
        self.replaceblankscheck.setChecked(gen.replaceblanks)

        # Only re-check datasets which still exist in the document.
        datasets = [
            d for d in gen.indatasets
            if d in self.document.data
        ]
        self.dsbrowser.setCheckedDatasets(datasets)
def recreateDataset(mainwindow, document, dataset, datasetname):
    """Open dialog to recreate filter.

    Shows a FilterDialog preloaded with the settings of the given filtered
    dataset. ``datasetname`` is accepted but unused here.
    """
    dialog = FilterDialog(mainwindow, document)
    mainwindow.showDialog(dialog)
    dialog.reEditDialog(dataset)
| gpl-2.0 |
Livefyre/livefyre-python-utils | livefyre/tests/api/domain_test.py | 1 | 2206 | import unittest
from livefyre import Livefyre
from livefyre.tests import LfTest
from livefyre.src.api.domain import Domain
class DomainTestCase(LfTest, unittest.TestCase):
    """Checks Domain.quill/Domain.bootstrap URLs for ssl and non-ssl."""

    def setUp(self):
        super(DomainTestCase, self).setUp()
        self.network = Livefyre.get_network(self.NETWORK_NAME, self.NETWORK_KEY)
        self.site = self.network.get_site(self.SITE_ID, self.SITE_KEY)
        self.collection = self.site.build_comments_collection('TITLE', self.ARTICLE_ID, self.URL)

    def _cores(self):
        # The Domain helpers accept a network, a site or a collection.
        return (self.network, self.site, self.collection)

    def test_quill(self):
        expected_ssl = 'https://{0}.quill.fyre.co'.format(self.network.network_name)
        for core in self._cores():
            self.assertEqual(expected_ssl, Domain.quill(core))

        expected_plain = 'http://quill.{0}'.format(self.network.data.name)
        self.network.ssl = False
        for core in self._cores():
            self.assertEqual(expected_plain, Domain.quill(core))

    def test_bootstrap(self):
        expected_ssl = 'https://{0}.bootstrap.fyre.co'.format(self.network.network_name)
        for core in self._cores():
            self.assertEqual(expected_ssl, Domain.bootstrap(core))

        expected_plain = 'http://bootstrap.{0}'.format(self.network.data.name)
        self.network.ssl = False
        for core in self._cores():
            self.assertEqual(expected_plain, Domain.bootstrap(core))


if __name__ == '__main__':
    unittest.main()
| mit |
d2emon/newspaperizer | src/book/models.py | 1 | 3561 | from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from newspaperizer.settings import settings
from django.core.files.storage import FileSystemStorage
from people.models import Person
# Per-app settings block; missing keys fall back to the defaults below.
config = settings.get('books', dict())
path = config.get('path', '')

# Storage backend for book images, configured from the app settings.
image_fs = FileSystemStorage(
    location=config.get('images_root'),
    base_url=config.get('images_url'),
)
# Separate storage backend for genre images.
genre_image_fs = FileSystemStorage(
    location=config.get('genres', dict()).get('images_root'),
    base_url=config.get('genres', dict()).get('images_url'),
)
class BookGenre(models.Model):
    """A book genre; genres form a tree through ``subgenres``."""

    subgenres = models.ManyToManyField('self', verbose_name=_('Subgenre'), blank=True, symmetrical=False)
    title = models.CharField(_('Title'), max_length=255)
    slug = models.SlugField(_('Slug'), unique=True)
    folder = models.CharField(_('Folder'), max_length=255, blank=True)
    image = models.ImageField(verbose_name=_('Image'), storage=genre_image_fs, blank=True)
    description = models.TextField(_('Description'), max_length=10000, blank=True)

    def __unicode__(self):
        return self.title

    def __str__(self):
        return self.__unicode__()

    def get_absolute_url(self):
        return reverse('book_genre', args=[self.slug])

    def get_supgenre(self):
        """Return the first parent genre referencing this one, or None."""
        supgenres = BookGenre.objects.filter(subgenres__id=self.id).all()
        if supgenres:
            return supgenres[0]
        else:
            return None

    def get_supgenres(self):
        """Return the chain of ancestor genres, nearest ancestor first."""
        supgenres = []
        genre = self.get_supgenre()
        if genre:
            supgenres.append(genre)
            supgenres = supgenres + genre.get_supgenres()
        return supgenres

    def get_download_link(self):
        """Return this genre's folder path, prefixed by its ancestors'
        folders (rooted at the configured ``path``)."""
        supgenre = self.get_supgenre()
        if supgenre:
            folders = supgenre.get_download_link()
        else:
            folders = path
        return folders + "/" + self.folder

    def preview(self):
        """Return the URL of the genre image, or '' when no image is set.

        Bug fix: Django's ``FieldFile.url`` is a property, not a method;
        the previous ``self.image.url()`` raised TypeError whenever an
        image was present.
        """
        if self.image:
            return self.image.url
        else:
            return ""

    class Meta:
        verbose_name = _('Book genre')
        verbose_name_plural = _('Book genres')
        ordering = ['title', ]
class Book(models.Model):
    """A single book, linked to its authors and genres."""

    authors = models.ManyToManyField(Person, verbose_name=_('Authors'), blank=True)
    genre = models.ManyToManyField('BookGenre', verbose_name=_('Genres'))
    title = models.CharField(_('Title'), max_length=255)
    slug = models.SlugField(_('Slug'), unique=True)
    folder = models.CharField(_('Folder'), max_length=255)
    image = models.ImageField(verbose_name=_('Image'), storage=image_fs, blank=True)
    description = models.TextField(_('Description'), max_length=10000, blank=True)

    def __unicode__(self):
        return self.title

    def __str__(self):
        return self.__unicode__()

    def get_absolute_url(self):
        return reverse('book', args=[self.slug])

    def get_authors_list(self):
        # Human-readable, comma-separated list of author names.
        names = [str(author) for author in self.authors.all()]
        return ", ".join(names)

    def get_main_genre(self):
        # The first linked genre is treated as the primary one.
        genres = self.genre.all()
        if genres:
            return genres[0]
        return None

    def get_download_link(self):
        # Genre folder chain followed by this book's own folder.
        parent = self.get_main_genre().get_download_link()
        return parent + "/" + self.folder

    class Meta:
        verbose_name = _('Book')
        verbose_name_plural = _('Books')
        ordering = ['title', ]
| gpl-3.0 |
mattkretz/root | tutorials/pyroot/file.py | 7 | 4306 | ## \file
## \ingroup tutorial_pyroot
## This macro displays the physical ROOT file structure
##
## \macro_image
## \macro_code
##
## \author Wim Lavrijsen
from ROOT import TCanvas, TPaveLabel, TPave, TLine, TArrow, TText, TPaveText
from ROOT import gROOT
# Canvas and title banner for the whole diagram.
c1 = TCanvas( 'c1', 'ROOT File description', 200, 10, 700, 550 )
c1.Range( 0, -0.25, 21, 14 )
title = TPaveLabel( 5, 12, 15, 13.7, c1.GetTitle() )
title.SetFillColor( 16 )
title.Draw()

# horizontal file layout
file = TPave( 1, 8.5, 20, 11 )
file.SetFillColor( 11 )
file.Draw()
fileh = TPave( 1, 8.5, 2.5, 11 )
fileh.SetFillColor( 44 )
fileh.Draw()
# Logical record headers drawn as narrow panes along the file band.
lrh = TPave( 2.5, 8.5, 3.3, 11, 1 )
lrh.SetFillColor( 33 )
lrh.Draw()
lrh.DrawPave( 6.9, 8.5, 7.7, 11, 1 )
lrh.DrawPave( 10.5, 8.5, 11.3, 11, 1 )
lrh.DrawPave( 14.5, 8.5, 15.3, 11, 1 )

# Dotted leader lines connecting the band to the detail boxes below.
ldot = TLine( 1, 8.5, 0.5, 6.5 )
ldot.SetLineStyle( 2 )
ldot.Draw()
ldot.DrawLine( 2.5, 8.5, 9.4, 6.5 )
ldot.DrawLine( 10.5, 8.5, 10, 6.5 )
ldot.DrawLine( 11.3, 8.5, 19.5, 6.5 )

# Arrows showing how each record header points to the next record.
line = TLine( 2.6, 11, 2.6, 11.5 )
line.Draw()
line.DrawLine( 2.6, 11.5, 7, 11.5 )
arrow = TArrow( 7, 11.5, 7, 11.1, 0.01, '|>' )
arrow.SetFillStyle( 1001 )
arrow.Draw()
line.DrawLine( 7, 8.5, 7, 8.0 )
line.DrawLine( 7, 8.0, 10.6, 8 )
arrow.DrawArrow( 10.6,8, 10.6, 8.4, 0.01, '|>' )
line.DrawLine( 10.6, 11, 10.6, 11.5 )
line.DrawLine( 10.6, 11.5, 14.6, 11.5 )
arrow.DrawArrow( 14.6, 11.5, 14.6, 11.1, 0.01, '|>' )
line.DrawLine( 14.6, 8.5, 14.6, 8.0 )
line.DrawLine( 14.6, 8.0, 16, 8 )
ldot.DrawLine( 16, 8, 19, 8 )

# Vertical labels inside the file band.
vert = TText( 1.5, 9.75, 'File' )
vert.SetTextAlign( 21 )
vert.SetTextAngle( 90 )
vert.SetTextSize( 0.025 )
vert.Draw()
vert.DrawText( 2.0, 9.75, 'Header' )
vert.DrawText( 2.9, 9.75, 'Logical Record' )
vert.DrawText( 3.2, 9.75, 'Header' )
vert.DrawText( 7.3, 9.75, 'Logical Record' )
vert.DrawText( 7.6, 9.75, 'Header' )
vert.DrawText( 10.9, 9.75, 'Logical Record' )
vert.DrawText( 11.2, 9.75, 'Header' )
vert.DrawText( 14.9, 9.75, 'Logical Record' )
vert.DrawText( 15.2, 9.75, 'Header' )
hori = TText( 4.75, 10, 'Object' )
hori.SetTextAlign( 22 )
hori.SetTextSize( 0.035 )
hori.Draw()
hori.DrawText( 4.75, 9.5, 'Data' )
hori.DrawText( 9.2, 10, 'Deleted' )
hori.DrawText( 9.2, 9.5, 'Object' )
# Cross marking the deleted object's record.
line.DrawLine( 6.9, 8.5, 10.5, 11 )
line.DrawLine( 6.9, 11, 10.5, 8.5 )
tbig = TText( 17, 9.75, '............' )
tbig.SetTextAlign( 22 )
tbig.SetTextSize( 0.03 )
tbig.Draw()
tbig.DrawText( 2.6, 7, 'fBEGIN' )
tbig.DrawText( 20., 7, 'fEND' )
arrow.DrawArrow( 2.6, 7, 2.6, 8.4, 0.01, '|>' )
arrow.DrawArrow( 20, 7, 20, 8.4, 0.01, '|>' )

#file header
header = TPaveText( 0.5, .2, 9.4, 6.5 )
header.SetFillColor( 44 )
header.Draw()
fh = header.AddText( 'File Header' )
fh.SetTextAlign( 22 )
fh.SetTextSize( 0.04 )
header.SetTextSize( 0.027 )
header.SetTextAlign( 12 )
header.AddText( ' ' )
header.AddLine( 0, 0, 0, 0 )
header.AddText( '"root": Root File Identifier' )
header.AddText( 'fVersion: File version identifier' )
header.AddText( 'fBEGIN: Pointer to first data record' )
header.AddText( 'fEND: Pointer to first free word at EOF' )
header.AddText( 'fSeekFree: Pointer to FREE data record' )
header.AddText( 'fNbytesFree: Number of bytes in FREE' )
header.AddText( 'fNfree: Number of free data records' )
header.AddText( 'fNbytesName: Number of bytes in name/title' )
header.AddText( 'fUnits: Number of bytes for pointers' )
header.AddText( 'fCompress: Compression level' )

#logical record header
lrecord = TPaveText( 10, 0.2, 19.5, 6.5 )
lrecord.SetFillColor( 33 )
lrecord.Draw()
tlrh = lrecord.AddText( 'Logical Record Header (TKEY)' )
tlrh.SetTextAlign( 22 )
tlrh.SetTextSize( 0.04 )
lrecord.SetTextSize( 0.027 )
lrecord.SetTextAlign( 12 )
lrecord.AddText( ' ' )
lrecord.AddLine( 0, 0, 0, 0 )
lrecord.AddText( 'fNbytes: Length of compressed object' )
lrecord.AddText( 'fVersion: Key version identifier' )
lrecord.AddText( 'fObjLen: Length of uncompressed object' )
lrecord.AddText( 'fDatime: Date/Time when written to store' )
lrecord.AddText( 'fKeylen: Number of bytes for the key' )
lrecord.AddText( 'fCycle : Cycle number' )
lrecord.AddText( 'fSeekKey: Pointer to object on file' )
lrecord.AddText( 'fSeekPdir: Pointer to directory on file' )
lrecord.AddText( 'fClassName: class name of the object' )
lrecord.AddText( 'fName: name of the object' )
lrecord.AddText( 'fTitle: title of the object' )
c1.Update()
| lgpl-2.1 |
andrewschaaf/pj-core | js/core/matrix.py | 1 | 1563 |
'''
matrix: [[1, 2, 3],
[4, 5, 6]]
'''
def matrix_solid(h, w, v):
    # Build an h-by-w matrix (list of row lists) with every cell set to v.
    # NOTE: .push() is the JavaScript Array API -- this is pj source code
    # that is compiled to JavaScript, not run directly by CPython.
    m2 = []
    for y in range(h):
        row = []
        for x in range(w):
            row.push(v)
        m2.push(row)
    return m2
def matrix_via_cellfunc(h, w, f):
    # Build an h-by-w matrix where cell (y, x) holds f(y, x).
    # (.push() is the JavaScript Array API; this pj source compiles to JS.)
    m2 = []
    for y in range(h):
        row = []
        for x in range(w):
            row.push(f(y, x))
        m2.push(row)
    return m2
def matrix_height(m):
    # Number of rows.
    return len(m)

def matrix_width(m):
    # Number of columns -- assumes at least one row and equal-length rows.
    return len(m[0])
def matrix_print(m, msg):
    # Debug helper: print a banner (msg defaults to 'matrix'), then one
    # row per line.
    if not msg:
        msg = 'matrix'
    print('---- ' + msg + ' ----')
    for y in range(matrix_height(m)):
        print(m[y])
def matrix_rotated(m, rotation90s):
    # Return a copy of the square matrix m rotated clockwise by
    # rotation90s quarter-turns (0..3). Any other count leaves the new
    # matrix filled with zeros.
    n = len(m)
    n_minus_one = n - 1

    # new blank matrix
    m2 = matrix_solid(n, n, 0)

    # fill new matrix
    for y in range(n):
        for x in range(n):
            if rotation90s == 0:
                m2[y][x] = m[y][x]
            elif rotation90s == 1:
                # One clockwise turn moves old (n-1-x, y) to new (y, x).
                m2[y][x] = m[n_minus_one - x][y]
            elif rotation90s == 2:
                m2[y][x] = m[n_minus_one - y][n_minus_one - x]
            elif rotation90s == 3:
                m2[y][x] = m[x][n_minus_one - y]
    return m2
def matrix_centerOfGravity(m):
    # Weighted centroid of the matrix, returned as [y, x]. Each cell's
    # weight acts at the cell's center, hence the 0.5 offsets.
    total_weight = 0
    x_moment = 0
    y_moment = 0
    for y in range(matrix_height(m)):
        for x in range(matrix_width(m)):
            w = m[y][x]
            total_weight += w
            x_moment += w * (x + 0.5)
            y_moment += w * (y + 0.5)
    return [y_moment / total_weight, x_moment / total_weight]
| mit |
MSEMJEJME/Get-Dumped | renpy/character.py | 1 | 31488 | # Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# The Character object (and friends).
import renpy.display
import re
# This matches the dialogue-relevant text tags.
TAG_RE = re.compile(r'(\{\{)|(\{(p|w|nw|fast)(?:\=([^}]*))?\})', re.S)

class DialogueTextTags(object):
    """
    This object parses the text tags that only make sense in dialogue,
    like {fast}, {p}, {w}, and {nw}.

    After construction:

    `text` - the full text, with the tags left in place.
    `pause_start` - index in `text` where each pause's segment starts.
    `pause_end` - index in `text` where each pause's segment ends.
    `pause_delay` - delay for each pause; None means wait for the user.
    `no_wait` - True if a {nw} tag was seen.
    """

    def __init__(self, s):

        # The text that we've accumulated, not including any tags.
        self.text = ""

        # The index in the produced string where each pause starts.
        self.pause_start = [ 0 ]

        # The index in the produced string where each pause ends.
        self.pause_end = [ ]

        # The time to delay for each pause. None to delay forever.
        self.pause_delay = [ ]

        # True if we've encountered the no-wait tag.
        self.no_wait = False

        # TAG_RE.split yields repeating runs of (plain text, quoted "{{",
        # full tag text, tag name, tag value).
        i = iter(TAG_RE.split(s))

        while True:

            try:
                # Use the builtin next() instead of the Python 2-only
                # .next() method, so this works on Python 2.6+ and 3.
                self.text += next(i)

                quoted = next(i)
                full_tag = next(i)
                tag = next(i)
                value = next(i)

                if value is not None:
                    value = float(value)

                if quoted is not None:
                    # "{{" is an escaped brace, not a tag.
                    self.text += quoted
                    continue

                if tag == "p" or tag == "w":
                    # Record a pause at the current position.
                    self.pause_start.append(len(self.text))
                    self.pause_end.append(len(self.text))
                    self.pause_delay.append(value)

                elif tag == "nw":
                    self.no_wait = True

                elif tag == "fast":
                    # {fast} discards everything recorded so far.
                    self.pause_start = [ len(self.text) ]
                    self.pause_end = [ ]
                    self.pause_delay = [ ]
                    self.no_wait = False

                self.text += full_tag

            except StopIteration:
                break

        self.pause_end.append(len(self.text))

        # The final segment either doesn't wait ({nw}) or waits forever.
        if self.no_wait:
            self.pause_delay.append(0)
        else:
            self.pause_delay.append(None)
def predict_show_display_say(who, what, who_args, what_args, window_args, image=False, two_window=False, side_image=None, screen=None, **kwargs):
    """
    This is the default function used by Character to predict images that
    will be used by show_display_say. It's called with more-or-less the
    same parameters as show_display_say, and it's expected to return a
    list of images used by show_display_say.
    """

    if screen:
        # Predict through the say screen, passing the same widget
        # properties that show_display_say would compute.
        props = compute_widget_properties(who_args, what_args, window_args)

        renpy.display.screen.predict_screen(
            screen,
            _widget_properties=props,
            who=who,
            what=what,
            image=image,
            two_window=two_window,
            side_image=side_image,
            **kwargs)

        return

    if image:
        # NOTE(review): this compares the `image` argument itself (not
        # `who`) against "<Dynamic>" -- confirm against upstream intent.
        if image != "<Dynamic>":
            renpy.easy.predict(who)

    if side_image:
        renpy.easy.predict(side_image)
def compute_widget_properties(who_args, what_args, window_args, variant=None):
    """
    Computes and returns the widget properties.

    Returns a dict with "who", "what" and "window" keys, each holding the
    corresponding keyword arguments. Any 'style' entry is resolved to a
    style object with the variant and rollback indexing applied.
    """

    def style_args(d):
        # Nothing to resolve when there's no style entry, or when neither
        # rollback nor a variant requires indexing into the style.
        if not "style" in d:
            return d

        in_rollback = renpy.exports.in_rollback()

        if (not in_rollback) and (not variant):
            return d

        # Copy so the caller's dict isn't mutated.
        d = d.copy()

        style = d["style"]

        # The style may be given by name; look it up on the style store.
        if isinstance(style, basestring):
            style = getattr(renpy.store.style, style)

        if variant is not None:
            style = style[variant]

        if in_rollback:
            style = style["rollback"]

        d["style"] = style

        return d

    who_args = style_args(who_args)
    what_args = style_args(what_args)
    window_args = style_args(window_args)

    return {
        "window" : window_args,
        "what" : what_args,
        "who" : who_args,
    }
def show_display_say(who, what, who_args={}, what_args={}, window_args={},
                     image=False, side_image=None, two_window=False,
                     two_window_vbox_properties={},
                     who_window_properties={},
                     say_vbox_properties={},
                     transform=None,
                     variant=None,
                     screen=None,
                     **kwargs):
    """
    This is called (by default) by renpy.display_say to add the
    widgets corresponding to a screen of dialogue to the user. It is
    not expected to be called by the user, but instead to be called by
    display_say, or by a function passed as the show_function argument
    to Character or display_say.

    @param who: The name of the character that is speaking, or None to
    not show this name to the user.

    @param what: What that character is saying. Please not that this
    may not be a string, as it can also be a list containing both text
    and displayables, suitable for use as the first argument of ui.text().

    @param who_args: Additional keyword arguments intended to be
    supplied to the ui.text that creates the who widget of this dialogue.

    @param what_args: Additional keyword arguments intended to be
    supplied to the ui.text that creates the what widget of this dialogue.

    @param window_args: Additional keyword arguments intended to be
    supplied to the ui.window that creates the who widget of this
    dialogue.

    @param image: If True, then who should be interpreted as an image
    or displayable rather than a text string.

    @param kwargs: Additional keyword arguments should be ignored.

    This function is required to return the ui.text() widget
    displaying the what text.

    NOTE(review): the mutable default arguments ({}) are never mutated
    here (style_args copies before modifying), so they are safe as-is.
    """

    props = compute_widget_properties(who_args, what_args, window_args, variant=variant)

    def handle_who():
        # Add the speaker, either as an image or as text.
        if who:
            if image:
                renpy.ui.add(renpy.display.im.image(who, loose=True, **props["who"]))
            else:
                renpy.ui.text(who, **who_args)

    def merge_style(style, properties):
        # Resolve a style (by name if needed), apply variant/rollback
        # indexing, and merge in explicit property overrides.
        if isinstance(style, basestring):
            style = getattr(renpy.store.style, style)

        if variant is not None:
            style = style[variant]

        if renpy.exports.in_rollback():
            style = style["rollback"]

        rv = dict(style=style)
        rv.update(properties)
        return rv

    if screen and renpy.display.screen.has_screen(screen):

        # Pick a screen tag that isn't already showing, so multiple
        # say screens can coexist.
        tag = screen
        index = 0

        while renpy.exports.showing(tag):
            index += 1
            tag = "%s%d" % (screen, index)

        renpy.display.screen.show_screen(
            screen,
            _widget_properties=props,
            _transient = True,
            _tag = tag,
            image=image,
            side_image=side_image,
            two_window=two_window,
            who=who,
            what=what,
            **kwargs)

        renpy.exports.shown_window()

        return renpy.display.screen.get_widget(screen, "what")

    # Non-screen fallback: build the say UI with the ui functions.

    # Apply the transform.
    if transform:
        renpy.ui.at(transform)

    if two_window:

        # Opens say_two_window_vbox.
        renpy.ui.vbox(**merge_style('say_two_window_vbox', two_window_vbox_properties))

        # The speaker gets a window of its own above the dialogue window.
        renpy.ui.window(**merge_style('say_who_window', who_window_properties))
        handle_who()

    renpy.ui.window(**props["window"])
    # Opens the say_vbox.
    renpy.ui.vbox(**merge_style('say_vbox', say_vbox_properties))

    if not two_window:
        handle_who()

    rv = renpy.ui.text(what, **props["what"])

    # Closes the say_vbox.
    renpy.ui.close()

    if two_window:
        # Closes the say_two_window_vbox.
        renpy.ui.close()

    if side_image:
        renpy.ui.image(side_image)

    renpy.exports.shown_window()

    return rv
class SlowDone(object):
    """
    Callable invoked when the slow text of a say statement finishes
    displaying. Shows the fixed-position CTC, schedules an auto-advance
    pause, and fires the "slow_done" character callbacks.
    """

    # Class-level default so the attribute exists before __init__ runs.
    delay = None

    def __init__(self, ctc, ctc_position, callback, interact, type, cb_args, delay): #@ReservedAssignment
        self.ctc = ctc
        self.ctc_position = ctc_position
        self.callback = callback
        self.interact = interact
        self.type = type
        self.cb_args = cb_args
        self.delay = delay

    def __call__(self):

        # A fixed-position CTC lives on its own transient screen, so it
        # has to be shown (and the interaction restarted) explicitly.
        if self.ctc and self.ctc_position == "fixed":
            renpy.display.screen.show_screen("_ctc", _transient=True, ctc=self.ctc)
            renpy.exports.restart_interaction()

        # A timed pause auto-dismisses after the delay.
        if self.delay is not None:
            renpy.ui.pausebehavior(self.delay, True)
            renpy.exports.restart_interaction()

        # Notify the registered character callbacks.
        for cb in self.callback:
            cb("slow_done", interact=self.interact, type=self.type, **self.cb_args)
# This function takes care of repeatedly showing the screen as part of
# an interaction.
def display_say(
    who,
    what,
    show_function,
    interact,
    slow,
    afm,
    ctc,
    ctc_pause,
    ctc_position,
    all_at_once,
    cb_args,
    with_none,
    callback,
    type, #@ReservedAssignment
    checkpoint=True,
    ctc_timedpause=None,
    ctc_force=False):
    """
    Drives the display of one say statement: splits `what` on pause
    tags, shows each segment via `show_function`, runs the character
    callbacks, and handles interaction, checkpointing, and CTC display.
    """

    # If we're in fast skipping mode, don't bother with say
    # statements at all.
    if interact and renpy.config.skipping == "fast":

        # Clears out transients.
        renpy.exports.with_statement(None)
        return

    # Figure out the callback(s) we want to use.
    if callback is None:
        if renpy.config.character_callback:
            callback = [ renpy.config.character_callback ]
        else:
            callback = [ ]

    if not isinstance(callback, list):
        callback = [ callback ]

    callback = renpy.config.all_character_callbacks + callback

    # Call the begin callback.
    for c in callback:
        c("begin", interact=interact, type=type, **cb_args)

    # Roll-forward data exists: replay the previous result instead of
    # waiting on a fresh interaction.
    if renpy.exports.roll_forward_info():
        roll_forward = False
    else:
        roll_forward = None

    # If we're just after a rollback or roll_forward, disable slow.
    after_rollback = renpy.game.after_rollback
    if after_rollback:
        slow = False

    # If we're committed to skipping this statement, disable slow.
    elif (renpy.config.skipping and
          (renpy.game.preferences.skip_unseen or
           renpy.game.context().seen_current(True))):
        slow = False

    # Figure out which pause we're on. (Or set the pause to None in
    # order to put us in all-at-once mode.)
    if not interact:
        all_at_once = True

    dtt = DialogueTextTags(what)

    # In all-at-once mode, collapse every pause into a single segment
    # covering the whole text, keeping only the final delay.
    if all_at_once:
        pause_start = [ dtt.pause_start[0] ]
        pause_end = [ len(dtt.text) ]
        pause_delay = [ dtt.pause_delay[-1] ]
    else:
        pause_start = dtt.pause_start
        pause_end = dtt.pause_end
        pause_delay = dtt.pause_delay

    # One iteration per pause segment of the dialogue text.
    for i, (start, end, delay) in enumerate(zip(pause_start, pause_end, pause_delay)):

        last_pause = (i == len(pause_start) - 1)

        # If we're going to do an interaction, then saybehavior needs
        # to be here.
        if interact:
            behavior = renpy.ui.saybehavior(allow_dismiss=renpy.config.say_allow_dismiss)
        else:
            behavior = None

        # The string to show.
        what_string = dtt.text

        # Figure out the CTC to use, if any.
        if last_pause:
            what_ctc = ctc
        else:
            if delay is not None:
                what_ctc = ctc_timedpause or ctc_pause
            else:
                what_ctc = ctc_pause

        if not (interact or ctc_force):
            what_ctc = None

        what_ctc = renpy.easy.displayable_or_none(what_ctc)

        if what_ctc is not None:
            what_ctc = what_ctc.parameterize(('ctc',), ())

        # A zero delay means the segment auto-advances immediately, so
        # there is no point showing a CTC.
        if delay == 0:
            what_ctc = None

        # Create the callback that is called when the slow text is done.
        slow_done = SlowDone(what_ctc, ctc_position, callback, interact, type, cb_args, delay)

        # Run the show callback.
        for c in callback:
            c("show", interact=interact, type=type, **cb_args)

        # Show the text.
        what_text = show_function(who, what_string)

        if not isinstance(what_text, renpy.text.text.Text): #@UndefinedVariable
            raise Exception("The say screen (or show_function) must return a Text object.")

        if what_ctc and ctc_position == "nestled":
            what_text.set_ctc(what_ctc)

        # Update the properties of the what_text widget.
        what_text.start = start
        what_text.end = end
        what_text.slow = slow
        what_text.slow_done = slow_done

        for c in callback:
            c("show_done", interact=interact, type=type, **cb_args)

        # Auto-forward-mode timing is based on the segment length.
        if behavior and afm:
            behavior.set_afm_length(end - start)

        if interact:
            rv = renpy.ui.interact(mouse='say', type=type, roll_forward=roll_forward)

            # This is only the case if the user has rolled forward, {nw} happens, or
            # maybe in some other obscure cases.
            if rv is False:
                break

            if not last_pause:
                for i in renpy.config.say_sustain_callbacks:
                    i()

    # Do the checkpoint and with None.
    if interact:

        if not dtt.no_wait:
            if checkpoint:
                renpy.exports.checkpoint(True)
        else:
            # {nw}: restore the rollback flag so the statement does not
            # become a rollback target.
            renpy.game.after_rollback = after_rollback

        if with_none is None:
            with_none = renpy.config.implicit_with_none

        if with_none:
            renpy.game.interface.do_with(None, None)

    for c in callback:
        c("end", interact=interact, type=type, **cb_args)
# This is used to flag values that haven't been set by the user.
# Compared by identity (`is NotSet`), so None remains a legal user value.
NotSet = object()
class ADVCharacter(object):
    """
    The character object contains information about a character. When
    passed as the first argument to a say statement, it can control
    the name that is displayed to the user, and the style of the label
    showing the name, the text of the dialogue, and the window
    containing both the label and the dialogue.
    """

    # Properties beginning with what or window that are treated
    # specially.
    special_properties = [
        'what_prefix',
        'what_suffix',
        'who_prefix',
        'who_suffix',
        'show_function',
        ]

    # When adding a new argument here, remember to add it to copy below.
    def __init__(
        self,
        name=NotSet,
        kind=None,
        **properties):

        if kind is None:
            kind = renpy.store.adv

        if name is not NotSet:
            properties["name"] = name

        # This grabs a value out of properties, and then grabs it out of
        # kind if it's not set.
        def v(n):
            if n in properties:
                return properties.pop(n)
            else:
                return getattr(kind, n)

        # Similar, but it grabs the value out of kind.display_args instead.
        def d(n):
            if n in properties:
                return properties.pop(n)
            else:
                return kind.display_args[n]

        self.name = v('name')
        self.who_prefix = v('who_prefix')
        self.who_suffix = v('who_suffix')
        self.what_prefix = v('what_prefix')
        self.what_suffix = v('what_suffix')

        self.show_function = v('show_function')
        self.predict_function = v('predict_function')

        self.condition = v('condition')
        self.dynamic = v('dynamic')
        self.screen = v('screen')
        self.mode = v('mode')

        if renpy.config.new_character_image_argument:
            if "image" in properties:
                self.image_tag = properties.pop("image")
            else:
                self.image_tag = kind.image_tag
        else:
            self.image_tag = None

        # Arguments eventually passed through to display_say.
        self.display_args = dict(
            interact = d('interact'),
            slow = d('slow'),
            afm = d('afm'),
            ctc = renpy.easy.displayable_or_none(d('ctc')),
            ctc_pause = renpy.easy.displayable_or_none(d('ctc_pause')),
            ctc_timedpause = renpy.easy.displayable_or_none(d('ctc_timedpause')),
            ctc_position = d('ctc_position'),
            all_at_once = d('all_at_once'),
            with_none = d('with_none'),
            callback = d('callback'),
            type = d('type'),
            )

        # Inherit widget/callback argument dicts from kind, copying so
        # this character can be customized independently.
        if kind:
            self.who_args = kind.who_args.copy()
            self.what_args = kind.what_args.copy()
            self.window_args = kind.window_args.copy()
            self.show_args = kind.show_args.copy()
            self.cb_args = kind.cb_args.copy()
        else:
            self.who_args = { "substitute" : False }
            self.what_args = { "substitute" : False }
            self.window_args = { }
            self.show_args = { }
            self.cb_args = { }

        if not renpy.config.new_character_image_argument:
            if "image" in properties:
                self.show_args["image"] = properties.pop("image")

        if "slow_abortable" in properties:
            self.what_args["slow_abortable"] = properties.pop("slow_abortable")

        # Route remaining prefixed keyword arguments to the matching
        # argument dict; anything else styles the who widget.
        for k in list(properties):

            if "_" in k:
                prefix, suffix = k.split("_", 1)

                if prefix == "show":
                    self.show_args[suffix] = properties[k]
                    continue
                elif prefix == "cb":
                    self.cb_args[suffix] = properties[k]
                    continue
                elif prefix == "what":
                    self.what_args[suffix] = properties[k]
                    continue
                elif prefix == "window":
                    self.window_args[suffix] = properties[k]
                    continue
                elif prefix == "who":
                    self.who_args[suffix] = properties[k]
                    continue

            self.who_args[k] = properties[k]

    def copy(self, name=NotSet, **properties):
        """Returns a new character based on this one, with overrides applied."""
        return type(self)(name, kind=self, **properties)

    # This is called before the interaction.
    def do_add(self, who, what):
        return

    # This is what shows the screen for a given interaction.
    def do_show(self, who, what):
        return self.show_function(
            who,
            what,
            who_args=self.who_args,
            what_args=self.what_args,
            window_args=self.window_args,
            screen=self.screen,
            **self.show_args)

    # This is called after the last interaction is done.
    def do_done(self, who, what):
        return

    # This is called when an extend occurs, before the usual add/show
    # cycle.
    def do_extend(self):
        return

    # This is called to actually do the displaying.
    def do_display(self, who, what, **display_args):
        display_say(who,
                    what,
                    self.do_show,
                    **display_args)

    # This is called to predict images that will be used by this
    # statement.
    def do_predict(self, who, what):
        return self.predict_function(
            who,
            what,
            who_args=self.who_args,
            what_args=self.what_args,
            window_args=self.window_args,
            screen=self.screen,
            **self.show_args)

    def resolve_say_attributes(self, predict):
        """
        Deals with image attributes associated with the current say
        statement.
        """

        attrs = renpy.exports.get_say_attributes()

        if not attrs:
            return

        if not self.image_tag:
            if not predict:
                raise Exception("Say has image attributes %r, but there's no image tag associated with the speaking character." % (attrs,))
            else:
                return

        tagged_attrs = (self.image_tag,) + attrs
        images = renpy.game.context().images

        # If image is showing already, resolve it, then show or predict it.
        if images.showing("master", (self.image_tag,)):
            new_image = images.apply_attributes("master", self.image_tag, tagged_attrs)

            if new_image is None:
                new_image = tagged_attrs

            if predict:
                images.predict_show(new_image)
            else:
                trans = renpy.config.say_attribute_transition

                if trans is not None:
                    renpy.exports.with_statement(None)

                renpy.exports.show(new_image)

                if trans is not None:
                    renpy.exports.with_statement(trans)

        else:
            # Otherwise, just record the attributes of the image.
            images.predict_show("master", tagged_attrs, show=False)

    def __call__(self, what, interact=True, **kwargs):
        """Says `what`, displaying it to the user. Returns True when the
        condition suppresses the line entirely."""

        # Check self.condition to see if we should show this line at all.
        if not (self.condition is None or renpy.python.py_eval(self.condition)):
            return True

        self.resolve_say_attributes(False)

        old_side_image_attributes = renpy.exports.side_image_attributes

        if self.image_tag:
            attrs = (self.image_tag,) + renpy.game.context().images.get_attributes("master", self.image_tag)
        else:
            attrs = None

        renpy.exports.side_image_attributes = attrs

        try:

            if interact:
                renpy.exports.mode(self.mode)

            # Figure out the arguments to display.
            display_args = self.display_args.copy()
            display_args.update(kwargs)
            display_args["interact"] = display_args["interact"] and interact

            who = self.name

            # If dynamic is set, evaluate the name expression.
            if self.dynamic:
                who = renpy.python.py_eval(who)

            if who is not None:
                who = renpy.substitutions.substitute(who)
                who = self.who_prefix + who + self.who_suffix

            what = renpy.substitutions.substitute(what)
            what = self.what_prefix + what + self.what_suffix

            # Run the add_function, to add this character to the
            # things like NVL-mode.
            self.do_add(who, what)

            # Now, display the damned thing.
            self.do_display(who, what, cb_args=self.cb_args, **display_args)

            # Indicate that we're done.
            self.do_done(who, what)

            # Finally, log this line of dialogue.
            if who and isinstance(who, (str, unicode)):
                renpy.exports.log(who)
            renpy.exports.log(what)
            renpy.exports.log("")

        finally:
            renpy.exports.side_image_attributes = old_side_image_attributes

    def predict(self, what):
        """Predicts the images used when this character says `what`."""

        self.resolve_say_attributes(True)

        old_side_image_attributes = renpy.exports.side_image_attributes

        if self.image_tag:
            # BUG FIX: this was `self.image_tag + ...`, which concatenated
            # a str with the attribute tuple and raised TypeError. Build
            # the tagged tuple the same way __call__ does.
            attrs = (self.image_tag,) + renpy.game.context().images.get_attributes("master", self.image_tag)
        else:
            attrs = None

        renpy.exports.side_image_attributes = attrs

        try:

            if self.dynamic:
                who = "<Dynamic>"
            else:
                who = self.name

            return self.do_predict(who, what)

        finally:
            renpy.exports.side_image_attributes = old_side_image_attributes

    def will_interact(self):
        """Returns True if saying a line would cause an interaction."""

        if not (self.condition is None or renpy.python.py_eval(self.condition)):
            return False

        return self.display_args['interact']
def Character(name=NotSet, kind=None, **properties):
    """
    :doc: character
    :args: (name, kind=adv, **args)
    :name: Character

    Creates and returns a Character object, which controls the look
    and feel of dialogue and narration.

    `name`
        If a string, the name of the character for dialogue. When
        `name` is ``None``, display of the name is omitted, as for
        narration.

    `kind`
        The Character to base this Character off of. When used, the
        default value of any argument not supplied to this Character
        is the value of that argument supplied to `kind`. This can
        be used to define a template character, and then copy that
        character with changes.

    **Linked Image**
    An image tag may be associated with a Character. This allows a
    say statement involving this character to display an image with
    the tag, and also allows Ren'Py to automatically select a side
    image to show when this character speaks.

    `image`
         A string giving the image tag that is linked with this
         character.

    **Prefixes and Suffixes.**
    These allow a prefix and suffix to be applied to the name of the
    character, and to the text being shown. This can be used, for
    example, to add quotes before and after each line of dialogue.

    `what_prefix`
        A string that is prepended to the dialogue being spoken before
        it is shown.

    `what_suffix`
        A string that is appended to the dialogue being spoken before
        it is shown.

    `who_prefix`
        A string that is prepended to the name of the character before
        it is shown.

    `who_suffix`
        A string that is appended to the name of the character before
        it is shown.

    **Changing Name Display.**
    These options help to control the display of the name.

    `dynamic`
        If true, then `name` should be a string containing a python
        expression. That string will be evaluated before each line
        of dialogue, and the result used as the name of the character.

    **Controlling Interactions.**
    These options control if the dialogue is displayed, if an
    interaction occurs, and the mode that is entered upon display.

    `condition`
        If given, this should be a string containing a python
        expression. If the expression is false, the dialogue
        does not occur, as if the say statement did not happen.

    `interact`
        If true, the default, an interaction occurs whenever the
        dialogue is shown. If false, an interaction will not occur,
        and additional elements can be added to the screen.

    `mode`
        A string giving the mode to enter when this character
        speaks. See the section on :ref:`modes <modes>` for more details.

    `callback`
        A function that is called when events occur while the
        character is speaking. See the section on
        :ref:`character callbacks` for more information.

    **Click-to-continue.**
    A click-to-continue indicator is displayed once all the text has
    finished displaying, to prompt the user to advance.

    `ctc`
        A Displayable to use as the click-to-continue indicator, unless
        a more specific indicator is used.

    `ctc_pause`
        A Displayable to use as the click-to-continue indicator when the
        display of text is paused by the {p} or {w} text tags.

    `ctc_timedpause`
        A Displayable to use as the click-to-continue indicator when the
        display of text is paused by the {p=} or {w=} text tags. When
        None, this takes its default from ctc_pause, use ``Null()``
        when you want a ctc_pause but no ctc_timedpause.

    `ctc_position`
        Controls the location of the click-to-continue indicator. If
        ``"nestled"``, the indicator is displayed as part of the text
        being shown, immediately after the last character. If ``"fixed"``,
        the indicator is added to the screen, and its position is
        controlled by the position style properties.

    **Screens.**
    The display of dialogue uses a :ref:`screen <screens>`. These arguments
    allow you to select that screen, and to provide arguments to it.

    `screen`
        The name of the screen that is used to display the dialogue.

    Keyword arguments beginning with ``show_`` have the prefix
    stripped off, and are passed to the screen as arguments. For
    example, the value of ``show_side_image`` will become the
    value of the ``side_image`` variable in the screen.

    Some useful ``show_`` variables implemented by the default screens are:

    `show_side_image`
        When given a Displayable, shows that displayable when the
        dialogue is shown. The position of that displayable is
        controlled by its position properties. This is often used
        to show an image of the speaking character to the side
        of the dialogue.

    `show_two_window`
        If true, restructures the layout so that the name of the
        character is placed in one window, and the dialogue text in a
        second window.

    **Styling Text and Windows.**
    Keyword arguments beginning with ``who_``, ``what_``, and
    ``window_`` have their prefix stripped, and are used to :ref:`style
    <styles>` the character name, the spoken text, and the window
    containing both, respectively.

    For example, if a character is given the keyword argument
    ``who_color="#c8ffc8"``, the color of the character's name is
    changed, in this case to green. ``window_background="frame.png"``
    sets the background of the window containing this character's
    dialogue.

    The style applied to the character name, spoken text, and window
    can also be set this way, using the `who_style`, `what_style`, and
    `window_style` arguments, respectively.
    """

    if kind is None:
        kind = renpy.store.adv

    # Instantiate via type(kind) so subclasses of ADVCharacter (e.g.
    # NVL-mode characters) produce characters of their own class.
    return type(kind)(name, kind=kind, **properties)
def DynamicCharacter(name_expr, **properties):
    """Creates a Character whose `name_expr` is evaluated as a python
    expression before each line of dialogue. Kept for backwards
    compatibility; equivalent to Character(name_expr, dynamic=True, ...)."""
    return Character(name_expr, dynamic=True, **properties)
| gpl-2.0 |
skraghu/softlayer-python | SoftLayer/CLI/summary.py | 5 | 1051 | """Account summary."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
# Columns shown in the summary table; also the legal --sortby choices.
COLUMNS = ['datacenter',
           'hardware',
           'virtual_servers',
           'vlans',
           'subnets',
           'public_ips']
@click.command()
@click.option('--sortby',
              help='Column to sort by',
              default='datacenter',
              type=click.Choice(COLUMNS))
@environment.pass_env
def cli(env, sortby):
    """Account summary."""

    # One row per datacenter, summarizing resource counts.
    network = SoftLayer.NetworkManager(env.client)

    table = formatting.Table(COLUMNS)
    table.sortby = sortby

    for name, counts in network.summary_by_datacenter().items():
        row = [name,
               counts['hardware_count'],
               counts['virtual_guest_count'],
               counts['vlan_count'],
               counts['subnet_count'],
               counts['public_ip_count']]
        table.add_row(row)

    env.fout(table)
| mit |
pks/cdec-dtrain | realtime/rt/rt.py | 5 | 19079 | #!/usr/bin/env python
import argparse
import collections
import logging
import os
import shutil
import signal
import StringIO
import subprocess
import sys
import tempfile
import threading
import time
import cdec
import aligner
import decoder
import util
# Dummy input token that is unlikely to appear in normalized data (but no fatal errors if it does)
LIKELY_OOV = '(OOV)'

# For parsing rt.ini: string values treated as boolean true.
TRUE = ('true', 'True', 'TRUE')

# Module-level logger shared by all realtime classes.
logger = logging.getLogger('rt')
class ExtractorWrapper:
    '''Wrap cdec.sa.GrammarExtractor. Used to keep multiple instances of the extractor from causing Python to segfault.
    Do not use directly unless you know what you're doing.'''

    def __init__(self, config):
        # Make sure pycdec is on PYTHONPATH
        cdec_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        pycdec = os.path.join(cdec_root, 'python')
        env = os.environ.copy()
        python_path = env.get('PYTHONPATH', '')
        if 'cdec/python' not in python_path:
            python_path = '{}:{}'.format(python_path, pycdec) if len(python_path) > 0 else pycdec
        env['PYTHONPATH'] = python_path
        # Start grammar extractor as separate process using stdio
        cmd = ['python', '-m', 'cdec.sa.extract', '-o', '-z', '-c', config, '-t']
        logger.info('Executing: {}'.format(' '.join(cmd)))
        self.p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        # Drain stderr on a background thread so the pipe never blocks.
        util.consume_stream(self.p.stderr)
        # Serializes access to the subprocess pipes (FIFO order).
        self.lock = util.FIFOLock()

    def close(self, force=False):
        """Shut down the extractor process; force skips the clean handshake."""
        if not force:
            self.lock.acquire()
            self.p.stdin.close()
            self.p.wait()
            self.lock.release()
        else:
            os.kill(self.p.pid, signal.SIGTERM)

    def drop_ctx(self, ctx_name):
        """Tell the extractor to discard all data for a context."""
        self.lock.acquire()
        self.p.stdin.write('{} ||| drop\n'.format(ctx_name))
        # readline() is the subprocess's acknowledgment; keeps requests in sync.
        self.p.stdout.readline()
        self.lock.release()

    def grammar(self, sentence, grammar_file, ctx_name):
        """Ask the extractor to write a sentence grammar to grammar_file."""
        self.lock.acquire()
        self.p.stdin.write('{} ||| {} ||| {}\n'.format(ctx_name, sentence, grammar_file))
        self.p.stdout.readline()
        self.lock.release()

    def add_instance(self, source, target, alignment, ctx_name):
        """Add an aligned sentence pair to the context's bitext."""
        self.lock.acquire()
        self.p.stdin.write('{} ||| {} ||| {} ||| {}\n'.format(ctx_name, source, target, alignment))
        self.p.stdout.readline()
        self.lock.release()
class RealtimeDecoder:
    '''Do not use directly unless you know what you're doing. Use RealtimeTranslator.'''

    def __init__(self, configdir, tmpdir, hpyplm=False, metric='ibm_bleu'):
        # NOTE(review): cdec_root is computed but unused here -- confirm before removing.
        cdec_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

        # Per-decoder work dir; caller must pass a path that does not exist yet.
        self.tmp = tmpdir
        os.mkdir(self.tmp)

        # HPYPLM reference stream
        self.hpyplm = hpyplm
        if self.hpyplm:
            ref_fifo_file = os.path.join(self.tmp, 'ref.fifo')
            os.mkfifo(ref_fifo_file)
            self.ref_fifo = open(ref_fifo_file, 'w+')
            # Start with empty line (do not learn prior to first input)
            self.ref_fifo.write('\n')
            self.ref_fifo.flush()

        # Decoder: rewrite cdec.ini for realtime use, pointing it at the
        # absolute config dir (and the reference fifo when HPYPLM is on).
        decoder_config = [[f.strip() for f in line.split('=')] for line in open(os.path.join(configdir, 'cdec.ini'))]
        util.cdec_ini_for_realtime(decoder_config, os.path.abspath(configdir), ref_fifo_file if self.hpyplm else None)
        decoder_config_file = os.path.join(self.tmp, 'cdec.ini')
        with open(decoder_config_file, 'w') as output:
            for (k, v) in decoder_config:
                output.write('{}={}\n'.format(k, v))
        decoder_weights = os.path.join(configdir, 'weights.final')
        self.decoder = decoder.MIRADecoder(decoder_config_file, decoder_weights, metric=metric)

    def close(self, force=False):
        """Stop the MIRA decoder and delete this decoder's temp dir."""
        logger.info('Closing decoder and removing {}'.format(self.tmp))
        self.decoder.close(force)
        if self.hpyplm:
            self.ref_fifo.close()
        shutil.rmtree(self.tmp)
class RealtimeTranslator:
    '''Main entry point into API: serves translations to any number of concurrent users'''

    def __init__(self, configdir, tmpdir='/tmp', cache_size=5, norm=False):

        # name -> (method, set of possible nargs)
        self.COMMANDS = {
            'TR': (self.translate, set((1,))),
            'LEARN': (self.learn, set((2,))),
            'SAVE': (self.save_state, set((0, 1))),
            'LOAD': (self.load_state, set((0, 1))),
            'DROP': (self.drop_ctx, set((0,))),
            'LIST': (self.list_ctx, set((0,))),
        }

        cdec_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

        # rt.ini options
        ini = dict(line.strip().split('=') for line in open(os.path.join(configdir, 'rt.ini')))
        self.hpyplm = (ini.get('hpyplm', 'false') in TRUE)
        self.metric = ini.get('metric', 'ibm_bleu')

        ### Single instance for all contexts

        self.config = configdir
        # Temporary work dir
        self.tmp = tempfile.mkdtemp(dir=tmpdir, prefix='realtime.')
        logger.info('Using temp dir {}'.format(self.tmp))

        # Normalization: optional tokenizer/detokenizer subprocesses, each
        # guarded by its own FIFO lock.
        self.norm = norm
        if self.norm:
            self.tokenizer = util.popen_io([os.path.join(cdec_root, 'corpus', 'tokenize-anything.sh'), '-u'])
            self.tokenizer_lock = util.FIFOLock()
            self.detokenizer = util.popen_io([os.path.join(cdec_root, 'corpus', 'untok.pl')])
            self.detokenizer_lock = util.FIFOLock()

        # Word aligner
        fwd_params = os.path.join(configdir, 'a.fwd_params')
        fwd_err = os.path.join(configdir, 'a.fwd_err')
        rev_params = os.path.join(configdir, 'a.rev_params')
        rev_err = os.path.join(configdir, 'a.rev_err')
        self.aligner = aligner.ForceAligner(fwd_params, fwd_err, rev_params, rev_err)

        # Grammar extractor
        sa_config = cdec.configobj.ConfigObj(os.path.join(configdir, 'sa.ini'), unrepr=True)
        sa_config.filename = os.path.join(self.tmp, 'sa.ini')
        util.sa_ini_for_realtime(sa_config, os.path.abspath(configdir))
        sa_config.write()
        self.extractor = ExtractorWrapper(sa_config.filename)
        # Max number of per-sentence grammars cached per context.
        self.cache_size = cache_size

        ### One instance per context

        self.ctx_names = set()
        # All context-dependent operations are atomic
        self.ctx_locks = collections.defaultdict(util.FIFOLock)
        # ctx -> list of (source, target, alignment)
        self.ctx_data = {}
        # Grammar extractor is not threadsafe
        self.extractor_lock = util.FIFOLock()
        # ctx -> deque of file
        self.grammar_files = {}
        # ctx -> dict of {sentence: file}
        self.grammar_dict = {}
        self.decoders = {}
    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_value, ex_traceback):
        # Force shutdown on exception
        self.close(ex_type is not None)

    def close(self, force=False):
        '''Cleanup'''
        if force:
            logger.info('Forced shutdown: stopping immediately')
        # Drop contexts before closing processes unless forced
        if not force:
            for ctx_name in list(self.ctx_names):
                self.drop_ctx(ctx_name, force)
        logger.info('Closing processes')
        self.aligner.close(force)
        self.extractor.close(force)
        if self.norm:
            # Only wait for a clean tokenizer/detokenizer exit on a
            # non-forced shutdown; forced shutdown skips the locks too.
            if not force:
                self.tokenizer_lock.acquire()
                self.detokenizer_lock.acquire()
            self.tokenizer.stdin.close()
            self.tokenizer.wait()
            self.detokenizer.stdin.close()
            self.detokenizer.wait()
            if not force:
                self.tokenizer_lock.release()
                self.detokenizer_lock.release()
        logger.info('Deleting {}'.format(self.tmp))
        shutil.rmtree(self.tmp)
    def lazy_ctx(self, ctx_name):
        '''Initialize a context (inc starting a new decoder) if needed.
        NOT threadsafe, acquire ctx_name lock before calling.'''
        if ctx_name in self.ctx_names:
            return
        logger.info('({}) New context'.format(ctx_name))
        self.ctx_names.add(ctx_name)
        self.ctx_data[ctx_name] = []
        self.grammar_files[ctx_name] = collections.deque()
        self.grammar_dict[ctx_name] = {}
        tmpdir = os.path.join(self.tmp, 'decoder.{}'.format(ctx_name))
        self.decoders[ctx_name] = RealtimeDecoder(self.config, tmpdir, hpyplm=self.hpyplm, metric=self.metric)

    def drop_ctx(self, ctx_name=None, force=False):
        '''Delete a context (inc stopping the decoder)
        Threadsafe and FIFO unless forced.'''
        lock = self.ctx_locks[ctx_name]
        if not force:
            lock.acquire()
        if ctx_name not in self.ctx_names:
            logger.info('({}) No context found, no action taken'.format(ctx_name))
            if not force:
                lock.release()
            return
        logger.info('({}) Dropping context'.format(ctx_name))
        self.ctx_names.remove(ctx_name)
        self.ctx_data.pop(ctx_name)
        self.extractor.drop_ctx(ctx_name)
        self.grammar_files.pop(ctx_name)
        self.grammar_dict.pop(ctx_name)
        self.decoders.pop(ctx_name).close(force)
        # Remove the lock entry; the local `lock` reference is still
        # valid for the release below.
        self.ctx_locks.pop(ctx_name)
        if not force:
            lock.release()

    def list_ctx(self, ctx_name=None):
        '''Return a string of active contexts'''
        return 'ctx_name ||| {}'.format(' '.join(sorted(str(ctx_name) for ctx_name in self.ctx_names)))
    def grammar(self, sentence, ctx_name=None):
        '''Extract a sentence-level grammar on demand (or return cached)
        Threadsafe wrt extractor but NOT decoder. Acquire ctx_name lock
        before calling.'''
        self.extractor_lock.acquire()
        self.lazy_ctx(ctx_name)
        grammar_dict = self.grammar_dict[ctx_name]
        grammar_file = grammar_dict.get(sentence, None)
        # Cache hit
        if grammar_file:
            logger.info('({}) Grammar cache hit: {}'.format(ctx_name, grammar_file))
            self.extractor_lock.release()
            return grammar_file
        # Extract and cache
        (fid, grammar_file) = tempfile.mkstemp(dir=self.decoders[ctx_name].tmp, prefix='grammar.', suffix='.gz')
        # Close the fd from mkstemp; the extractor subprocess writes the file.
        os.close(fid)
        self.extractor.grammar(sentence, grammar_file, ctx_name)
        grammar_files = self.grammar_files[ctx_name]
        # Evict the oldest cached grammar when the cache is full (LRU by
        # insertion order via the deque).
        if len(grammar_files) == self.cache_size:
            rm_sent = grammar_files.popleft()
            # If not already removed by learn method
            if rm_sent in grammar_dict:
                rm_grammar = grammar_dict.pop(rm_sent)
                os.remove(rm_grammar)
        grammar_files.append(sentence)
        grammar_dict[sentence] = grammar_file
        self.extractor_lock.release()
        return grammar_file

    def translate(self, sentence, ctx_name=None):
        '''Decode a sentence (inc extracting a grammar if needed)
        Threadsafe, FIFO'''
        lock = self.ctx_locks[ctx_name]
        lock.acquire()
        self.lazy_ctx(ctx_name)
        # Empty in, empty out
        if sentence.strip() == '':
            lock.release()
            return ''
        if self.norm:
            sentence = self.tokenize(sentence)
            logger.info('({}) Normalized input: {}'.format(ctx_name, sentence))
        grammar_file = self.grammar(sentence, ctx_name)
        decoder = self.decoders[ctx_name]
        start_time = time.time()
        hyp = decoder.decoder.decode(sentence, grammar_file)
        stop_time = time.time()
        logger.info('({}) Translation time: {} seconds'.format(ctx_name, stop_time - start_time))
        # Empty reference: HPYPLM does not learn prior to next translation
        if self.hpyplm:
            decoder.ref_fifo.write('\n')
            decoder.ref_fifo.flush()
        if self.norm:
            logger.info('({}) Normalized translation: {}'.format(ctx_name, hyp))
            hyp = self.detokenize(hyp)
        lock.release()
        return hyp
    def tokenize(self, line):
        """Tokenize one line via the shared tokenizer subprocess (FIFO-locked)."""
        self.tokenizer_lock.acquire()
        self.tokenizer.stdin.write('{}\n'.format(line))
        tok_line = self.tokenizer.stdout.readline().strip()
        self.tokenizer_lock.release()
        return tok_line

    def detokenize(self, line):
        """Detokenize one line via the shared detokenizer subprocess (FIFO-locked)."""
        self.detokenizer_lock.acquire()
        self.detokenizer.stdin.write('{}\n'.format(line))
        detok_line = self.detokenizer.stdout.readline().strip()
        self.detokenizer_lock.release()
        return detok_line
    def command_line(self, line, ctx_name=None):
        """Parse and dispatch one command line of the form
        COMMAND [ctx_name] ||| arg1 [||| arg2 ...]; returns the command's
        result, or None (after logging) on a malformed line."""
        # COMMAND [ctx_name] ||| arg1 [||| arg2 ...]
        args = [f.strip() for f in line.split('|||')]
        # Drop a trailing empty field produced by a line ending in '|||'.
        if args[-1] == '':
            args = args[:-1]
        if len(args) > 0:
            cmd_name = args[0].split()
            # ctx_name provided
            if len(cmd_name) == 2:
                (cmd_name, ctx_name) = cmd_name
            # ctx_name default/passed
            else:
                cmd_name = cmd_name[0]
            (command, nargs) = self.COMMANDS.get(cmd_name, (None, None))
            # Only dispatch when the argument count is legal for the command.
            if command and len(args[1:]) in nargs:
                logger.info('({}) {} ||| {}'.format(ctx_name, cmd_name, ' ||| '.join(args[1:])))
                return command(*args[1:], ctx_name=ctx_name)
        logger.info('ERROR: command: {}'.format(' ||| '.join(args)))
    def learn(self, source, target, ctx_name=None):
        '''Learn from training instance (inc extracting grammar if needed)
        Threadsafe, FIFO'''
        lock = self.ctx_locks[ctx_name]
        lock.acquire()
        self.lazy_ctx(ctx_name)
        if '' in (source.strip(), target.strip()):
            logger.info('({}) ERROR: empty source or target: {} ||| {}'.format(ctx_name, source, target))
            lock.release()
            return
        if self.norm:
            source = self.tokenize(source)
            target = self.tokenize(target)
        # Align instance
        alignment = self.aligner.align(source, target)
        grammar_file = self.grammar(source, ctx_name)
        # MIRA update before adding data to grammar extractor
        decoder = self.decoders[ctx_name]
        mira_log = decoder.decoder.update(source, grammar_file, target)
        logger.info('({}) MIRA HBF: {}'.format(ctx_name, mira_log))
        # Add to HPYPLM by writing to fifo (read on next translation)
        if self.hpyplm:
            logger.info('({}) Adding to HPYPLM: {}'.format(ctx_name, target))
            decoder.ref_fifo.write('{}\n'.format(target))
            decoder.ref_fifo.flush()
        # Store incremental data for save/load
        self.ctx_data[ctx_name].append((source, target, alignment))
        # Add aligned sentence pair to grammar extractor
        logger.info('({}) Adding to bitext: {} ||| {} ||| {}'.format(ctx_name, source, target, alignment))
        self.extractor.add_instance(source, target, alignment, ctx_name)
        # Clear (old) cached grammar: the grammar() call above guarantees
        # the entry exists, so pop() cannot raise here.
        rm_grammar = self.grammar_dict[ctx_name].pop(source)
        os.remove(rm_grammar)
        lock.release()
def save_state(self, file_or_stringio=None, ctx_name=None):
    '''Write state (several lines terminated by EOF line) to file, buffer, or stdout'''
    # Hold the context lock so the snapshot is consistent.
    lock = self.ctx_locks[ctx_name]
    lock.acquire()
    self.lazy_ctx(ctx_name)
    ctx_data = self.ctx_data[ctx_name]
    # Filename, StringIO or None (stdout)
    if file_or_stringio:
        if isinstance(file_or_stringio, StringIO.StringIO):
            out = file_or_stringio
        else:
            out = open(file_or_stringio, 'w')
    else:
        out = sys.stdout
    logger.info('({}) Saving state with {} sentences'.format(ctx_name, len(ctx_data)))
    # Format: first line is the decoder feature weights, then one
    # "source ||| target ||| alignment" line per instance, then EOF.
    out.write('{}\n'.format(self.decoders[ctx_name].decoder.get_weights()))
    for (source, target, alignment) in ctx_data:
        out.write('{} ||| {} ||| {}\n'.format(source, target, alignment))
    out.write('EOF\n')
    # Close if file (never close a caller's buffer or stdout)
    if file_or_stringio and not isinstance(file_or_stringio, StringIO.StringIO):
        out.close()
    lock.release()
def load_state(self, file_or_stringio=None, ctx_name=None):
    '''Load state (several lines terminated by EOF line) from file, buffer, or stdin.
    Restarts context on any error.

    Format (mirrors save_state): first line is the decoder (MIRA) feature
    weights, then one "source ||| target ||| alignment" line per instance,
    terminated by a literal EOF line.  Threadsafe: the context lock is held
    for the duration of the load.
    '''
    lock = self.ctx_locks[ctx_name]
    lock.acquire()
    self.lazy_ctx(ctx_name)
    ctx_data = self.ctx_data[ctx_name]
    decoder = self.decoders[ctx_name]
    # Filename, StringIO, or None (stdin).  Named state_in to avoid
    # shadowing the `input` builtin.
    if file_or_stringio:
        if isinstance(file_or_stringio, StringIO.StringIO):
            state_in = file_or_stringio
        else:
            state_in = open(file_or_stringio)
    else:
        state_in = sys.stdin
    # Non-initial load error: state may only seed a brand-new context,
    # because the error recovery below restarts (drops) the context.
    if ctx_data:
        logger.info('({}) ERROR: Incremental data has already been added to context'.format(ctx_name))
        logger.info('    State can only be loaded to a new context.')
        lock.release()
        return
    # Many things can go wrong if bad state data is given
    try:
        # MIRA weights
        line = state_in.readline().strip()
        # Throws exception if bad line
        decoder.decoder.set_weights(line)
        logger.info('({}) Loading state...'.format(ctx_name))
        start_time = time.time()
        # Lines source ||| target ||| alignment
        while True:
            line = state_in.readline()
            if not line:
                raise Exception('End of file before EOF line')
            line = line.strip()
            if line == 'EOF':
                break
            (source, target, alignment) = line.split(' ||| ')
            ctx_data.append((source, target, alignment))
            # Extractor
            self.extractor.add_instance(source, target, alignment, ctx_name)
            # HPYPLM: decode once (result unused) to force the ref fifo to
            # be consumed, then feed the reference sentence to the LM.
            if self.hpyplm:
                decoder.decoder.decode(LIKELY_OOV)
                decoder.ref_fifo.write('{}\n'.format(target))
                decoder.ref_fifo.flush()
        stop_time = time.time()
        logger.info('({}) Loaded state with {} sentences in {} seconds'.format(ctx_name, len(ctx_data), stop_time - start_time))
        lock.release()
    # Recover from bad load attempt by restarting context.
    # Guaranteed not to cause data loss since only a new context can load state.
    # Catch Exception (not bare except) so KeyboardInterrupt/SystemExit
    # still propagate instead of being silently treated as bad state.
    except Exception:
        logger.info('({}) ERROR: could not load state, restarting context'.format(ctx_name))
        # ctx_name is already owned and needs to be restarted before other blocking threads use
        self.drop_ctx(ctx_name, force=True)
        self.lazy_ctx(ctx_name)
        lock.release()
| apache-2.0 |
lukecwik/incubator-beam | sdks/python/apache_beam/examples/complete/distribopt.py | 3 | 13498 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Example illustrating the use of Apache Beam for solving distributing
optimization tasks.
This example solves an optimization problem which consists of distributing a
number of crops to grow in several greenhouses. The decision where to grow the
crop has an impact on the production parameters associated with the greenhouse,
which affects the total cost of production at the greenhouse. Additionally,
each crop needs to be transported to a customer so the decision where to grow
the crop has an impact on the transportation costs as well.
This type of optimization problems are known as mixed-integer programs as they
exist of discrete parameters (do we produce a crop in greenhouse A, B or C?)
and continuous parameters (the greenhouse production parameters).
Running this example requires NumPy and SciPy. The input consists of a CSV file
with the following columns (Tx representing the transporation cost/unit if the
crop is produced in greenhouse x): Crop name, Quantity, Ta, Tb, Tc, ....
Example input file with 5 crops and 3 greenhouses (a transporation cost of 0
forbids production of the crop in a greenhouse):
OP01,8,12,0,12
OP02,30,14,3,12
OP03,25,7,3,14
OP04,87,7,2,2
OP05,19,1,7,10
The pipeline consists of three phases:
- Creating a grid of mappings (assignment of each crop to a greenhouse)
- For each mapping and each greenhouse, optimization of the production
parameters for cost, addition of the transporation costs, and aggregation
of the costs for each mapping.
- Selecting the mapping with the lowest cost.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import string
import uuid
from collections import defaultdict
import numpy as np
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from scipy.optimize import minimize
class Simulator(object):
    """Greenhouse simulation for the optimization of greenhouse parameters."""

    def __init__(self, quantities):
        # Quantities of each crop grown in this greenhouse (1-D array).
        self.quantities = np.atleast_1d(quantities)
        # Fixed coefficient matrices of the simulated cost surface.
        self.A = np.array([[3.0, 10, 30],
                           [0.1, 10, 35],
                           [3.0, 10, 30],
                           [0.1, 10, 35]])
        self.P = 1e-4 * np.array([[3689, 1170, 2673],
                                  [4699, 4387, 7470],
                                  [1091, 8732, 5547],
                                  [381, 5743, 8828]])
        base = np.array([[1.0, 1.2, 3.0, 3.2]])
        raw = np.cos(np.dot(base.T, self.quantities[None, :])).sum(axis=1)
        # Normalized mixing weights derived from the crop quantities.
        self.alpha = raw / raw.sum()

    def simulate(self, xc):
        """Map the production parameters xc to a per-crop cost vector."""
        dist = (self.A * np.square(xc - self.P)).sum(axis=1)
        objective = -(self.alpha * np.exp(-dist)).sum()
        return np.square(objective) * np.log(self.quantities)
class CreateGrid(beam.PTransform):
    """A transform for generating the mapping grid.

    Input: Formatted records of the input file, e.g.,
    {
      'crop': 'OP009',
      'quantity': 102,
      'transport_costs': [('A', None), ('B', 3), ('C', 8)]
    }
    Output: tuple (mapping_identifier, {crop -> greenhouse})
    """

    class PreGenerateMappings(beam.DoFn):
        """ParDo implementation forming based on two elements a small sub grid.

        This facilitates parallellization of the grid generation.
        Emits two PCollections: the subgrid represented as collection of lists of
        two tuples, and a list of remaining records. Both serve as an input to
        GenerateMappings.
        """

        def process(self, element):
            # element is (key, iterable-of-records) from the upstream GroupByKey.
            records = list(element[1])
            # Split of 2 crops and pre-generate the subgrid.
            # Select the crop with highest number of possible greenhouses:
            # in case two crops with only a single possible greenhouse were selected
            # the subgrid would consist of only 1 element.
            best_split = np.argsort([-len(r['transport_costs']) for r in records])[:2]
            rec1 = records[best_split[0]]
            rec2 = records[best_split[1]]
            # Generate & emit all combinations; a falsy cost (None or 0)
            # marks a forbidden crop/greenhouse pair and is skipped.
            for a in rec1['transport_costs']:
                if a[1]:
                    for b in rec2['transport_costs']:
                        if b[1]:
                            combination = [(rec1['crop'], a[0]), (rec2['crop'], b[0])]
                            yield pvalue.TaggedOutput('splitted', combination)
            # Pass on remaining records
            remaining = [rec for i, rec in enumerate(records) if i not in best_split]
            yield pvalue.TaggedOutput('combine', remaining)

    class GenerateMappings(beam.DoFn):
        """ParDo implementation to generate all possible mappings.

        Input: output of PreGenerateMappings
        Output: tuples of the form (mapping_identifier, {crop -> greenhouse})
        """

        @staticmethod
        def _coordinates_to_greenhouse(coordinates, greenhouses, crops):
            # Map the grid coordinates back to greenhouse labels
            arr = []
            for coord in coordinates:
                arr.append(greenhouses[coord])
            return dict(zip(crops, np.array(arr)))

        def process(self, element, records):
            # element: a pre-generated partial mapping of 2 crops;
            # records (side input): the remaining crops to place.
            # Generate available greenhouses and grid coordinates for each crop.
            grid_coordinates = []
            for rec in records:
                # Get indices for available greenhouses (w.r.t crops)
                filtered = [i for i, av in enumerate(rec['transport_costs']) if av[1]]
                grid_coordinates.append(filtered)
            # Generate all mappings (cartesian product of the index lists)
            grid = np.vstack(list(map(np.ravel, np.meshgrid(*grid_coordinates)))).T
            crops = [rec['crop'] for rec in records]
            greenhouses = [rec[0] for rec in records[0]['transport_costs']]
            for point in grid:
                # translate back to greenhouse label
                mapping = self._coordinates_to_greenhouse(point, greenhouses, crops)
                # The pre-split crops must be disjoint from the ones placed here.
                assert all(rec[0] not in mapping for rec in element)
                # include the incomplete mapping of 2 crops
                mapping.update(element)
                # include identifier
                yield (uuid.uuid4().hex, mapping)

    def expand(self, records):
        o = (
            records
            | 'pair one' >> beam.Map(lambda x: (1, x))
            | 'group all records' >> beam.GroupByKey()
            | 'split one of' >> beam.ParDo(self.PreGenerateMappings()).with_outputs(
                'splitted', 'combine'))
        # Create mappings, and prevent fusion (fusion would limit the
        # parallelization of the optimization step)
        mappings = (
            o.splitted
            | 'create mappings' >> beam.ParDo(
                self.GenerateMappings(), pvalue.AsSingleton(o.combine))
            | 'prevent fusion' >> beam.Reshuffle())
        return mappings
class OptimizeGrid(beam.PTransform):
    """A transform for optimizing all greenhouses of the mapping grid."""

    class CreateOptimizationTasks(beam.DoFn):
        """
        Create tasks for optimization.

        Input: (mapping_identifier, {crop -> greenhouse})
        Output: ((mapping_identifier, greenhouse), [(crop, quantity),...])
        """

        def process(self, element, quantities):
            # quantities (side input): {crop -> quantity}
            mapping_identifier, mapping = element
            # Create (crop, quantity) lists for each greenhouse
            greenhouses = defaultdict(list)
            for crop, greenhouse in mapping.items():
                quantity = quantities[crop]
                greenhouses[greenhouse].append((crop, quantity))
            # Create input for OptimizeProductParameters
            for greenhouse, crops in greenhouses.items():
                key = (mapping_identifier, greenhouse)
                yield (key, crops)

    class OptimizeProductParameters(beam.DoFn):
        """Solve the optimization task to determine optimal production parameters.
        Input: ((mapping_identifier, greenhouse), [(crop, quantity),...])
        Two outputs:
            - solution: (mapping_identifier, (greenhouse, [production parameters]))
            - costs: (crop, greenhouse, mapping_identifier, cost)
        """

        @staticmethod
        def _optimize_production_parameters(sim):
            # setup initial starting point & bounds (3 parameters in [0, 1])
            x0 = 0.5 * np.ones(3)
            bounds = list(zip(np.zeros(3), np.ones(3)))
            # Run L-BFGS-B optimizer on the total (summed) simulated cost
            result = minimize(lambda x: np.sum(sim.simulate(x)), x0, bounds=bounds)
            return result.x.tolist(), sim.simulate(result.x)

        def process(self, element):
            mapping_identifier, greenhouse = element[0]
            crops, quantities = zip(*element[1])
            sim = Simulator(quantities)
            optimum, costs = self._optimize_production_parameters(sim)
            solution = (mapping_identifier, (greenhouse, optimum))
            yield pvalue.TaggedOutput('solution', solution)
            # Emit one per-crop cost record, scaled by the crop quantity.
            # NOTE: rebinding `costs` inside the loop is safe because the
            # zip above already captured a reference to the cost vector.
            for crop, cost, quantity in zip(crops, costs, quantities):
                costs = (crop, greenhouse, mapping_identifier, cost * quantity)
                yield pvalue.TaggedOutput('costs', costs)

    def expand(self, inputs):
        mappings, quantities = inputs
        opt = (
            mappings
            | 'optimization tasks' >> beam.ParDo(
                self.CreateOptimizationTasks(), pvalue.AsDict(quantities))
            | 'optimize' >> beam.ParDo(
                self.OptimizeProductParameters()).with_outputs('costs', 'solution'))
        return opt
class CreateTransportData(beam.DoFn):
    """Transform records to pvalues ((crop, greenhouse), transport_cost)"""

    def process(self, record):
        crop_name = record['crop']
        for greenhouse_label, cost in record['transport_costs']:
            yield ((crop_name, greenhouse_label), cost)
def add_transport_costs(element, transport, quantities):
    """Adds the transport cost for the crop to the production cost.

    elements are of the form (crop, greenhouse, mapping, cost), the cost only
    corresponds to the production cost. Return the same format, but including
    the transport cost.
    """
    crop = element[0]
    base_cost = element[3]
    # Per-unit transport cost, keyed on (crop, greenhouse), times quantity.
    shipping = transport[element[:2]] * quantities[crop]
    return element[:3] + (base_cost + shipping,)
def parse_input(line):
    """Parse one CSV line into a crop record dict.

    Greenhouses are labelled 'A', 'B', ... in column order; an empty cost
    field becomes None (crop cannot be produced there).
    """
    fields = line.split(',')
    costs = [(label, int(raw) if raw else None)
             for label, raw in zip(string.ascii_uppercase, fields[2:])]
    return {
        'crop': fields[0],
        'quantity': int(fields[1]),
        'transport_costs': costs,
    }
def format_output(element):
    """Transforms the datastructure (unpack lists introduced by CoGroupByKey)
    before writing the result to file.
    """
    joined = element[1]
    # Each CoGroupByKey value arrives as a single-element list (or a list
    # of pairs for the production settings); unwrap them in place.
    joined['cost'] = joined['cost'][0]
    joined['production'] = dict(joined['production'])
    joined['mapping'] = joined['mapping'][0]
    return joined
def run(argv=None, save_main_session=True):
    """Build and run the crop/greenhouse distribution-optimization pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        dest='input',
        required=True,
        help='Input description to process.')
    parser.add_argument(
        '--output',
        dest='output',
        required=True,
        help='Output file to write results to.')
    known_args, pipeline_args = parser.parse_known_args(argv)
    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session

    with beam.Pipeline(options=pipeline_options) as p:
        # Parse input file
        records = (
            p
            | 'read' >> beam.io.ReadFromText(known_args.input)
            | 'process input' >> beam.Map(parse_input))

        # Create two pcollections, used as side inputs
        transport = (
            records
            | 'create transport' >> beam.ParDo(CreateTransportData()))
        quantities = (
            records
            | 'create quantities' >> beam.Map(lambda r: (r['crop'], r['quantity'])))

        # Generate all mappings and optimize greenhouse production parameters
        mappings = records | CreateGrid()
        opt = (mappings, quantities) | OptimizeGrid()

        # Then add the transport costs and sum costs per crop.
        # Keys down to (mapping_identifier, total_cost) before combining.
        costs = (
            opt.costs
            | 'include transport' >> beam.Map(
                add_transport_costs,
                pvalue.AsDict(transport),
                pvalue.AsDict(quantities))
            | 'drop crop and greenhouse' >> beam.Map(lambda x: (x[2], x[3]))
            | 'aggregate crops' >> beam.CombinePerKey(sum))

        # Join cost, mapping and production settings solution on mapping identifier.
        # Then select best.
        join_operands = {
            'cost': costs, 'production': opt.solution, 'mapping': mappings
        }
        best = (
            join_operands
            | 'join' >> beam.CoGroupByKey()
            | 'select best' >> beam.CombineGlobally(
                min, key=lambda x: x[1]['cost']).without_defaults()
            | 'format output' >> beam.Map(format_output))

        # pylint: disable=expression-not-assigned
        best | 'write optimum' >> beam.io.WriteToText(known_args.output)
if __name__ == '__main__':
    # Script entry point: enable INFO logging and run the pipeline.
    logging.getLogger().setLevel(logging.INFO)
    run()
| apache-2.0 |
t-tran/libcloud | libcloud/test/storage/test_local.py | 39 | 11565 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import shutil
import unittest
import tempfile
import mock
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
# Optional dependencies: when lockfile (or the local driver) is missing, bind
# all three names to None so the module still imports and the real tests are
# replaced via the `if not LocalStorageDriver` guard at the bottom of the file.
try:
    from libcloud.storage.drivers.local import LocalStorageDriver
    from libcloud.storage.drivers.local import LockLocalStorage
    from lockfile import LockTimeout
except ImportError:
    print('lockfile library is not available, skipping local_storage tests...')
    LocalStorageDriver = None
    LockTimeout = None
    # Previously left undefined on ImportError, which would raise NameError
    # if referenced; define it for symmetry with the other two names.
    LockLocalStorage = None
class LocalTests(unittest.TestCase):
    """Integration-style tests for the local-filesystem storage driver.

    Each test runs against a fresh temporary directory created in setUp and
    removed in tearDown.
    """

    driver_type = LocalStorageDriver

    @classmethod
    def create_driver(cls):
        # Fresh scratch directory as the storage root for this run.
        # (Parameter renamed from `self` to `cls`: this is a classmethod.)
        cls.key = tempfile.mkdtemp()
        return cls.driver_type(cls.key, None)

    def setUp(self):
        self.driver = self.create_driver()

    def tearDown(self):
        shutil.rmtree(self.key)
        self.key = None

    def make_tmp_file(self):
        """Create a 4096-byte temporary file and return its path."""
        _, tmppath = tempfile.mkstemp()
        with open(tmppath, 'wb') as fp:
            fp.write(b'blah' * 1024)
        return tmppath

    def remove_tmp_file(self, tmppath):
        os.unlink(tmppath)

    def test_list_containers_empty(self):
        """A fresh driver reports no containers."""
        containers = self.driver.list_containers()
        self.assertEqual(len(containers), 0)

    def test_containers_success(self):
        """Created containers are listed, carry timestamps, start empty."""
        self.driver.create_container('test1')
        self.driver.create_container('test2')
        containers = self.driver.list_containers()
        self.assertEqual(len(containers), 2)

        container = containers[1]
        self.assertTrue('creation_time' in container.extra)
        self.assertTrue('modify_time' in container.extra)
        self.assertTrue('access_time' in container.extra)

        objects = self.driver.list_container_objects(container=container)
        self.assertEqual(len(objects), 0)

        objects = container.list_objects()
        self.assertEqual(len(objects), 0)

        for container in containers:
            self.driver.delete_container(container)

    def test_objects_success(self):
        """Uploaded objects (path-nested and streamed) round-trip correctly."""
        tmppath = self.make_tmp_file()
        container = self.driver.create_container('test3')
        obj1 = container.upload_object(tmppath, 'object1')
        obj2 = container.upload_object(tmppath, 'path/object2')
        obj3 = container.upload_object(tmppath, 'path/to/object3')
        obj4 = container.upload_object(tmppath, 'path/to/object4.ext')
        with open(tmppath, 'rb') as tmpfile:
            obj5 = container.upload_object_via_stream(tmpfile, 'object5')

        objects = self.driver.list_container_objects(container=container)
        self.assertEqual(len(objects), 5)

        for obj in objects:
            self.assertNotEqual(obj.hash, None)
            self.assertEqual(obj.size, 4096)
            self.assertEqual(obj.container.name, 'test3')
            self.assertTrue('creation_time' in obj.extra)
            self.assertTrue('modify_time' in obj.extra)
            self.assertTrue('access_time' in obj.extra)

        obj1.delete()
        obj2.delete()

        objects = container.list_objects()
        self.assertEqual(len(objects), 3)

        container.delete_object(obj3)
        container.delete_object(obj4)
        container.delete_object(obj5)

        objects = container.list_objects()
        self.assertEqual(len(objects), 0)

        container.delete()
        self.remove_tmp_file(tmppath)

    def test_get_container_doesnt_exist(self):
        """get_container on a missing name raises ContainerDoesNotExistError."""
        try:
            self.driver.get_container(container_name='container1')
        except ContainerDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_get_container_success(self):
        self.driver.create_container('test4')
        container = self.driver.get_container(container_name='test4')
        # Fixed: was assertTrue(container.name, 'test4'), which treated
        # 'test4' as the failure message and always passed.
        self.assertEqual(container.name, 'test4')
        container.delete()

    def test_get_object_container_doesnt_exist(self):
        """get_object in a missing container raises ContainerDoesNotExistError."""
        try:
            self.driver.get_object(container_name='test-inexistent',
                                   object_name='test')
        except ContainerDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_get_object_success(self):
        tmppath = self.make_tmp_file()
        container = self.driver.create_container('test5')
        container.upload_object(tmppath, 'test')
        obj = self.driver.get_object(container_name='test5',
                                     object_name='test')

        self.assertEqual(obj.name, 'test')
        self.assertEqual(obj.container.name, 'test5')
        self.assertEqual(obj.size, 4096)
        self.assertNotEqual(obj.hash, None)
        self.assertTrue('creation_time' in obj.extra)
        self.assertTrue('modify_time' in obj.extra)
        self.assertTrue('access_time' in obj.extra)

        obj.delete()
        container.delete()
        self.remove_tmp_file(tmppath)

    def test_create_container_invalid_name(self):
        """Path separators are not allowed in container names."""
        try:
            self.driver.create_container(container_name='new/container')
        except InvalidContainerNameError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_create_container_already_exists(self):
        container = self.driver.create_container(
            container_name='new-container')
        try:
            self.driver.create_container(container_name='new-container')
        except ContainerAlreadyExistsError:
            pass
        else:
            self.fail('Exception was not thrown')

        # success
        self.driver.delete_container(container)

    def test_create_container_success(self):
        name = 'new_container'
        container = self.driver.create_container(container_name=name)
        self.assertEqual(container.name, name)
        self.driver.delete_container(container)

    def test_delete_container_doesnt_exist(self):
        container = Container(name='new_container', extra=None,
                              driver=self.driver)
        try:
            self.driver.delete_container(container=container)
        except ContainerDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_delete_container_not_empty(self):
        """A container holding objects cannot be deleted until emptied."""
        tmppath = self.make_tmp_file()
        container = self.driver.create_container('test6')
        obj = container.upload_object(tmppath, 'test')
        try:
            self.driver.delete_container(container=container)
        except ContainerIsNotEmptyError:
            pass
        else:
            self.fail('Exception was not thrown')

        # success
        obj.delete()
        self.remove_tmp_file(tmppath)
        self.assertTrue(self.driver.delete_container(container=container))

    def test_delete_container_not_found(self):
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        try:
            self.driver.delete_container(container=container)
        except ContainerDoesNotExistError:
            pass
        else:
            self.fail('Container does not exist but an exception was not' +
                      'thrown')

    def test_delete_container_success(self):
        container = self.driver.create_container('test7')
        self.assertTrue(self.driver.delete_container(container=container))

    def test_download_object_success(self):
        tmppath = self.make_tmp_file()
        container = self.driver.create_container('test6')
        obj = container.upload_object(tmppath, 'test')
        destination_path = tmppath + '.temp'
        result = self.driver.download_object(obj=obj,
                                             destination_path=destination_path,
                                             overwrite_existing=False,
                                             delete_on_failure=True)
        self.assertTrue(result)

        obj.delete()
        container.delete()
        self.remove_tmp_file(tmppath)
        os.unlink(destination_path)

    def test_download_object_and_overwrite(self):
        """Re-download fails without overwrite_existing and succeeds with it."""
        tmppath = self.make_tmp_file()
        container = self.driver.create_container('test6')
        obj = container.upload_object(tmppath, 'test')
        destination_path = tmppath + '.temp'
        result = self.driver.download_object(obj=obj,
                                             destination_path=destination_path,
                                             overwrite_existing=False,
                                             delete_on_failure=True)
        self.assertTrue(result)

        try:
            self.driver.download_object(obj=obj,
                                        destination_path=destination_path,
                                        overwrite_existing=False,
                                        delete_on_failure=True)
        except LibcloudError:
            pass
        else:
            self.fail('Exception was not thrown')

        result = self.driver.download_object(obj=obj,
                                             destination_path=destination_path,
                                             overwrite_existing=True,
                                             delete_on_failure=True)
        self.assertTrue(result)

        # success
        obj.delete()
        container.delete()
        self.remove_tmp_file(tmppath)
        os.unlink(destination_path)

    def test_download_object_as_stream_success(self):
        tmppath = self.make_tmp_file()
        container = self.driver.create_container('test6')
        obj = container.upload_object(tmppath, 'test')

        stream = self.driver.download_object_as_stream(obj=obj,
                                                       chunk_size=1024)
        self.assertTrue(hasattr(stream, '__iter__'))

        data = b''.join(stream)
        # Fixed: was assertTrue(len(data), 4096), which treated 4096 as the
        # failure message and always passed for non-empty data.
        self.assertEqual(len(data), 4096)

        obj.delete()
        container.delete()
        self.remove_tmp_file(tmppath)

    @mock.patch("lockfile.mkdirlockfile.MkdirLockFile.acquire",
                mock.MagicMock(side_effect=LockTimeout))
    def test_proper_lockfile_imports(self):
        # LockLocalStorage was previously using an un-imported exception
        # in its __enter__ method, so the following would raise a NameError.
        lls = LockLocalStorage("blah")
        self.assertRaises(LibcloudError, lls.__enter__)
# When the optional dependencies are missing, shadow LocalTests with an empty
# TestCase so the module still imports and the suite runs (skipping these).
if not LocalStorageDriver:
    class LocalTests(unittest.TestCase):  # NOQA
        pass


if __name__ == '__main__':
    sys.exit(unittest.main())
| apache-2.0 |
facetothefate/contrail-controller | src/config/utils/provision_encap.py | 13 | 5232 | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import argparse
import ConfigParser
from vnc_api.vnc_api import *
class EncapsulationProvision(object):
    """Provision (add/delete) the encapsulation priority list on the default
    global vrouter config through the VNC API.  All work happens in
    __init__; constructing the object performs the operation.
    """

    def __init__(self, args_str=None):
        self._args = None
        if not args_str:
            args_str = ' '.join(sys.argv[1:])
        self._parse_args(args_str)

        # Connect to the API server with the parsed credentials.
        self._vnc_lib = VncApi(
            self._args.admin_user, self._args.admin_password,
            self._args.admin_tenant_name,
            self._args.api_server_ip,
            self._args.api_server_port, '/')
        encap_obj = EncapsulationPrioritiesType(
            encapsulation=self._args.encap_priority.split(","))
        try:
            # Probe for an existing global vrouter config.
            current_config = self._vnc_lib.global_vrouter_config_read(
                fq_name=['default-global-system-config',
                         'default-global-vrouter-config'])
        except Exception as e:
            # NOTE(review): any read failure (not only "object missing") is
            # treated as "config does not exist yet" -- confirm intended.
            if self._args.oper == "add":
                conf_obj = GlobalVrouterConfig(
                    encapsulation_priorities=encap_obj,
                    vxlan_network_identifier_mode=self._args.vxlan_vn_id_mode)
                result = self._vnc_lib.global_vrouter_config_create(conf_obj)
                print 'Created.UUID is %s' % (result)
            return

        # Config exists: update it, preserving the linklocal services.
        current_linklocal = current_config.get_linklocal_services()
        encapsulation_priorities = encap_obj
        # Read but currently unused; the add branch below uses the CLI value.
        vxlan_network_identifier_mode = current_config.get_vxlan_network_identifier_mode()
        if self._args.oper != "add":
            # Delete: install an empty priority list.
            encap_obj = EncapsulationPrioritiesType(encapsulation=[])
            conf_obj = GlobalVrouterConfig(
                linklocal_services=current_linklocal,
                encapsulation_priorities=encap_obj)
        else:
            conf_obj = GlobalVrouterConfig(
                linklocal_services=current_linklocal,
                encapsulation_priorities=encapsulation_priorities,
                vxlan_network_identifier_mode=self._args.vxlan_vn_id_mode)
        result = self._vnc_lib.global_vrouter_config_update(conf_obj)
        print 'Updated.%s' % (result)
    # end __init__

    def _parse_args(self, args_str):
        '''
        Eg. python provision_encap.py
                --api_server_ip 127.0.0.1
                --api_server_port 8082
                --encap_priority "MPLSoUDP,MPLSoGRE,VXLAN"
                --vxlan_vn_id_mode "automatic"
                --oper <add | delete>
        '''
        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument("-c", "--conf_file",
                                 help="Specify config file", metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str.split())

        defaults = {
            'api_server_ip': '127.0.0.1',
            'api_server_port': '8082',
            'oper': 'add',
            'encap_priority': 'MPLSoUDP,MPLSoGRE,VXLAN',
            'vxlan_vn_id_mode': 'automatic'
        }
        ksopts = {
            'admin_user': 'user1',
            'admin_password': 'password1',
            'admin_tenant_name': 'admin'
        }

        # Values from the ini file override the hard-coded defaults.
        if args.conf_file:
            config = ConfigParser.SafeConfigParser()
            config.read([args.conf_file])
            defaults.update(dict(config.items("DEFAULTS")))
            if 'KEYSTONE' in config.sections():
                ksopts.update(dict(config.items("KEYSTONE")))

        # Override with CLI options
        # Don't surpress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        defaults.update(ksopts)
        parser.set_defaults(**defaults)

        parser.add_argument(
            "--api_server_ip", help="IP address of api server")
        parser.add_argument("--api_server_port", help="Port of api server")
        parser.add_argument(
            "--encap_priority", help="List of Encapsulation priority", required=True)
        parser.add_argument(
            "--vxlan_vn_id_mode", help="Virtual Network id type to be used")
        parser.add_argument(
            "--oper", default='add', help="Provision operation to be done(add or delete)")
        parser.add_argument(
            "--admin_user", help="Name of keystone admin user", required=True)
        parser.add_argument(
            "--admin_password", help="Password of keystone admin user", required=True)

        self._args = parser.parse_args(remaining_argv)
        if not self._args.encap_priority:
            parser.error('encap_priority is required')
    # end _parse_args
# end class EncapsulationProvision
def main(args_str=None):
    # Thin CLI wrapper: constructing the object performs the provisioning.
    EncapsulationProvision(args_str)
# end main


if __name__ == "__main__":
    main()
| apache-2.0 |
lamdnhan/osf.io | tests/test_oauth.py | 2 | 16240 | import httplib as http
import json
import responses
import time
import urlparse
from nose.tools import * # noqa
from framework.auth import authenticate
from framework.exceptions import PermissionsError, HTTPError
from framework.sessions import get_session
from website.oauth.models import (
ExternalAccount,
ExternalProvider,
OAUTH1,
OAUTH2,
)
from website.util import api_url_for, web_url_for
from tests.base import OsfTestCase
from tests.factories import (
AuthUserFactory,
ExternalAccountFactory,
MockOAuth2Provider,
UserFactory,
)
class MockOAuth1Provider(ExternalProvider):
    # Minimal OAuth 1.0a provider stub pointing at the mock1a.com endpoints
    # (mocked with `responses` in the tests below).
    _oauth_version = OAUTH1
    name = "Mock OAuth 1.0a Provider"
    short_name = "mock1a"

    client_id = "mock1a_client_id"
    client_secret = "mock1a_client_secret"

    auth_url_base = "http://mock1a.com/auth"
    request_token_url = "http://mock1a.com/request"
    callback_url = "http://mock1a.com/callback"

    def handle_callback(self, response):
        # Return the minimal account info the framework needs to link
        # an external account.
        return {
            'provider_id': 'mock_provider_id'
        }
def _prepare_mock_oauth2_handshake_response(expires_in=3600):
    """Register a canned, successful token-exchange response for the mock
    OAuth2 provider's callback endpoint."""
    token_payload = {
        'access_token': 'mock_access_token',
        'expires_at': time.time() + expires_in,
        'expires_in': expires_in,
        'refresh_token': 'mock_refresh_token',
        'scope': ['all'],
        'token_type': 'bearer',
    }
    responses.add(
        responses.POST,
        'https://mock2.com/callback',
        body=json.dumps(token_payload),
        status=200,
        content_type='application/json',
    )
def _prepare_mock_500_error():
    # Register a canned server-error response for the mock provider callback.
    # NOTE(review): despite the name, this registers a 503 -- callers appear
    # to need only "some 5xx error"; confirm before renaming or changing it.
    responses.add(
        responses.POST,
        'https://mock2.com/callback',
        body='{"error": "not found"}',
        status=503,
        content_type='application/json',
    )
class TestExternalAccount(OsfTestCase):
    # Test the ExternalAccount object and associated views.
    #
    # Functionality not specific to the OAuth version used by the
    # ExternalProvider should go here.

    def setUp(self):
        super(TestExternalAccount, self).setUp()
        self.user = AuthUserFactory()
        self.provider = MockOAuth2Provider()

    def tearDown(self):
        ExternalAccount._clear_caches()
        ExternalAccount.remove()
        self.user.remove()
        super(TestExternalAccount, self).tearDown()

    def test_disconnect(self):
        # Disconnect an external account from a user via the API view;
        # the account document itself must survive.
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
        )
        self.user.external_accounts.append(external_account)
        self.user.save()

        # If the external account isn't attached, this test has no meaning
        assert_equal(ExternalAccount.find().count(), 1)
        assert_in(
            external_account,
            self.user.external_accounts,
        )

        response = self.app.delete(
            api_url_for('oauth_disconnect',
                        external_account_id=external_account._id),
            auth=self.user.auth
        )

        # Request succeeded
        assert_equal(
            response.status_code,
            http.OK,
        )

        self.user.reload()
        # External account has been disassociated with the user
        assert_not_in(
            external_account,
            self.user.external_accounts,
        )

        # External account is still in the database
        assert_equal(ExternalAccount.find().count(), 1)

    def test_disconnect_with_multiple_connected(self):
        # Disconnect an account connected to multiple users from one user:
        # only the requesting user's association is removed.
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
        )
        self.user.external_accounts.append(external_account)
        self.user.save()

        other_user = UserFactory()
        other_user.external_accounts.append(external_account)
        other_user.save()

        response = self.app.delete(
            api_url_for('oauth_disconnect',
                        external_account_id=external_account._id),
            auth=self.user.auth
        )

        # Request succeeded
        assert_equal(
            response.status_code,
            http.OK,
        )

        self.user.reload()
        # External account has been disassociated with the user
        assert_not_in(
            external_account,
            self.user.external_accounts,
        )

        # External account is still in the database
        assert_equal(ExternalAccount.find().count(), 1)

        other_user.reload()
        # External account is still associated with the other user
        assert_in(
            external_account,
            other_user.external_accounts,
        )
class TestExternalProviderOAuth1(OsfTestCase):
    """Test functionality of the ExternalProvider class, for OAuth 1.0a."""
    def setUp(self):
        """Create a fresh user and a mock OAuth 1.0a provider."""
        super(TestExternalProviderOAuth1, self).setUp()
        self.user = UserFactory()
        self.provider = MockOAuth1Provider()
    def tearDown(self):
        """Remove accounts and users created by the tests."""
        ExternalAccount.remove()
        self.user.remove()
        super(TestExternalProviderOAuth1, self).tearDown()
    @responses.activate
    def test_start_flow(self):
        """Request temporary credentials; provide the auth redirect URL."""
        # Mock the provider's request-token endpoint.
        responses.add(responses.POST, 'http://mock1a.com/request',
                  body='{"oauth_token_secret": "temp_secret", '
                       '"oauth_token": "temp_token", '
                       '"oauth_callback_confirmed": "true"}',
                  status=200,
                  content_type='application/json')
        with self.app.app.test_request_context('/oauth/connect/mock1a/'):
            # make sure the user is logged in
            authenticate(user=self.user, response=None)
            # auth_url is a property method - it calls out to the external
            # service to get a temporary key and secret before returning the
            # auth url
            url = self.provider.auth_url
            # The URL to which the user would be redirected
            assert_equal(url, "http://mock1a.com/auth?oauth_token=temp_token")
            session = get_session()
            # Temporary credentials are added to the session
            creds = session.data['oauth_states'][self.provider.short_name]
            assert_equal(creds['token'], 'temp_token')
            assert_equal(creds['secret'], 'temp_secret')
    @responses.activate
    def test_callback(self):
        """Exchange temporary credentials for permanent credentials."""
        # mock a successful call to the provider to exchange temp keys for
        # permanent keys
        responses.add(
            responses.POST,
            'http://mock1a.com/callback',
            body=(
                'oauth_token=perm_token'
                '&oauth_token_secret=perm_secret'
                '&oauth_callback_confirmed=true'
            ),
        )
        user = UserFactory()
        # Fake a request context for the callback
        ctx = self.app.app.test_request_context(
            path='/oauth/callback/mock1a/',
            query_string='oauth_token=temp_key&oauth_verifier=mock_verifier',
        )
        with ctx:
            # make sure the user is logged in
            authenticate(user=user, response=None)
            session = get_session()
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'token': 'temp_key',
                    'secret': 'temp_secret',
                },
            }
            session.save()
            # do the key exchange
            self.provider.auth_callback(user=user)
        # The permanent credentials returned by the provider are persisted.
        account = ExternalAccount.find_one()
        assert_equal(account.oauth_key, 'perm_token')
        assert_equal(account.oauth_secret, 'perm_secret')
        assert_equal(account.provider_id, 'mock_provider_id')
        assert_equal(account.provider_name, 'Mock OAuth 1.0a Provider')
    @responses.activate
    def test_callback_wrong_user(self):
        """Reject temporary credentials not assigned to the logged-in user.

        This prohibits users from associating their external account with
        another user's OSF account by using XSS or similar attack vector to
        complete the OAuth flow using the logged-in user but their own account
        on the external service.

        If the OSF were to allow login via OAuth with the provider in question,
        this would allow attackers to hijack OSF accounts with a simple script
        injection.
        """
        # mock a successful call to the provider to exchange temp keys for
        # permanent keys
        responses.add(
            responses.POST,
            'http://mock1a.com/callback',
            body='oauth_token=perm_token'
                 '&oauth_token_secret=perm_secret'
                 '&oauth_callback_confirmed=true',
        )
        user = UserFactory()
        # Temporary credentials belonging to `user`, not the attacker.
        account = ExternalAccountFactory(
            provider="mock1a",
            provider_name='Mock 1A',
            oauth_key="temp_key",
            oauth_secret="temp_secret",
            temporary=True
        )
        account.save()
        # associate this ExternalAccount instance with the user
        user.external_accounts.append(account)
        user.save()
        malicious_user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
                path="/oauth/callback/mock1a/",
                query_string="oauth_token=temp_key&oauth_verifier=mock_verifier"
        ) as ctx:
            # make sure the user is logged in
            authenticate(user=malicious_user, response=None)
            with assert_raises(PermissionsError):
                # do the key exchange
                self.provider.auth_callback(user=malicious_user)
class TestExternalProviderOAuth2(OsfTestCase):
    """Test functionality of the ExternalProvider class, for OAuth 2.0."""
    def setUp(self):
        """Create a fresh user and a mock OAuth 2.0 provider."""
        super(TestExternalProviderOAuth2, self).setUp()
        self.user = UserFactory()
        self.provider = MockOAuth2Provider()
    def tearDown(self):
        """Remove accounts and users created by the tests; reset caches."""
        ExternalAccount._clear_caches()
        ExternalAccount.remove()
        self.user.remove()
        super(TestExternalProviderOAuth2, self).tearDown()
    def test_oauth_version_default(self):
        """OAuth 2.0 is the default version."""
        assert_is(self.provider._oauth_version, OAUTH2)
    def test_start_flow(self):
        """Generate the appropriate authorization URL and state token."""
        with self.app.app.test_request_context("/oauth/connect/mock2/") as ctx:
            # make sure the user is logged in
            authenticate(user=self.user, response=None)
            # auth_url is a property method - it calls out to the external
            # service to get a temporary key and secret before returning the
            # auth url
            url = self.provider.auth_url
            session = get_session()
            # Temporary credentials are added to the session
            creds = session.data['oauth_states'][self.provider.short_name]
            assert_in('state', creds)
            # The URL to which the user would be redirected
            parsed = urlparse.urlparse(url)
            params = urlparse.parse_qs(parsed.query)
            # check parameters
            assert_equal(
                params,
                {
                    'state': [creds['state']],
                    'response_type': ['code'],
                    'client_id': [self.provider.client_id],
                    'redirect_uri':[
                        web_url_for('oauth_callback',
                                    service_name=self.provider.short_name,
                                    _absolute=True)
                    ]
                }
            )
            # check base URL
            assert_equal(
                url.split("?")[0],
                "https://mock2.com/auth",
            )
    @responses.activate
    def test_callback(self):
        """Exchange the authorization code for permanent credentials."""
        # Mock the exchange of the code for an access token
        _prepare_mock_oauth2_handshake_response()
        user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
                path="/oauth/callback/mock2/",
                query_string="code=mock_code&state=mock_state"
        ) as ctx:
            # make sure the user is logged in
            authenticate(user=user, response=None)
            session = get_session()
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # do the key exchange
            self.provider.auth_callback(user=user)
        # The access token returned by the provider is persisted.
        account = ExternalAccount.find_one()
        assert_equal(account.oauth_key, 'mock_access_token')
        assert_equal(account.provider_id, 'mock_provider_id')
    @responses.activate
    def test_provider_down(self):
        """A provider 5xx during the key exchange surfaces as HTTP 503."""
        # Create a 500 error
        _prepare_mock_500_error()
        user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
                path="/oauth/callback/mock2/",
                query_string="code=mock_code&state=mock_state"
        ):
            # make sure the user is logged in
            authenticate(user=user, response=None)
            session = get_session()
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # do the key exchange
            with assert_raises(HTTPError) as error_raised:
                self.provider.auth_callback(user=user)
            assert_equal(
                error_raised.exception.code,
                503,
            )
    @responses.activate
    def test_multiple_users_associated(self):
        """Create only one ExternalAccount for multiple OSF users.

        For some providers (ex: GitHub), the act of completing the OAuth flow
        revokes previously generated credentials. In addition, there is often no
        way to know the user's id on the external service until after the flow
        has completed.

        Having only one ExternalAccount instance per account on the external
        service means that connecting subsequent OSF users to the same external
        account will not invalidate the credentials used by the OSF for users
        already associated.
        """
        user_a = UserFactory()
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
        )
        user_a.external_accounts.append(external_account)
        user_a.save()
        user_b = UserFactory()
        # Mock the exchange of the code for an access token
        _prepare_mock_oauth2_handshake_response()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
                path="/oauth/callback/mock2/",
                query_string="code=mock_code&state=mock_state"
        ) as ctx:
            # make sure the user is logged in
            authenticate(user=user_b, response=None)
            session = get_session()
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # do the key exchange
            self.provider.auth_callback(user=user_b)
        user_a.reload()
        user_b.reload()
        external_account.reload()
        # Both users share the single ExternalAccount record.
        assert_equal(
            user_a.external_accounts,
            user_b.external_accounts,
        )
        assert_equal(
            ExternalAccount.find().count(),
            1
        )
| apache-2.0 |
chafique-delli/OpenUpgrade | addons/hr_timesheet_sheet/report/hr_timesheet_report.py | 38 | 3295 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_timesheet_report(osv.osv):
    """Read-only timesheet reporting model backed by a SQL view.

    _auto = False because the storage is the ``hr_timesheet_report`` view
    (re)created in init(), not a regular table managed by the ORM.
    """
    _name = "hr.timesheet.report"
    _description = "Timesheet"
    _auto = False
    # All columns are readonly: rows come from the aggregating SQL view.
    _columns = {
        'date': fields.date('Date', readonly=True),
        'name': fields.char('Description', size=64,readonly=True),
        'product_id' : fields.many2one('product.product', 'Product',readonly=True),
        'journal_id' : fields.many2one('account.analytic.journal', 'Journal',readonly=True),
        'general_account_id' : fields.many2one('account.account', 'General Account', readonly=True),
        'user_id': fields.many2one('res.users', 'User',readonly=True),
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account',readonly=True),
        'company_id': fields.many2one('res.company', 'Company',readonly=True),
        'cost': fields.float('Cost',readonly=True, digits_compute=dp.get_precision('Account')),
        'quantity': fields.float('Time',readonly=True),
    }
    def init(self, cr):
        """(Re)create the SQL view aggregating analytic timesheet lines.

        Sums amount/unit_amount per (date, account, product, general
        account, journal, user, company, currency).
        NOTE(review): currency_id is selected and grouped by in the view
        but not declared in _columns — confirm whether it is intentional.
        """
        tools.drop_view_if_exists(cr, 'hr_timesheet_report')
        cr.execute("""
            create or replace view hr_timesheet_report as (
                select
                    min(t.id) as id,
                    l.date as date,
                    sum(l.amount) as cost,
                    sum(l.unit_amount) as quantity,
                    l.account_id as account_id,
                    l.journal_id as journal_id,
                    l.product_id as product_id,
                    l.general_account_id as general_account_id,
                    l.user_id as user_id,
                    l.company_id as company_id,
                    l.currency_id as currency_id
                from
                    hr_analytic_timesheet as t
                    left join account_analytic_line as l ON (t.line_id=l.id)
                group by
                    l.date,
                    l.account_id,
                    l.product_id,
                    l.general_account_id,
                    l.journal_id,
                    l.user_id,
                    l.company_id,
                    l.currency_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
repotvsupertuga/tvsupertuga.repository | instal/script.module.livestreamer/lib/livestreamer/plugins/filmon_us.py | 34 | 4358 | import re
from livestreamer.compat import urlparse
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.plugin.api.utils import parse_json, parse_query
from livestreamer.stream import RTMPStream, HTTPStream
# SWF players used for RTMP swfVfy (SWF verification) of the streams.
SWF_LIVE_URL = "https://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf"
SWF_VIDEO_URL = "http://www.filmon.us/application/themes/base/flash/MediaPlayer.swf"
# URLs this plugin claims (any filmon.us subdomain, http or https).
_url_re = re.compile("http(s)?://(\w+\.)?filmon.us")
# Live channels are embedded via an iframe pointing at filmon.com's export page.
_live_export_re = re.compile(
    "<iframe src=\"(https://www.filmon.com/channel/export[^\"]+)\""
)
# JSON blob with the live channel's stream list, inside the export page.
_live_json_re = re.compile("var startupChannel = (.+);")
# URL-encoded JSON with the stand-by (replay) video streams.
_replay_json_re = re.compile("var standByVideo = encodeURIComponent\('(.+)'\);")
# Direct HTTP URL of an archived ("history") video.
_history_re = re.compile(
    "helpers.common.flash.flashplayerinstall\({url:'([^']+)',"
)
# Flashvars of the embedded video player (RTMP server + stream name).
_video_flashvars_re = re.compile(
    "<embed width=\"486\" height=\"326\" flashvars=\"([^\"]+)\""
)
# Schema for the live-channel JSON: a list of named RTMP streams.
_live_schema = validate.Schema({
    "streams": [{
        "name": validate.text,
        "quality": validate.text,
        "url": validate.url(scheme="rtmp")
    }]
})
# Page schema: extract whichever of the four stream sources is present.
_schema = validate.Schema(
    validate.union({
        "export_url": validate.all(
            validate.transform(_live_export_re.search),
            validate.any(
                None,
                validate.get(1),
            )
        ),
        "video_flashvars": validate.all(
            validate.transform(_video_flashvars_re.search),
            validate.any(
                None,
                validate.all(
                    validate.get(1),
                    validate.transform(parse_query),
                    {
                        "_111pix_serverURL": validate.url(scheme="rtmp"),
                        "en_flash_providerName": validate.text
                    }
                )
            )
        ),
        "history_video": validate.all(
            validate.transform(_history_re.search),
            validate.any(
                None,
                validate.all(
                    validate.get(1),
                    validate.url(scheme="http")
                )
            )
        ),
        "standby_video": validate.all(
            validate.transform(_replay_json_re.search),
            validate.any(
                None,
                validate.all(
                    validate.get(1),
                    validate.transform(parse_json),
                    [{
                        "streamName": validate.url(scheme="http")
                    }]
                )
            )
        )
    })
)
class Filmon_us(Plugin):
    """Livestreamer plugin for filmon.us live, replay and history streams."""
    @classmethod
    def can_handle_url(self, url):
        """Return a match object if `url` belongs to filmon.us."""
        return _url_re.match(url)
    def _get_live_stream(self, export_url):
        """Fetch the live export page and build RTMP streams keyed by quality."""
        res = http.get(export_url)
        match = _live_json_re.search(res.text)
        if not match:
            return
        json = parse_json(match.group(1), schema=_live_schema)
        streams = {}
        for stream in json["streams"]:
            stream_name = stream["quality"]
            parsed = urlparse(stream["url"])
            # app = path + query of the RTMP URL (without the leading slash).
            stream = RTMPStream(self.session, {
                "rtmp": stream["url"],
                "app": "{0}?{1}".format(parsed.path[1:], parsed.query),
                "playpath": stream["name"],
                "swfVfy": SWF_LIVE_URL,
                "pageUrl": self.url,
                "live": True
            })
            streams[stream_name] = stream
        return streams
    def _get_streams(self):
        """Dispatch on whichever stream source the page exposes."""
        res = http.get(self.url, schema=_schema)
        if res["export_url"]:
            # Live channel embedded from filmon.com.
            return self._get_live_stream(res["export_url"])
        elif res["video_flashvars"]:
            # On-demand video played over RTMP.
            stream = RTMPStream(self.session, {
                "rtmp": res["video_flashvars"]["_111pix_serverURL"],
                "playpath": res["video_flashvars"]["en_flash_providerName"],
                "swfVfy": SWF_VIDEO_URL,
                "pageUrl": self.url
            })
            return dict(video=stream)
        elif res["standby_video"]:
            # Stand-by (replay) video: plain HTTP; only the last entry is kept.
            for stream in res["standby_video"]:
                stream = HTTPStream(self.session, stream["streamName"])
            return dict(replay=stream)
        elif res["history_video"]:
            # Archived video: plain HTTP.
            stream = HTTPStream(self.session, res["history_video"])
            return dict(history=stream)
        return
__plugin__ = Filmon_us
| gpl-2.0 |
YehudaItkin/virt-test | virttest/libvirt_xml/devices/channel.py | 30 | 1406 | """
Classes to support XML for channel devices
http://libvirt.org/formatdomain.html#elementCharSerial
"""
from virttest.libvirt_xml import base
from virttest.libvirt_xml import accessors
from virttest.libvirt_xml.devices.character import CharacterBase
class Channel(CharacterBase):
    """XML model for a libvirt <channel> character device.

    Exposes the source/target/alias/address sub-elements as dict-style
    accessors on the generated XML.
    """
    __slots__ = ('source', 'target', 'alias', 'address')
    def __init__(self, type_name='unix', virsh_instance=base.virsh):
        # Registering an accessor binds the named attribute to the XML
        # element with the given tag under this device's root.
        accessors.XMLElementDict('source', self, parent_xpath='/',
                                 tag_name='source')
        accessors.XMLElementDict('target', self, parent_xpath='/',
                                 tag_name='target')
        # example for new slots : alias and address
        #
        # <?xml version='1.0' encoding='UTF-8'?>
        # <channel type="pty">
        #   <source path="/dev/pts/10" />
        #   <target name="pty" type="virtio" />
        #   <alias name="pty" />
        #   <address bus="0" controller="0" type="virtio-serial" />
        # </channel>
        accessors.XMLElementDict('alias', self, parent_xpath='/',
                                 tag_name='alias')
        accessors.XMLElementDict('address', self, parent_xpath='/',
                                 tag_name='address')
        super(
            Channel, self).__init__(device_tag='channel', type_name=type_name,
                                    virsh_instance=virsh_instance)
| gpl-2.0 |
wschwa/Mr-Orange-Sick-Beard | lib/requests/packages/chardet/langhebrewmodel.py | 235 | 11340 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
# Maps each windows-1255 byte (0-255) to a frequency-order index used by the
# precedence matrix below; 252-255 are the special classes documented above.
win1255_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85,  # 40
 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253,  # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49,  # 60
 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253,  # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
  9,  8, 20, 16,  3,  2, 24, 14, 22,  1, 25, 15,  4, 11,  6, 23,
 12, 19, 13, 26, 18, 27, 21, 17,  7, 10,  5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = ( \
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
# Language-model descriptor consumed by chardet's SBCharSetProber.
# NOTE: constants.False is the py2.2-compat alias defined in chardet's own
# constants module, not the builtin.
Win1255HebrewModel = { \
  'charToOrderMap': win1255_CharToOrderMap,
  'precedenceMatrix': HebrewLangModel,
  'mTypicalPositiveRatio': 0.984004,
  'keepEnglishLetter': constants.False,
  'charsetName': "windows-1255"
}
| gpl-3.0 |
asedunov/intellij-community | python/lib/Lib/wsgiref/simple_server.py | 104 | 4789 | """BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21)
This is both an example of how WSGI can be implemented, and a basis for running
simple web applications on a local machine, such as might be done when testing
or debugging an application. It has not been reviewed for security issues,
however, and we strongly recommend that you use a "real" web server for
production use.
For example usage, see the 'if __name__=="__main__"' block at the end of the
module. See also the BaseHTTPServer module docs for other API information.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib, sys
from wsgiref.handlers import SimpleHandler
__version__ = "0.1"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
# Identification strings combined into the Server: response header value.
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
    """SimpleHandler that logs each request (status, bytes sent) on close."""
    server_software = software_version
    def close(self):
        # Log via the request handler; finally ensures the base class
        # close (and its resource cleanup) runs even if logging fails.
        try:
            self.request_handler.log_request(
                self.status.split(' ',1)[0], self.bytes_sent
            )
        finally:
            SimpleHandler.close(self)
class WSGIServer(HTTPServer):
    """BaseHTTPServer that implements the Python WSGI protocol"""
    # The WSGI application served by this server; set via set_app().
    application = None
    def server_bind(self):
        """Override server_bind to store the server name."""
        HTTPServer.server_bind(self)
        self.setup_environ()
    def setup_environ(self):
        """Build the base environ dict shared by all requests."""
        # Set up base environment
        env = self.base_environ = {}
        env['SERVER_NAME'] = self.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PORT'] = str(self.server_port)
        env['REMOTE_HOST']=''
        env['CONTENT_LENGTH']=''
        env['SCRIPT_NAME'] = ''
    def get_app(self):
        """Return the WSGI application object."""
        return self.application
    def set_app(self,application):
        """Set the WSGI application object to be served."""
        self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
    """Request handler that builds a WSGI environ and runs the application."""
    server_version = "WSGIServer/" + __version__
    def get_environ(self):
        """Return the WSGI environ dict for the current request."""
        env = self.server.base_environ.copy()
        env['SERVER_PROTOCOL'] = self.request_version
        env['REQUEST_METHOD'] = self.command
        if '?' in self.path:
            path,query = self.path.split('?',1)
        else:
            path,query = self.path,''
        env['PATH_INFO'] = urllib.unquote(path)
        env['QUERY_STRING'] = query
        host = self.address_string()
        # Only set REMOTE_HOST when reverse DNS produced a real name.
        if host != self.client_address[0]:
            env['REMOTE_HOST'] = host
        env['REMOTE_ADDR'] = self.client_address[0]
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        # Copy remaining headers as HTTP_* keys, joining duplicates.
        for h in self.headers.headers:
            k,v = h.split(':',1)
            k=k.replace('-','_').upper(); v=v.strip()
            if k in env:
                continue                    # skip content length, type,etc.
            if 'HTTP_'+k in env:
                env['HTTP_'+k] += ','+v     # comma-separate multiple headers
            else:
                env['HTTP_'+k] = v
        return env
    def get_stderr(self):
        """Return the stream used for wsgi.errors."""
        return sys.stderr
    def handle(self):
        """Handle a single HTTP request"""
        self.raw_requestline = self.rfile.readline()
        if not self.parse_request(): # An error code has been sent, just exit
            return
        handler = ServerHandler(
            self.rfile, self.wfile, self.get_stderr(), self.get_environ()
        )
        handler.request_handler = self      # backpointer for logging
        handler.run(self.server.get_app())
def demo_app(environ,start_response):
    """Trivial WSGI demo app: respond with "Hello world!" and the environ.

    Uses Python 2 print-to-file syntax and backtick repr; this module
    targets Python 2 only.
    """
    from StringIO import StringIO
    stdout = StringIO()
    print >>stdout, "Hello world!"
    print >>stdout
    h = environ.items(); h.sort()
    for k,v in h:
        print >>stdout, k,'=',`v`
    start_response("200 OK", [('Content-Type','text/plain')])
    return [stdout.getvalue()]
def make_server(
    host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
    """Create a new WSGI server listening on `host` and `port` for `app`"""
    server = server_class((host, port), handler_class)
    server.set_app(app)
    return server
if __name__ == '__main__':
    # Demo: serve demo_app on port 8000, open a browser at it,
    # handle exactly one request, then exit.
    httpd = make_server('', 8000, demo_app)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    import webbrowser
    webbrowser.open('http://localhost:8000/xyz?abc')
    httpd.handle_request()  # serve one request, then exit
#
| apache-2.0 |
binhqnguyen/lena-local | results/tcp/graphs/emulated/tahoe-50pkts/reproduce/data-scripts/emulated/FileHandle.py | 11 | 1884 | #!/usr/bin/python
import os
###This class is used to handle (read/write) file/folder.
class FileHandle(object):
    """Helper to read data files from a directory and append to an output file.

    Fixes over the previous revision:
      * read_single_file never advanced the line it was reading (the
        ``line = file.readline()`` happened only once, outside the loop)
        and always returned an empty list because ``return_value`` was
        never populated.
      * ``__init__`` compared strings with ``is`` (identity), which is
        implementation-dependent; it now uses equality.
      * ``old_file_path_list`` was a class-level mutable list shared by
        every instance, so read_dir results leaked between handles; each
        instance now gets its own list.
      * file handles are managed with ``with`` so they are closed even on
        errors, and the ``file`` builtin is no longer shadowed.
    """

    # Kept as class attributes for backward compatibility with code that
    # reads them off the class; instances shadow them in __init__.
    input_dir = ""
    output_file = ""
    old_file_path_list = []

    def __init__(self, input_dir, output_file=""):
        """Store the input directory and optional output file path.

        Exits the process if no input directory is given (preserved
        behavior from the original implementation).
        """
        if input_dir == "":  # was 'is ""' -- identity check on a literal
            print("Warning: No input directory explicit! quit now.")
            exit()
        self.input_dir = input_dir
        self.output_file = output_file
        # Per-instance accumulator for read_dir results.
        self.old_file_path_list = []

    def read_single_file(self, file_name=""):
        """Return the whitespace-separated values on the 4th line of a file.

        Returns an empty list when the file has fewer than four lines.
        NOTE(review): the original docstring said "line 3" while the code
        captured the 4th line read; the 4th-line behavior is kept.
        """
        print("***reading file ... " + file_name)
        values_str = ""
        with open(file_name) as data_file:  # 'with' guarantees close
            for line_number, line in enumerate(data_file, start=1):
                if line_number == 4:
                    values_str = line
                    break
        print("return = " + values_str)
        # The original built values_str but returned an always-empty list;
        # split the captured line into its individual values instead.
        return values_str.split()

    def write_output(self, write_content):
        """Append write_content to self.output_file."""
        with open(self.output_file, "a") as out:
            out.write(write_content)

    def read_dir(self, extension=""):
        """Append paths of files in input_dir matching `extension` to the
        accumulated list and return it.

        If the directory is empty, the previously accumulated list is
        returned unchanged (preserved behavior).
        """
        print("input dir = " + self.input_dir)
        f_list = os.listdir(self.input_dir)
        if not f_list:
            return self.old_file_path_list
        for entry in f_list:
            # skip entries that do not match the requested extension
            if os.path.splitext(entry)[1] != extension:
                continue
            self.old_file_path_list.append(self.input_dir + "/" + entry)
        return self.old_file_path_list

    def clear_output_file(self):
        """Truncate self.output_file to zero length."""
        open(self.output_file, "w").close()
################## | gpl-2.0 |
Jannes123/inasafe | safe/gis/qgis_vector_tools.py | 2 | 20449 | # coding=utf-8
"""**Utilities around QgsVectorLayer**
"""
__author__ = 'Dmitry Kolesov <kolesov.dm@gmail.com>'
__revision__ = '$Format:%H$'
__date__ = '14/01/2014'
__license__ = "GPL"
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import itertools
from qgis.core import (
QgsField,
QgsVectorLayer,
QgsFeature,
QgsPoint,
QgsGeometry,
QgsFeatureRequest,
QgsVectorFileWriter,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform)
from PyQt4.QtCore import QVariant
from safe.common.utilities import unique_filename
from safe.common.exceptions import WrongDataTypeException
def points_to_rectangles(points, dx, dy):
    """Create polygon layer around points. The polygons are dx to dy.

    Attributes of the points are copied.
    A point position is upper-left corner of the created rectangle.

    :param points: Point layer.
    :type points: QgsVectorLayer

    :param dx: Length of the horizontal sides
    :type dx: float

    :param dy: Length of the vertical sides
    :type dy: float

    :returns: Polygon layer
    :rtype: QgsVectorLayer
    """
    crs = points.crs().toWkt()
    point_provider = points.dataProvider()
    fields = point_provider.fields()
    # Create layer for store the lines from E and extent
    polygons = QgsVectorLayer(
        'Polygon?crs=' + crs, 'polygons', 'memory')
    polygon_provider = polygons.dataProvider()
    # Copy the attribute schema of the point layer onto the new layer.
    polygon_provider.addAttributes(fields.toList())
    polygons.startEditing()
    for feature in points.getFeatures():
        attrs = feature.attributes()
        point = feature.geometry().asPoint()
        x, y = point.x(), point.y()
        # Rectangle corners clockwise from the point; the point is the
        # upper-left corner (x grows by +dx, y drops by -dy).
        # noinspection PyCallByClass,PyTypeChecker
        g = QgsGeometry.fromPolygon([
            [QgsPoint(x, y),
             QgsPoint(x + dx, y),
             QgsPoint(x + dx, y - dy),
             QgsPoint(x, y - dy)]
        ])
        polygon_feat = QgsFeature()
        polygon_feat.setGeometry(g)
        # Copy the point's attributes onto the rectangle feature.
        polygon_feat.setAttributes(attrs)
        _ = polygon_provider.addFeatures([polygon_feat])
    polygons.commitChanges()
    return polygons
def union_geometry(vector, request=QgsFeatureRequest()):
    """Dissolve all (optionally filtered) geometries into one geometry.

    Attributes are ignored; internal boundaries are dissolved.  Returns
    None when the layer yields no valid geometry at all.

    :param vector: Vector layer
    :type vector: QgsVectorLayer

    :param request: Filter for vector objects
    :type request: QgsFeatureRequest

    :return: Union of the geometry
    :rtype: QgsGeometry or None
    """
    union = None
    for feat in vector.getFeatures(request):
        geom = feat.geometry()
        if union is None:
            union = QgsGeometry(geom)
            continue
        candidate = union.combine(geom)
        try:
            valid = candidate.isGeosValid()
        except AttributeError:
            # combine() yielded nothing usable for an invalid input
            # geometry -- keep the running union and move on.
            continue
        if valid:
            union = candidate
    return union
def create_layer(vector):
    """Create an empty memory layer mirroring *vector*.

    The CRS, geometry type and attribute fields of the new layer are
    copied from *vector*; the layer itself contains no features.

    :param vector: Vector layer
    :type vector: QgsVectorLayer

    :returns: Empty vector layer (stored in memory)
    :rtype: QgsVectorLayer

    :raises: WrongDataTypeException for point layers (points cannot be
        split) and for any unrecognised geometry type.
    """
    crs = vector.crs().toWkt()
    # Hoist the repeated geometryType() call.
    # QGis geometry type codes: 0 = point, 1 = line, 2 = polygon.
    geometry_type = vector.geometryType()
    if geometry_type == 0:
        # Fixed typo in the error message ("cant'" -> "can't").
        msg = "Points can't be split"
        raise WrongDataTypeException(msg)
    elif geometry_type == 1:
        uri = 'LineString?crs=' + crs
    elif geometry_type == 2:
        uri = 'Polygon?crs=' + crs
    else:
        msg = "Received unexpected type of layer geometry: %s" \
              % (geometry_type,)
        raise WrongDataTypeException(msg)
    result_layer = QgsVectorLayer(uri, 'intersected', 'memory')
    result_provider = result_layer.dataProvider()
    result_layer.startEditing()
    # Copy the attribute schema from the source layer.
    vector_provider = vector.dataProvider()
    fields = vector_provider.fields()
    result_provider.addAttributes(fields.toList())
    result_layer.commitChanges()
    return result_layer
def clip_by_polygon(
        vector,
        polygon):
    """Clip vector layer using polygon.

    Return part of the objects that lie within the polygon.

    :param vector: Vector layer
    :type vector: QgsVectorLayer

    :param polygon: Clipping polygon
    :type polygon: QgsGeometry

    :returns: Vector layer with split geometry
    :rtype: QgsVectorLayer
    """
    result_layer = create_layer(vector)
    result_layer.startEditing()
    for feature in vector.getFeatures():
        geom = feature.geometry()
        attributes = feature.attributes()
        geometry_type = geom.type()
        if polygon.intersects(geom):
            # Find parts of initial_geom, intersecting
            # with the polygon, then mark them if needed
            intersection = QgsGeometry(
                geom.intersection(polygon)
            ).asGeometryCollection()
            for g in intersection:
                # Keep only pieces of the same geometry type as the
                # source; an intersection can also produce pieces of
                # lower dimension (e.g. points from a line overlay).
                if g.type() == geometry_type:
                    feature = QgsFeature()
                    feature.setGeometry(g)
                    feature.setAttributes(attributes)
                    _ = result_layer.dataProvider().addFeatures([feature])
    result_layer.commitChanges()
    result_layer.updateExtents()
    return result_layer
def split_by_polygon(
        vector,
        polygon,
        request=QgsFeatureRequest(),
        mark_value=None):
    """Split objects from vector layer by polygon.

    If request is specified, filter the objects before splitting.
    If part of vector object lies in the polygon, mark it by mark_value (
    optional).  Parts inside the polygon get the mark value; parts
    outside (and untouched features) get 0 in the same field.

    :param vector: Vector layer
    :type vector: QgsVectorLayer

    :param polygon: Splitting polygon
    :type polygon: QgsGeometry

    :param request: Filter for vector objects
    :type request: QgsFeatureRequest

    :param mark_value: Field value to mark the objects.
    :type mark_value: (field_name, field_value).or None

    :returns: Vector layer with split geometry
    :rtype: QgsVectorLayer
    """
    def _set_feature(geometry, feature_attributes):
        """
        Helper to create and set up feature
        """
        included_feature = QgsFeature()
        included_feature.setGeometry(geometry)
        included_feature.setAttributes(feature_attributes)
        return included_feature

    def _update_attr_list(attributes, index, value, add_attribute=False):
        """
        Helper for update list of attributes.
        """
        # Work on a copy so the caller's attribute list is untouched.
        new_attributes = attributes[:]
        if add_attribute:
            new_attributes.append(value)
        else:
            new_attributes[index] = value
        return new_attributes

    # Create layer to store the splitted objects
    result_layer = create_layer(vector)
    result_provider = result_layer.dataProvider()
    fields = result_provider.fields()
    # If target_field does not exist, add it:
    new_field_added = False
    if mark_value is not None:
        target_field = mark_value[0]
        if fields.indexFromName(target_field) == -1:
            result_layer.startEditing()
            result_provider.addAttributes(
                [QgsField(target_field, QVariant.Int)])
            new_field_added = True
            result_layer.commitChanges()
    target_value = None
    if mark_value is not None:
        target_field = mark_value[0]
        target_value = mark_value[1]
        # target_field_index is only defined (and only used) when a
        # mark_value was supplied.
        target_field_index = result_provider.fieldNameIndex(target_field)
        if target_field_index == -1:
            raise WrongDataTypeException(
                'Field not found for %s' % target_field)
    # Start split procedure
    result_layer.startEditing()
    for initial_feature in vector.getFeatures(request):
        initial_geom = initial_feature.geometry()
        attributes = initial_feature.attributes()
        geometry_type = initial_geom.type()
        if polygon.intersects(initial_geom):
            # Find parts of initial_geom, intersecting
            # with the polygon, then mark them if needed
            intersection = QgsGeometry(
                initial_geom.intersection(polygon)
            ).asGeometryCollection()
            for g in intersection:
                # Discard pieces whose geometry type differs from the
                # source (intersections can yield lower-dimension parts).
                if g.type() == geometry_type:
                    if mark_value is not None:
                        new_attributes = _update_attr_list(
                            attributes,
                            target_field_index,
                            target_value,
                            add_attribute=new_field_added
                        )
                    else:
                        new_attributes = attributes
                    feature = _set_feature(g, new_attributes)
                    _ = result_layer.dataProvider().addFeatures([feature])
            # Find parts of the initial_geom that do not lie in the polygon
            diff_geom = QgsGeometry(
                initial_geom.symDifference(polygon)
            ).asGeometryCollection()
            for g in diff_geom:
                if g.type() == geometry_type:
                    if mark_value is not None:
                        # Parts outside the polygon are marked with 0.
                        new_attributes = _update_attr_list(
                            attributes,
                            target_field_index,
                            0,
                            add_attribute=new_field_added
                        )
                    else:
                        new_attributes = attributes
                    feature = _set_feature(g, new_attributes)
                    _ = result_layer.dataProvider().addFeatures([feature])
        else:
            # Feature entirely outside the polygon: copy it unchanged,
            # marking it with 0 when marking was requested.
            if mark_value is not None:
                new_attributes = _update_attr_list(
                    attributes,
                    target_field_index,
                    0,
                    add_attribute=new_field_added
                )
            else:
                new_attributes = attributes
            feature = _set_feature(initial_geom, new_attributes)
            _ = result_layer.dataProvider().addFeatures([feature])
    result_layer.commitChanges()
    result_layer.updateExtents()
    return result_layer
def split_by_polygon_in_out(
        vector,
        polygon_in,
        polygon_out,
        target_field,
        value,
        request=QgsFeatureRequest()):
    """Split a polygon layer updating the target field with the value.

    All parts of vector layer will have their target_field updated to
    value if they fall within polygon_in.

    :param vector: A polygon vector layer to split.
    :type vector: QgsVectorLayer

    :param polygon_in: Layer of polygons within which vector features
        will be considered to be contained.
        NOTE(review): the original docstring said QgsGeometry, but
        split_by_polygon2() iterates polygon_in.getFeatures(), so this
        must be a vector layer -- confirm against callers.
    :type polygon_in: QgsVectorLayer

    :param polygon_out: Layer of polygons within which vector features
        will be considered to be NOT contained (see note above).
    :type polygon_out: QgsVectorLayer

    :param target_field: Field in vector layer to be updated if features
        are within polygon_in.
    :type target_field: QgsField

    :param value: Value to update the target field with if polygons are in.
    :type value: int, float, str

    :param request: Optional feature request used to subset the features
        in vector.
    :type request: QgsFeatureRequest

    :return: QgsVectorLayer of split line for whichever is greater,
        in our out polygons.
    :rtype: QgsVectorLayer
    """
    # Unique temporary shapefile names; the *_poly_* ones are debug dumps.
    base_name = unique_filename()
    file_name_in = base_name + '_in.shp'
    file_name_out = base_name + '_out.shp'
    file_name_poly_in = base_name + '_poly_in.shp'
    file_name_poly_out = base_name + '_poly_out.shp'
    # Features touching polygon_in are marked with `value` ...
    # noinspection PyArgumentEqualDefault
    line_layer_in = split_by_polygon2(
        vector,
        polygon_in,
        request,
        use_contains_operation=False,
        mark_value=(target_field, value))
    # ... features contained in polygon_out are marked with 0.
    line_layer_out = split_by_polygon2(
        vector,
        polygon_out,
        request,
        use_contains_operation=True,
        mark_value=(target_field, 0))
    QgsVectorFileWriter.writeAsVectorFormat(
        line_layer_in, file_name_in, "utf-8", None, "ESRI Shapefile")
    QgsVectorFileWriter.writeAsVectorFormat(
        line_layer_out, file_name_out, "utf-8", None, "ESRI Shapefile")
    # NOTE(review): writeAsVectorFormat expects a map layer; passing the
    # polygon_in/polygon_out arguments straight through only works if
    # they really are layers (see parameter note above) -- confirm.
    QgsVectorFileWriter.writeAsVectorFormat(
        polygon_in, file_name_poly_in, "utf-8", None, "ESRI Shapefile")
    QgsVectorFileWriter.writeAsVectorFormat(
        polygon_out, file_name_poly_out, "utf-8", None, "ESRI Shapefile")
    # merge layers
    in_features = line_layer_in.featureCount()
    out_features = line_layer_out.featureCount()
    # Merge into whichever layer already holds more features and
    # return that layer.
    if in_features > out_features:
        for feature in line_layer_out.getFeatures():
            line_layer_in.dataProvider().addFeatures([feature])
        return line_layer_in
    else:
        for feature in line_layer_in.getFeatures():
            line_layer_out.dataProvider().addFeatures([feature])
        return line_layer_out
def split_by_polygon2(
        vector,
        polygon_layer,
        request=QgsFeatureRequest(),
        use_contains_operation=False,
        mark_value=None):
    """Split objects from vector layer by polygon.

    If request is specified, filter the objects before splitting.
    If part of vector object lies in the polygon, mark it by mark_value (
    optional).

    :param vector: Vector layer
    :type vector: QgsVectorLayer

    :param polygon_layer: Splitting polygons layer
    :type polygon_layer: QgsVectorLayer

    :param request: Filter for vector objects
    :type request: QgsFeatureRequest

    :param use_contains_operation: Whether to use geometrical containment.
    :type use_contains_operation: bool

    :param mark_value: Field value to mark the objects.
    :type mark_value: (field_name, field_value).or None

    :returns: Vector layer with split geometry
    :rtype: QgsVectorLayer
    """
    def _set_feature(geometry, attributes):
        """
        Helper to create and set up feature
        """
        included_feature = QgsFeature()
        included_feature.setGeometry(geometry)
        included_feature.setAttributes(attributes)
        return included_feature

    def _update_attr_list(attributes, index, value, add_attribute=False):
        """
        Helper for update list of attributes.
        """
        # Copy so the caller's attribute list is never mutated.
        new_attributes = attributes[:]
        if add_attribute:
            new_attributes.append(value)
        else:
            new_attributes[index] = value
        return new_attributes

    # Create layer to store the split objects
    result_layer = create_layer(vector)
    result_provider = result_layer.dataProvider()
    fields = result_provider.fields()
    # If target_field does not exist, add it:
    new_field_added = False
    if mark_value is not None:
        target_field = mark_value[0]
        if fields.indexFromName(target_field) == -1:
            result_layer.startEditing()
            result_provider.addAttributes(
                [QgsField(target_field, QVariant.Int)])
            new_field_added = True
            result_layer.commitChanges()
    target_value = None
    if mark_value is not None:
        target_field = mark_value[0]
        target_value = mark_value[1]
        target_field_index = result_provider.fieldNameIndex(target_field)
        if target_field_index == -1:
            raise WrongDataTypeException(
                'Field not found for %s' % target_field)
    # Start split procedure
    # Materialise the input geometries and attributes once, so they can
    # be iterated repeatedly (once per splitting polygon) below.
    line_geoms = []
    line_attributes = []
    for initial_feature in vector.getFeatures(request):
        initial_geom = initial_feature.geometry()
        line_geoms.append(QgsGeometry(initial_geom))
        attributes = initial_feature.attributes()
        line_attributes.append(attributes)
        # NOTE(review): geometry_type keeps the type of the *last*
        # feature read here and is reused for every feature below --
        # this assumes all features share one geometry type; confirm.
        geometry_type = initial_geom.type()
    poly_geoms = []
    for polygon_feature in polygon_layer.getFeatures(request):
        # Using simplify 1 should remove any pseudonodes on the polygon
        # and speed up polygon operations. TS
        # polygon = polygon_feature.geometry().simplify(1)
        # disabled for now (see #1300 Y.A.)
        polygon = polygon_feature.geometry()
        poly_geoms.append(QgsGeometry(polygon))
    result_layer.startEditing()
    for polygon in poly_geoms:
        # itertools.izip: Python 2 lazy pairing of geometries/attributes.
        for initial_geom, attributes in \
                itertools.izip(line_geoms, line_attributes):
            if use_contains_operation:
                poly_contains = polygon.contains(initial_geom)
            else:
                poly_contains = False
            poly_intersect = False
            if not poly_contains:
                # Only test intersection when containment did not hold.
                poly_intersect = polygon.intersects(initial_geom)
            if poly_contains or poly_intersect:
                # Find parts of initial_geom, intersecting
                # with the polygon, then mark them if needed
                if poly_contains:
                    # Fully contained: copy the whole geometry, no need
                    # to compute an intersection.
                    g = initial_geom
                    if mark_value is not None:
                        new_attributes = _update_attr_list(
                            attributes,
                            target_field_index,
                            target_value,
                            add_attribute=new_field_added
                        )
                    else:
                        new_attributes = attributes
                    feature = _set_feature(g, new_attributes)
                    _ = result_layer.dataProvider().addFeatures([feature])
                else:
                    intersection = QgsGeometry(
                        initial_geom.intersection(polygon)
                    ).asGeometryCollection()
                    for g in intersection:
                        # Keep only same-type pieces of the overlay.
                        if g.type() == geometry_type:
                            if mark_value is not None:
                                new_attributes = _update_attr_list(
                                    attributes,
                                    target_field_index,
                                    target_value,
                                    add_attribute=new_field_added
                                )
                            else:
                                new_attributes = attributes
                            feature = _set_feature(g, new_attributes)
                            _ = result_layer.dataProvider().\
                                addFeatures([feature])
    result_layer.commitChanges()
    result_layer.updateExtents()
    return result_layer
def extent_to_geo_array(extent, source_crs, dest_crs=None):
    """Transform *extent* out of *source_crs* and return it as a list.

    :param extent: Rectangle defining a spatial extent in any CRS.
    :type extent: QgsRectangle

    :param source_crs: Coordinate system used for extent.
    :type source_crs: QgsCoordinateReferenceSystem

    :param dest_crs: Optional destination coordinate system; when omitted
        the extent is transformed to Geographic / EPSG:4326.
    :type dest_crs: QgsCoordinateReferenceSystem

    :returns: a list in the form [xmin, ymin, xmax, ymax] expressed in
        the destination CRS.
    :rtype: list
    """
    if dest_crs is not None:
        target_crs = dest_crs
    else:
        # Default target: Geographic / EPSG:4326.
        target_crs = QgsCoordinateReferenceSystem()
        target_crs.createFromSrid(4326)
    transformer = QgsCoordinateTransform(source_crs, target_crs)
    # Transform the bounding box into the destination CRS.
    bbox = transformer.transformBoundingBox(extent)
    return [
        bbox.xMinimum(),
        bbox.yMinimum(),
        bbox.xMaximum(),
        bbox.yMaximum()]
def reproject_vector_layer(layer, crs):
    """Reproject a vector layer to given CRS

    :param layer: Vector layer
    :type layer: QgsVectorLayer

    :param crs: Coordinate system for reprojection.
    :type crs: QgsCoordinateReferenceSystem

    :returns: a vector layer with the specified projection
    :rtype: QgsVectorLayer
    """
    # Write a reprojected copy to a uniquely named shapefile on disk,
    # then load it back as an OGR layer.
    base_name = unique_filename()
    file_name = base_name + '.shp'
    # Python 2 print statement (debug output).
    print "reprojected layer1 %s" % file_name
    QgsVectorFileWriter.writeAsVectorFormat(
        layer, file_name, "utf-8", crs, "ESRI Shapefile")
    return QgsVectorLayer(file_name, base_name, "ogr")
| gpl-3.0 |
Work4Labs/lettuce | tests/integration/lib/Django-1.2.5/tests/modeltests/model_forms/models.py | 38 | 52607 | """
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
import os
import tempfile
from django.db import models
from django.core.files.storage import FileSystemStorage
# Store uploaded test files in a throw-away temporary directory so the
# tests never touch the project's MEDIA_ROOT.
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)

# Integer-keyed (value, label) choices used by Article.status.
ARTICLE_STATUS = (
    (1, 'Draft'),
    (2, 'Pending'),
    (3, 'Live'),
)

# The same statuses keyed by single characters, for ArticleStatus.status.
ARTICLE_STATUS_CHAR = (
    ('d', 'Draft'),
    ('p', 'Pending'),
    ('l', 'Live'),
)
class Category(models.Model):
    """Category with a name, a slug and a display URL."""
    name = models.CharField(max_length=20)
    slug = models.SlugField(max_length=20)
    url = models.CharField('The URL', max_length=40)

    def __unicode__(self):
        return self.name
class Writer(models.Model):
    """Author of articles; identified by name only."""
    name = models.CharField(max_length=50, help_text='Use both first and last names.')

    def __unicode__(self):
        return self.name
class Article(models.Model):
    """Article written by a Writer, optionally categorised.

    `created` is stamped automatically on first save and is therefore
    not editable.
    """
    headline = models.CharField(max_length=50)
    slug = models.SlugField()
    pub_date = models.DateField()
    created = models.DateField(editable=False)
    writer = models.ForeignKey(Writer)
    article = models.TextField()
    categories = models.ManyToManyField(Category, blank=True)
    status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)

    def save(self, *args, **kwargs):
        """Stamp the creation date on first save, then delegate.

        Bug fix: the original override took no arguments, silently
        breaking callers that pass force_insert/using/etc.; Django's
        documented convention is to accept and forward *args/**kwargs.
        """
        import datetime
        if not self.id:
            self.created = datetime.date.today()
        return super(Article, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.headline
class ImprovedArticle(models.Model):
    """One-to-one companion record for an Article."""
    article = models.OneToOneField(Article)
class ImprovedArticleWithParentLink(models.Model):
    """Like ImprovedArticle, but the O2O field doubles as a parent link."""
    article = models.OneToOneField(Article, parent_link=True)
class BetterWriter(Writer):
    """Multi-table-inheritance child of Writer adding a score."""
    score = models.IntegerField()
class WriterProfile(models.Model):
    """Profile keyed directly on its Writer (O2O used as primary key)."""
    writer = models.OneToOneField(Writer, primary_key=True)
    age = models.PositiveIntegerField()

    def __unicode__(self):
        return "%s is %s" % (self.writer, self.age)
from django.contrib.localflavor.us.models import PhoneNumberField
class PhoneNumber(models.Model):
    """US phone number (localflavor field) with a short description."""
    phone = PhoneNumberField()
    description = models.CharField(max_length=20)

    def __unicode__(self):
        return self.phone
class TextFile(models.Model):
    """File upload stored in the module's temporary storage."""
    description = models.CharField(max_length=20)
    # max_length=15 deliberately small so over-long filenames fail.
    file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)

    def __unicode__(self):
        return self.description
try:
    # If PIL is available, try testing ImageFields. Checking for the existence
    # of Image is enough for CPython, but for PyPy, you need to check for the
    # underlying modules If PIL is not available, ImageField tests are omitted.
    # Try to import PIL in either of the two ways it can end up installed.
    try:
        from PIL import Image, _imaging
    except ImportError:
        import Image, _imaging

    # Flag read by the doctests to decide whether to run image tests.
    test_images = True

    class ImageFile(models.Model):
        """Model with a required ImageField and auto-filled dimensions."""
        def custom_upload_path(self, filename):
            # Upload under the instance's `path` when set, else 'tests'.
            path = self.path or 'tests'
            return '%s/%s' % (path, filename)

        description = models.CharField(max_length=20)
        # Deliberately put the image field *after* the width/height fields to
        # trigger the bug in #10404 with width/height not getting assigned.
        width = models.IntegerField(editable=False)
        height = models.IntegerField(editable=False)
        image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
                                  width_field='width', height_field='height')
        path = models.CharField(max_length=16, blank=True, default='')

        def __unicode__(self):
            return self.description

    class OptionalImageFile(models.Model):
        """Same as ImageFile, but the image (and dimensions) may be NULL."""
        def custom_upload_path(self, filename):
            path = self.path or 'tests'
            return '%s/%s' % (path, filename)

        description = models.CharField(max_length=20)
        image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
                                  width_field='width', height_field='height',
                                  blank=True, null=True)
        width = models.IntegerField(editable=False, null=True)
        height = models.IntegerField(editable=False, null=True)
        path = models.CharField(max_length=16, blank=True, default='')

        def __unicode__(self):
            return self.description
except ImportError:
    # PIL is missing: skip the image-field models entirely.
    test_images = False
class CommaSeparatedInteger(models.Model):
    """Holder for a CommaSeparatedIntegerField under test."""
    field = models.CommaSeparatedIntegerField(max_length=20)

    def __unicode__(self):
        return self.field
class Product(models.Model):
    """Product identified by a unique slug."""
    slug = models.SlugField(unique=True)

    def __unicode__(self):
        return self.slug
class Price(models.Model):
    """(price, quantity) pair; the combination must be unique."""
    price = models.DecimalField(max_digits=10, decimal_places=2)
    quantity = models.PositiveIntegerField()

    def __unicode__(self):
        return u"%s for %s" % (self.quantity, self.price)

    class Meta:
        unique_together = (('price', 'quantity'),)
class ArticleStatus(models.Model):
    """Status keyed by single characters (ARTICLE_STATUS_CHAR)."""
    # NOTE(review): null=True on a CharField is normally discouraged in
    # Django; presumably intentional here to exercise that combination.
    status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
class Inventory(models.Model):
    """Self-referential tree keyed by barcode (FK uses to_field)."""
    barcode = models.PositiveIntegerField(unique=True)
    parent = models.ForeignKey('self', to_field='barcode', blank=True, null=True)
    name = models.CharField(blank=False, max_length=20)

    def __unicode__(self):
        return self.name
class Book(models.Model):
    """Book; (title, author) must be unique, special_id unique if set."""
    title = models.CharField(max_length=40)
    author = models.ForeignKey(Writer, blank=True, null=True)
    special_id = models.IntegerField(blank=True, null=True, unique=True)

    class Meta:
        unique_together = ('title', 'author')
class BookXtra(models.Model):
    """Abstract mixin adding ISBN and suffix uniqueness to Book."""
    isbn = models.CharField(max_length=16, unique=True)
    suffix1 = models.IntegerField(blank=True, default=0)
    suffix2 = models.IntegerField(blank=True, default=0)

    class Meta:
        # NOTE(review): the extra parentheses do NOT nest -- this is the
        # single group ('suffix1', 'suffix2'), which Django accepts.
        unique_together = (('suffix1', 'suffix2'))
        abstract = True
class DerivedBook(Book, BookXtra):
    """Concrete Book combined with the abstract BookXtra mixin."""
    pass
class ExplicitPK(models.Model):
    """Model with an explicit CharField primary key."""
    key = models.CharField(max_length=20, primary_key=True)
    desc = models.CharField(max_length=20, blank=True, unique=True)

    class Meta:
        unique_together = ('key', 'desc')

    def __unicode__(self):
        return self.key
class Post(models.Model):
    """Post exercising unique_for_date/month/year constraints."""
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField()

    def __unicode__(self):
        # Bug fix: the model has no `name` field, so the original
        # `return self.name` always raised AttributeError.
        return self.title
class DerivedPost(Post):
    """Multi-table-inheritance child of Post with no extra fields."""
    pass
class BigInt(models.Model):
    """Holder for a BigIntegerField under test."""
    biggie = models.BigIntegerField()

    def __unicode__(self):
        return unicode(self.biggie)
class MarkupField(models.CharField):
    """CharField variant whose formfield() is None, so it is always
    excluded from generated model forms."""
    def __init__(self, *args, **kwargs):
        # Fixed length regardless of what the caller passes.
        kwargs["max_length"] = 20
        super(MarkupField, self).__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        # don't allow this field to be used in form (real use-case might be
        # that you know the markup will always be X, but it is among an app
        # that allows the user to say it could be something else)
        # regressed at r10062
        return None
class CustomFieldForExclusionModel(models.Model):
    """Model whose `markup` field excludes itself from forms."""
    name = models.CharField(max_length=10)
    markup = MarkupField()
class FlexibleDatePost(models.Model):
    """Like Post, but `posted` may be NULL/blank."""
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField(blank=True, null=True)
__test__ = {'API_TESTS': """
>>> from django import forms
>>> from django.forms.models import ModelForm, model_to_dict
>>> from django.core.files.uploadedfile import SimpleUploadedFile
The bare bones, absolutely nothing custom, basic case.
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
>>> CategoryForm.base_fields.keys()
['name', 'slug', 'url']
Extra fields.
>>> class CategoryForm(ModelForm):
... some_extra_field = forms.BooleanField()
...
... class Meta:
... model = Category
>>> CategoryForm.base_fields.keys()
['name', 'slug', 'url', 'some_extra_field']
Extra field that has a name collision with a related object accessor.
>>> class WriterForm(ModelForm):
... book = forms.CharField(required=False)
...
... class Meta:
... model = Writer
>>> wf = WriterForm({'name': 'Richard Lockridge'})
>>> wf.is_valid()
True
Replacing a field.
>>> class CategoryForm(ModelForm):
... url = forms.BooleanField()
...
... class Meta:
... model = Category
>>> CategoryForm.base_fields['url'].__class__
<class 'django.forms.fields.BooleanField'>
Using 'fields'.
>>> class CategoryForm(ModelForm):
...
... class Meta:
... model = Category
... fields = ['url']
>>> CategoryForm.base_fields.keys()
['url']
Using 'exclude'
>>> class CategoryForm(ModelForm):
...
... class Meta:
... model = Category
... exclude = ['url']
>>> CategoryForm.base_fields.keys()
['name', 'slug']
Using 'fields' *and* 'exclude'. Not sure why you'd want to do this, but uh,
"be liberal in what you accept" and all.
>>> class CategoryForm(ModelForm):
...
... class Meta:
... model = Category
... fields = ['name', 'url']
... exclude = ['url']
>>> CategoryForm.base_fields.keys()
['name']
Using 'widgets'
>>> class CategoryForm(ModelForm):
...
... class Meta:
... model = Category
... fields = ['name', 'url', 'slug']
... widgets = {
... 'name': forms.Textarea,
... 'url': forms.TextInput(attrs={'class': 'url'})
... }
>>> str(CategoryForm()['name'])
'<textarea id="id_name" rows="10" cols="40" name="name"></textarea>'
>>> str(CategoryForm()['url'])
'<input id="id_url" type="text" class="url" name="url" maxlength="40" />'
>>> str(CategoryForm()['slug'])
'<input id="id_slug" type="text" name="slug" maxlength="20" />'
Don't allow more than one 'model' definition in the inheritance hierarchy.
Technically, it would generate a valid form, but the fact that the resulting
save method won't deal with multiple objects is likely to trip up people not
familiar with the mechanics.
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
>>> class OddForm(CategoryForm):
... class Meta:
... model = Article
OddForm is now an Article-related thing, because BadForm.Meta overrides
CategoryForm.Meta.
>>> OddForm.base_fields.keys()
['headline', 'slug', 'pub_date', 'writer', 'article', 'status', 'categories']
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
First class with a Meta class wins.
>>> class BadForm(ArticleForm, CategoryForm):
... pass
>>> OddForm.base_fields.keys()
['headline', 'slug', 'pub_date', 'writer', 'article', 'status', 'categories']
Subclassing without specifying a Meta on the class will use the parent's Meta
(or the first parent in the MRO if there are multiple parent classes).
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
>>> class SubCategoryForm(CategoryForm):
... pass
>>> SubCategoryForm.base_fields.keys()
['name', 'slug', 'url']
We can also subclass the Meta inner class to change the fields list.
>>> class CategoryForm(ModelForm):
... checkbox = forms.BooleanField()
...
... class Meta:
... model = Category
>>> class SubCategoryForm(CategoryForm):
... class Meta(CategoryForm.Meta):
... exclude = ['url']
>>> print SubCategoryForm()
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th><td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>
# test using fields to provide ordering to the fields
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
... fields = ['url', 'name']
>>> CategoryForm.base_fields.keys()
['url', 'name']
>>> print CategoryForm()
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
... fields = ['slug', 'url', 'name']
... exclude = ['url']
>>> CategoryForm.base_fields.keys()
['slug', 'name']
# Old form_for_x tests #######################################################
>>> from django.forms import ModelForm, CharField
>>> import datetime
>>> Category.objects.all()
[]
>>> class CategoryForm(ModelForm):
... class Meta:
... model = Category
>>> f = CategoryForm()
>>> print f
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th><td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
>>> print f.as_ul()
<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>
>>> print f['name']
<input id="id_name" type="text" name="name" maxlength="20" />
>>> f = CategoryForm(auto_id=False)
>>> print f.as_ul()
<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>
>>> f = CategoryForm({'name': 'Entertainment', 'slug': 'entertainment', 'url': 'entertainment'})
>>> f.is_valid()
True
>>> f.cleaned_data['url']
u'entertainment'
>>> f.cleaned_data['name']
u'Entertainment'
>>> f.cleaned_data['slug']
u'entertainment'
>>> obj = f.save()
>>> obj
<Category: Entertainment>
>>> Category.objects.all()
[<Category: Entertainment>]
>>> f = CategoryForm({'name': "It's a test", 'slug': 'its-test', 'url': 'test'})
>>> f.is_valid()
True
>>> f.cleaned_data['url']
u'test'
>>> f.cleaned_data['name']
u"It's a test"
>>> f.cleaned_data['slug']
u'its-test'
>>> obj = f.save()
>>> obj
<Category: It's a test>
>>> Category.objects.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
If you call save() with commit=False, then it will return an object that
hasn't yet been saved to the database. In this case, it's up to you to call
save() on the resulting model instance.
>>> f = CategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
>>> f.is_valid()
True
>>> f.cleaned_data['url']
u'third'
>>> f.cleaned_data['name']
u'Third test'
>>> f.cleaned_data['slug']
u'third-test'
>>> obj = f.save(commit=False)
>>> obj
<Category: Third test>
>>> Category.objects.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
>>> obj.save()
>>> Category.objects.order_by('name')
[<Category: Entertainment>, <Category: It's a test>, <Category: Third test>]
If you call save() with invalid data, you'll get a ValueError.
>>> f = CategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
>>> f.errors['name']
[u'This field is required.']
>>> f.errors['slug']
[u"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."]
>>> f.cleaned_data
Traceback (most recent call last):
...
AttributeError: 'CategoryForm' object has no attribute 'cleaned_data'
>>> f.save()
Traceback (most recent call last):
...
ValueError: The Category could not be created because the data didn't validate.
>>> f = CategoryForm({'name': '', 'slug': '', 'url': 'foo'})
>>> f.save()
Traceback (most recent call last):
...
ValueError: The Category could not be created because the data didn't validate.
Create a couple of Writers.
>>> w_royko = Writer(name='Mike Royko')
>>> w_royko.save()
>>> w_woodward = Writer(name='Bob Woodward')
>>> w_woodward.save()
ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
fields with the 'choices' attribute are represented by a ChoiceField.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm(auto_id=False)
>>> print f
<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="...">Mike Royko</option>
<option value="...">Bob Woodward</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="1">Entertainment</option>
<option value="2">It's a test</option>
<option value="3">Third test</option>
</select><br /> Hold down "Control", or "Command" on a Mac, to select more than one.</td></tr>
You can restrict a form to a subset of the complete list of fields
by providing a 'fields' argument. If you try to save a
model created with such a form, you need to ensure that the fields
that are _not_ on the form have default values, or are allowed to have
a value of None. If a field isn't specified on a form, the object created
from the form can't provide a value for that field!
>>> class PartialArticleForm(ModelForm):
... class Meta:
... model = Article
... fields = ('headline','pub_date')
>>> f = PartialArticleForm(auto_id=False)
>>> print f
<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
When the ModelForm is passed an instance, that instance's current values are
inserted as 'initial' data in each Field.
>>> w = Writer.objects.get(name='Mike Royko')
>>> class RoykoForm(ModelForm):
... class Meta:
... model = Writer
>>> f = RoykoForm(auto_id=False, instance=w)
>>> print f
<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br />Use both first and last names.</td></tr>
>>> art = Article(headline='Test article', slug='test-article', pub_date=datetime.date(1988, 1, 4), writer=w, article='Hello.')
>>> art.save()
>>> art.id
1
>>> class TestArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = TestArticleForm(auto_id=False, instance=art)
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="..." selected="selected">Mike Royko</option>
<option value="...">Bob Woodward</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="1">Entertainment</option>
<option value="2">It's a test</option>
<option value="3">Third test</option>
</select> Hold down "Control", or "Command" on a Mac, to select more than one.</li>
>>> f = TestArticleForm({'headline': u'Test headline', 'slug': 'test-headline', 'pub_date': u'1984-02-06', 'writer': unicode(w_royko.pk), 'article': 'Hello.'}, instance=art)
>>> f.errors
{}
>>> f.is_valid()
True
>>> test_art = f.save()
>>> test_art.id
1
>>> test_art = Article.objects.get(id=1)
>>> test_art.headline
u'Test headline'
You can create a form over a subset of the available fields
by specifying a 'fields' argument to form_for_instance.
>>> class PartialArticleForm(ModelForm):
... class Meta:
... model = Article
... fields=('headline', 'slug', 'pub_date')
>>> f = PartialArticleForm({'headline': u'New headline', 'slug': 'new-headline', 'pub_date': u'1988-01-04'}, auto_id=False, instance=art)
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
>>> f.is_valid()
True
>>> new_art = f.save()
>>> new_art.id
1
>>> new_art = Article.objects.get(id=1)
>>> new_art.headline
u'New headline'
Add some categories and test the many-to-many form output.
>>> new_art.categories.all()
[]
>>> new_art.categories.add(Category.objects.get(name='Entertainment'))
>>> new_art.categories.all()
[<Category: Entertainment>]
>>> class TestArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = TestArticleForm(auto_id=False, instance=new_art)
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="..." selected="selected">Mike Royko</option>
<option value="...">Bob Woodward</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="1" selected="selected">Entertainment</option>
<option value="2">It's a test</option>
<option value="3">Third test</option>
</select> Hold down "Control", or "Command" on a Mac, to select more than one.</li>
Initial values can be provided for model forms
>>> f = TestArticleForm(auto_id=False, initial={'headline': 'Your headline here', 'categories': ['1','2']})
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="...">Mike Royko</option>
<option value="...">Bob Woodward</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="1" selected="selected">Entertainment</option>
<option value="2" selected="selected">It's a test</option>
<option value="3">Third test</option>
</select> Hold down "Control", or "Command" on a Mac, to select more than one.</li>
>>> f = TestArticleForm({'headline': u'New headline', 'slug': u'new-headline', 'pub_date': u'1988-01-04',
... 'writer': unicode(w_royko.pk), 'article': u'Hello.', 'categories': [u'1', u'2']}, instance=new_art)
>>> new_art = f.save()
>>> new_art.id
1
>>> new_art = Article.objects.get(id=1)
>>> new_art.categories.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
Now, submit form data with no categories. This deletes the existing categories.
>>> f = TestArticleForm({'headline': u'New headline', 'slug': u'new-headline', 'pub_date': u'1988-01-04',
... 'writer': unicode(w_royko.pk), 'article': u'Hello.'}, instance=new_art)
>>> new_art = f.save()
>>> new_art.id
1
>>> new_art = Article.objects.get(id=1)
>>> new_art.categories.all()
[]
Create a new article, with categories, via the form.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm({'headline': u'The walrus was Paul', 'slug': u'walrus-was-paul', 'pub_date': u'1967-11-01',
... 'writer': unicode(w_royko.pk), 'article': u'Test.', 'categories': [u'1', u'2']})
>>> new_art = f.save()
>>> new_art.id
2
>>> new_art = Article.objects.get(id=2)
>>> new_art.categories.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
Create a new article, with no categories, via the form.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm({'headline': u'The walrus was Paul', 'slug': u'walrus-was-paul', 'pub_date': u'1967-11-01',
... 'writer': unicode(w_royko.pk), 'article': u'Test.'})
>>> new_art = f.save()
>>> new_art.id
3
>>> new_art = Article.objects.get(id=3)
>>> new_art.categories.all()
[]
Create a new article, with categories, via the form, but use commit=False.
The m2m data won't be saved until save_m2m() is invoked on the form.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm({'headline': u'The walrus was Paul', 'slug': 'walrus-was-paul', 'pub_date': u'1967-11-01',
... 'writer': unicode(w_royko.pk), 'article': u'Test.', 'categories': [u'1', u'2']})
>>> new_art = f.save(commit=False)
# Manually save the instance
>>> new_art.save()
>>> new_art.id
4
# The instance doesn't have m2m data yet
>>> new_art = Article.objects.get(id=4)
>>> new_art.categories.all()
[]
# Save the m2m data on the form
>>> f.save_m2m()
>>> new_art.categories.order_by('name')
[<Category: Entertainment>, <Category: It's a test>]
Here, we define a custom ModelForm. Because it happens to have the same fields as
the Category model, we can just call the form's save() to apply its changes to an
existing Category instance.
>>> class ShortCategory(ModelForm):
... name = CharField(max_length=5)
... slug = CharField(max_length=5)
... url = CharField(max_length=3)
>>> cat = Category.objects.get(name='Third test')
>>> cat
<Category: Third test>
>>> cat.id
3
>>> form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
>>> form.save()
<Category: Third>
>>> Category.objects.get(id=3)
<Category: Third>
Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
at runtime, based on the data in the database when the form is displayed, not
the data in the database when the form is instantiated.
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm(auto_id=False)
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="...">Mike Royko</option>
<option value="...">Bob Woodward</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="1">Entertainment</option>
<option value="2">It's a test</option>
<option value="3">Third</option>
</select> Hold down "Control", or "Command" on a Mac, to select more than one.</li>
>>> Category.objects.create(name='Fourth', url='4th')
<Category: Fourth>
>>> Writer.objects.create(name='Carl Bernstein')
<Writer: Carl Bernstein>
>>> print f.as_ul()
<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="...">Mike Royko</option>
<option value="...">Bob Woodward</option>
<option value="...">Carl Bernstein</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="1">Entertainment</option>
<option value="2">It's a test</option>
<option value="3">Third</option>
<option value="4">Fourth</option>
</select> Hold down "Control", or "Command" on a Mac, to select more than one.</li>
# ModelChoiceField ############################################################
>>> from django.forms import ModelChoiceField, ModelMultipleChoiceField
>>> f = ModelChoiceField(Category.objects.all())
>>> list(f.choices)
[(u'', u'---------'), (1, u'Entertainment'), (2, u"It's a test"), (3, u'Third'), (4, u'Fourth')]
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(0)
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. That choice is not one of the available choices.']
>>> f.clean(3)
<Category: Third>
>>> f.clean(2)
<Category: It's a test>
# Add a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
>>> Category.objects.create(name='Fifth', url='5th')
<Category: Fifth>
>>> f.clean(5)
<Category: Fifth>
# Delete a Category object *after* the ModelChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
>>> Category.objects.get(url='5th').delete()
>>> f.clean(5)
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. That choice is not one of the available choices.']
>>> f = ModelChoiceField(Category.objects.filter(pk=1), required=False)
>>> print f.clean('')
None
>>> f.clean('')
>>> f.clean('1')
<Category: Entertainment>
>>> f.clean('100')
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. That choice is not one of the available choices.']
# queryset can be changed after the field is created.
>>> f.queryset = Category.objects.exclude(name='Fourth')
>>> list(f.choices)
[(u'', u'---------'), (1, u'Entertainment'), (2, u"It's a test"), (3, u'Third')]
>>> f.clean(3)
<Category: Third>
>>> f.clean(4)
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. That choice is not one of the available choices.']
# check that we can safely iterate choices repeatedly
>>> gen_one = list(f.choices)
>>> gen_two = f.choices
>>> gen_one[2]
(2L, u"It's a test")
>>> list(gen_two)
[(u'', u'---------'), (1L, u'Entertainment'), (2L, u"It's a test"), (3L, u'Third')]
# check that we can override the label_from_instance method to print custom labels (#4620)
>>> f.queryset = Category.objects.all()
>>> f.label_from_instance = lambda obj: "category " + str(obj)
>>> list(f.choices)
[(u'', u'---------'), (1L, 'category Entertainment'), (2L, "category It's a test"), (3L, 'category Third'), (4L, 'category Fourth')]
# ModelMultipleChoiceField ####################################################
>>> f = ModelMultipleChoiceField(Category.objects.all())
>>> list(f.choices)
[(1, u'Entertainment'), (2, u"It's a test"), (3, u'Third'), (4, u'Fourth')]
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean([])
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean([1])
[<Category: Entertainment>]
>>> f.clean([2])
[<Category: It's a test>]
>>> f.clean(['1'])
[<Category: Entertainment>]
>>> f.clean(['1', '2'])
[<Category: Entertainment>, <Category: It's a test>]
>>> f.clean([1, '2'])
[<Category: Entertainment>, <Category: It's a test>]
>>> f.clean((1, '2'))
[<Category: Entertainment>, <Category: It's a test>]
>>> f.clean(['100'])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 100 is not one of the available choices.']
>>> f.clean('hello')
Traceback (most recent call last):
...
ValidationError: [u'Enter a list of values.']
>>> f.clean(['fail'])
Traceback (most recent call last):
...
ValidationError: [u'"fail" is not a valid value for a primary key.']
# Add a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
>>> Category.objects.create(id=6, name='Sixth', url='6th')
<Category: Sixth>
>>> f.clean([6])
[<Category: Sixth>]
# Delete a Category object *after* the ModelMultipleChoiceField has already been
# instantiated. This proves clean() checks the database during clean() rather
# than caching it at time of instantiation.
>>> Category.objects.get(url='6th').delete()
>>> f.clean([6])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 6 is not one of the available choices.']
>>> f = ModelMultipleChoiceField(Category.objects.all(), required=False)
>>> f.clean([])
[]
>>> f.clean(())
[]
>>> f.clean(['10'])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 10 is not one of the available choices.']
>>> f.clean(['3', '10'])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 10 is not one of the available choices.']
>>> f.clean(['1', '10'])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 10 is not one of the available choices.']
# queryset can be changed after the field is created.
>>> f.queryset = Category.objects.exclude(name='Fourth')
>>> list(f.choices)
[(1, u'Entertainment'), (2, u"It's a test"), (3, u'Third')]
>>> f.clean([3])
[<Category: Third>]
>>> f.clean([4])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 4 is not one of the available choices.']
>>> f.clean(['3', '4'])
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 4 is not one of the available choices.']
>>> f.queryset = Category.objects.all()
>>> f.label_from_instance = lambda obj: "multicategory " + str(obj)
>>> list(f.choices)
[(1L, 'multicategory Entertainment'), (2L, "multicategory It's a test"), (3L, 'multicategory Third'), (4L, 'multicategory Fourth')]
# OneToOneField ###############################################################
>>> class ImprovedArticleForm(ModelForm):
... class Meta:
... model = ImprovedArticle
>>> ImprovedArticleForm.base_fields.keys()
['article']
>>> class ImprovedArticleWithParentLinkForm(ModelForm):
... class Meta:
... model = ImprovedArticleWithParentLink
>>> ImprovedArticleWithParentLinkForm.base_fields.keys()
[]
>>> bw = BetterWriter(name=u'Joe Better', score=10)
>>> bw.save()
>>> sorted(model_to_dict(bw).keys())
['id', 'name', 'score', 'writer_ptr']
>>> class BetterWriterForm(ModelForm):
... class Meta:
... model = BetterWriter
>>> form = BetterWriterForm({'name': 'Some Name', 'score': 12})
>>> form.is_valid()
True
>>> bw2 = form.save()
>>> bw2.delete()
>>> class WriterProfileForm(ModelForm):
... class Meta:
... model = WriterProfile
>>> form = WriterProfileForm()
>>> print form.as_p()
<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="...">Mike Royko</option>
<option value="...">Bob Woodward</option>
<option value="...">Carl Bernstein</option>
<option value="...">Joe Better</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" id="id_age" /></p>
>>> data = {
... 'writer': unicode(w_woodward.pk),
... 'age': u'65',
... }
>>> form = WriterProfileForm(data)
>>> instance = form.save()
>>> instance
<WriterProfile: Bob Woodward is 65>
>>> form = WriterProfileForm(instance=instance)
>>> print form.as_p()
<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="...">Mike Royko</option>
<option value="..." selected="selected">Bob Woodward</option>
<option value="...">Carl Bernstein</option>
<option value="...">Joe Better</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="text" name="age" value="65" id="id_age" /></p>
# PhoneNumberField ############################################################
>>> class PhoneNumberForm(ModelForm):
... class Meta:
... model = PhoneNumber
>>> f = PhoneNumberForm({'phone': '(312) 555-1212', 'description': 'Assistance'})
>>> f.is_valid()
True
>>> f.cleaned_data['phone']
u'312-555-1212'
>>> f.cleaned_data['description']
u'Assistance'
# FileField ###################################################################
# File forms.
>>> class TextFileForm(ModelForm):
... class Meta:
... model = TextFile
# Test conditions when files is either not given or empty.
>>> f = TextFileForm(data={'description': u'Assistance'})
>>> f.is_valid()
False
>>> f = TextFileForm(data={'description': u'Assistance'}, files={})
>>> f.is_valid()
False
# Upload a file and ensure it all works as expected.
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', 'hello world')})
>>> f.is_valid()
True
>>> type(f.cleaned_data['file'])
<class 'django.core.files.uploadedfile.SimpleUploadedFile'>
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test1.txt>
>>> instance.file.delete()
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', 'hello world')})
>>> f.is_valid()
True
>>> type(f.cleaned_data['file'])
<class 'django.core.files.uploadedfile.SimpleUploadedFile'>
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test1.txt>
# Check if the max_length attribute has been inherited from the model.
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test-maxlength.txt', 'hello world')})
>>> f.is_valid()
False
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
>>> f = TextFileForm(data={'description': u'Assistance'}, instance=instance)
>>> f.is_valid()
True
>>> f.cleaned_data['file']
<FieldFile: tests/test1.txt>
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test1.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
# Override the file by uploading a new one.
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test2.txt', 'hello world')}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test2.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test2.txt', 'hello world')})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test2.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
>>> instance.delete()
# Test the non-required FileField
>>> f = TextFileForm(data={'description': u'Assistance'})
>>> f.fields['file'].required = False
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: None>
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test3.txt', 'hello world')}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test3.txt>
# Instance can be edited w/out re-uploading the file and existing file should be preserved.
>>> f = TextFileForm(data={'description': u'New Description'}, instance=instance)
>>> f.fields['file'].required = False
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.description
u'New Description'
>>> instance.file
<FieldFile: tests/test3.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
>>> instance.delete()
>>> f = TextFileForm(data={'description': u'Assistance'}, files={'file': SimpleUploadedFile('test3.txt', 'hello world')})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.file
<FieldFile: tests/test3.txt>
# Delete the current file since this is not done by Django.
>>> instance.file.delete()
>>> instance.delete()
# BigIntegerField ################################################################
>>> class BigIntForm(forms.ModelForm):
... class Meta:
... model = BigInt
...
>>> bif = BigIntForm({'biggie': '-9223372036854775808'})
>>> bif.is_valid()
True
>>> bif = BigIntForm({'biggie': '-9223372036854775809'})
>>> bif.is_valid()
False
>>> bif.errors
{'biggie': [u'Ensure this value is greater than or equal to -9223372036854775808.']}
>>> bif = BigIntForm({'biggie': '9223372036854775807'})
>>> bif.is_valid()
True
>>> bif = BigIntForm({'biggie': '9223372036854775808'})
>>> bif.is_valid()
False
>>> bif.errors
{'biggie': [u'Ensure this value is less than or equal to 9223372036854775807.']}
"""}
# ImageField doctests are appended only when image support is available;
# `test_images` is presumably set earlier in this module based on whether
# PIL could be imported — TODO confirm against the (unseen) file header.
# NOTE(review): the doctest payload below is a runtime string and is kept
# byte-for-byte as found (including the upstream "slighty" typo), since
# altering it would change the executed doctests.
if test_images:
    __test__['API_TESTS'] += """
# ImageField ###################################################################
# ImageField and FileField are nearly identical, but they differ slighty when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
>>> class ImageFileForm(ModelForm):
... class Meta:
... model = ImageFile
>>> image_data = open(os.path.join(os.path.dirname(__file__), "test.png"), 'rb').read()
>>> image_data2 = open(os.path.join(os.path.dirname(__file__), "test2.png"), 'rb').read()
>>> f = ImageFileForm(data={'description': u'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)})
>>> f.is_valid()
True
>>> type(f.cleaned_data['image'])
<class 'django.core.files.uploadedfile.SimpleUploadedFile'>
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test.png>
>>> instance.width
16
>>> instance.height
16
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
>>> instance.image.delete(save=False)
>>> f = ImageFileForm(data={'description': u'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)})
>>> f.is_valid()
True
>>> type(f.cleaned_data['image'])
<class 'django.core.files.uploadedfile.SimpleUploadedFile'>
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test.png>
>>> instance.width
16
>>> instance.height
16
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
>>> f = ImageFileForm(data={'description': u'Look, it changed'}, instance=instance)
>>> f.is_valid()
True
>>> f.cleaned_data['image']
<...FieldFile: tests/test.png>
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test.png>
>>> instance.height
16
>>> instance.width
16
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
>>> instance.image.delete(save=False)
# Override the file by uploading a new one.
>>> f = ImageFileForm(data={'description': u'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test2.png>
>>> instance.height
32
>>> instance.width
48
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
>>> instance.image.delete(save=False)
>>> instance.delete()
>>> f = ImageFileForm(data={'description': u'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data2)})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test2.png>
>>> instance.height
32
>>> instance.width
48
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
>>> instance.image.delete(save=False)
>>> instance.delete()
# Test the non-required ImageField
>>> class OptionalImageFileForm(ModelForm):
... class Meta:
... model = OptionalImageFile
>>> f = OptionalImageFileForm(data={'description': u'Test'})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: None>
>>> instance.width
>>> instance.height
>>> f = OptionalImageFileForm(data={'description': u'And a final one'}, files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test3.png>
>>> instance.width
16
>>> instance.height
16
# Editing the instance without re-uploading the image should not affect the image or its width/height properties
>>> f = OptionalImageFileForm(data={'description': u'New Description'}, instance=instance)
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.description
u'New Description'
>>> instance.image
<...FieldFile: tests/test3.png>
>>> instance.width
16
>>> instance.height
16
# Delete the current file since this is not done by Django.
>>> instance.image.delete()
>>> instance.delete()
>>> f = OptionalImageFileForm(data={'description': u'And a final one'}, files={'image': SimpleUploadedFile('test4.png', image_data2)})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: tests/test4.png>
>>> instance.width
48
>>> instance.height
32
>>> instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
>>> f = ImageFileForm(data={'description': u'And a final one', 'path': 'foo'}, files={'image': SimpleUploadedFile('test4.png', image_data)})
>>> f.is_valid()
True
>>> instance = f.save()
>>> instance.image
<...FieldFile: foo/test4.png>
>>> instance.delete()
"""
# Unconditional doctests: ModelForm media, CommaSeparatedInteger,
# fields/exclude interaction, choice validation, to_field FKs, and cleanup.
# Fix: the PriceForm example testing the 'fields' Meta option was missing its
# opening `>>> class PriceForm(ModelForm):` line, leaving orphaned `...`
# doctest continuation lines (a PS2 line is invalid without a PS1 opener).
__test__['API_TESTS'] += """
# Media on a ModelForm ########################################################
# Similar to a regular Form class you can define custom media to be used on
# the ModelForm.
>>> class ModelFormWithMedia(ModelForm):
... class Media:
... js = ('/some/form/javascript',)
... css = {
... 'all': ('/some/form/css',)
... }
... class Meta:
... model = PhoneNumber
>>> f = ModelFormWithMedia()
>>> print f.media
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>
>>> class CommaSeparatedIntegerForm(ModelForm):
... class Meta:
... model = CommaSeparatedInteger
>>> f = CommaSeparatedIntegerForm({'field': '1,2,3'})
>>> f.is_valid()
True
>>> f.cleaned_data
{'field': u'1,2,3'}
>>> f = CommaSeparatedIntegerForm({'field': '1a,2'})
>>> f.errors
{'field': [u'Enter only digits separated by commas.']}
>>> f = CommaSeparatedIntegerForm({'field': ',,,,'})
>>> f.is_valid()
True
>>> f.cleaned_data
{'field': u',,,,'}
>>> f = CommaSeparatedIntegerForm({'field': '1.2'})
>>> f.errors
{'field': [u'Enter only digits separated by commas.']}
>>> f = CommaSeparatedIntegerForm({'field': '1,a,2'})
>>> f.errors
{'field': [u'Enter only digits separated by commas.']}
>>> f = CommaSeparatedIntegerForm({'field': '1,,2'})
>>> f.is_valid()
True
>>> f.cleaned_data
{'field': u'1,,2'}
>>> f = CommaSeparatedIntegerForm({'field': '1'})
>>> f.is_valid()
True
>>> f.cleaned_data
{'field': u'1'}
This Price instance generated by this form is not valid because the quantity
field is required, but the form is valid because the field is excluded from
the form. This is for backwards compatibility.
>>> class PriceForm(ModelForm):
... class Meta:
... model = Price
... exclude = ('quantity',)
>>> form = PriceForm({'price': '6.00'})
>>> form.is_valid()
True
>>> price = form.save(commit=False)
>>> price.full_clean()
Traceback (most recent call last):
...
ValidationError: {'quantity': [u'This field cannot be null.']}
The form should not validate fields that it doesn't contain even if they are
specified using 'fields', not 'exclude'.
>>> class PriceForm(ModelForm):
... class Meta:
... model = Price
... fields = ('price',)
>>> form = PriceForm({'price': '6.00'})
>>> form.is_valid()
True
The form should still have an instance of a model that is not complete and
not saved into a DB yet.
>>> form.instance.price
Decimal('6.00')
>>> form.instance.quantity is None
True
>>> form.instance.pk is None
True
# Choices on CharField and IntegerField
>>> class ArticleForm(ModelForm):
... class Meta:
... model = Article
>>> f = ArticleForm()
>>> f.fields['status'].clean('42')
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. 42 is not one of the available choices.']
>>> class ArticleStatusForm(ModelForm):
... class Meta:
... model = ArticleStatus
>>> f = ArticleStatusForm()
>>> f.fields['status'].clean('z')
Traceback (most recent call last):
...
ValidationError: [u'Select a valid choice. z is not one of the available choices.']
# Foreign keys which use to_field #############################################
>>> apple = Inventory.objects.create(barcode=86, name='Apple')
>>> pear = Inventory.objects.create(barcode=22, name='Pear')
>>> core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
>>> field = ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
>>> for choice in field.choices:
... print choice
(u'', u'---------')
(86, u'Apple')
(22, u'Pear')
(87, u'Core')
>>> class InventoryForm(ModelForm):
... class Meta:
... model = Inventory
>>> form = InventoryForm(instance=core)
>>> print form['parent']
<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="22">Pear</option>
<option value="87">Core</option>
</select>
>>> data = model_to_dict(core)
>>> data['parent'] = '22'
>>> form = InventoryForm(data=data, instance=core)
>>> core = form.save()
>>> core.parent
<Inventory: Pear>
>>> class CategoryForm(ModelForm):
... description = forms.CharField()
... class Meta:
... model = Category
... fields = ['description', 'url']
>>> CategoryForm.base_fields.keys()
['description', 'url']
>>> print CategoryForm()
<tr><th><label for="id_description">Description:</label></th><td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th><td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
# Model field that returns None to exclude itself with explicit fields ########
>>> class CustomFieldForExclusionForm(ModelForm):
... class Meta:
... model = CustomFieldForExclusionModel
... fields = ['name', 'markup']
>>> CustomFieldForExclusionForm.base_fields.keys()
['name']
>>> print CustomFieldForExclusionForm()
<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>
# Clean up
>>> import shutil
>>> shutil.rmtree(temp_storage_dir)
"""
| gpl-3.0 |
mtp1376/youtube-dl | youtube_dl/extractor/hornbunny.py | 169 | 1813 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
)
class HornBunnyIE(InfoExtractor):
    """Extractor for hornbunny.com video pages."""
    _VALID_URL = r'http?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html'
    _TEST = {
        'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html',
        'md5': '95e40865aedd08eff60272b704852ad7',
        'info_dict': {
            'id': '5227',
            'ext': 'flv',
            'title': 'panty slut jerk off instruction',
            'duration': 550,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        """Resolve the final flv URL and metadata for a video page."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(
            url, video_id, note='Downloading initial webpage')
        title = self._html_search_regex(
            r'class="title">(.*?)</h2>', webpage, 'title')
        # The initial page embeds a player-settings URL that leads to a
        # second page containing the real video URL.  The regex name was
        # previously mislabelled 'title', producing misleading error
        # messages on extraction failure.
        redirect_url = self._html_search_regex(
            r'pg&settings=(.*?)\|0"\);', webpage, 'redirect url')
        webpage2 = self._download_webpage(redirect_url, video_id)
        video_url = self._html_search_regex(
            r'flvMask:(.*?);', webpage2, 'video_url')

        duration = parse_duration(self._search_regex(
            r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
            webpage, 'duration', fatal=False))
        view_count = int_or_none(self._search_regex(
            r'<strong>Views:</strong>\s*(\d+)</div>',
            webpage, 'view count', fatal=False))

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'ext': 'flv',
            'duration': duration,
            'view_count': view_count,
            'age_limit': 18,
        }
| unlicense |
2014c2g4/2015cda_g7 | static/Brython3.1.1-20150328-091302/Lib/inspect.py | 637 | 78935 | """Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import imp
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication, but fall
# back to hardcoding so the dependency is optional
try:
from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
mod_dict = globals()
for k, v in _flag_names.items():
mod_dict["CO_" + v] = k
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.

    Module objects carry attributes such as __doc__ (documentation
    string), __file__ (file name, missing for built-in modules) and
    __cached__ (path to the byte-compiled file)."""
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Return true if the object is a class.

    Class objects provide, among others, __doc__ (documentation string)
    and __module__ (name of the module the class was defined in)."""
    return isinstance(object, type)
def ismethod(object):
    """Return true if the object is a bound instance method.

    Method objects provide __func__ (the underlying function object),
    __self__ (the instance the method is bound to), plus __name__ and
    __doc__."""
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true -- the
    more specific predicates take precedence.  True of objects such as
    int.__add__: their type defines __get__ but not __set__.  Such
    objects usually have a sensible __name__ and often a __doc__, but
    unlike real methods there is no guaranteed __func__ attribute."""
    # Mutual exclusion with the more specific type checks.
    if isclass(object) or ismethod(object) or isfunction(object):
        return False
    tp = type(object)
    if not hasattr(tp, "__get__"):
        return False
    return not hasattr(tp, "__set__")
def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors define both __get__ and __set__.  Examples are
    properties (defined in Python) and getsets and members (defined in
    C).  They typically also carry __name__ and __doc__ attributes, but
    that is not guaranteed."""
    # Mutual exclusion with the more specific type checks.
    if isclass(object) or ismethod(object) or isfunction(object):
        return False
    tp = type(object)
    return hasattr(tp, "__get__") and hasattr(tp, "__set__")
# types.MemberDescriptorType exists only on implementations (CPython and
# equivalents) that expose slot/member descriptors; elsewhere nothing can
# be a member descriptor and the predicate is constantly false.
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return False
# As above: types.GetSetDescriptorType is only defined on implementations
# that expose C-level getset descriptors.
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return False
def isfunction(object):
    """Return true if the object is a user-defined function.

    Function objects provide, among others:
        __doc__          documentation string
        __name__         name with which this function was defined
        __code__         code object containing compiled bytecode
        __defaults__     tuple of default values for positional arguments
        __kwdefaults__   dict of keyword-only parameter defaults
        __globals__      global namespace the function was defined in
        __annotations__  dict of parameter annotations"""
    return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
    """Return true if the object is a user-defined generator function.

    Generator function objects provide the same attributes as plain
    functions; see help(isfunction).  A function (or method) is a
    generator function when its code object carries the CO_GENERATOR
    flag."""
    if not (isfunction(object) or ismethod(object)):
        return False
    return bool(object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
    """Return true if the object is a generator (the object produced by
    calling a generator function).

    Generator objects provide, among others:
        __iter__    support for iteration over the container
        send        resumes the generator; the sent value becomes the
                    result of the current yield-expression
        throw       raises an exception inside the generator
        close       raises GeneratorExit inside the generator to
                    terminate the iteration
        gi_code     code object
        gi_frame    frame object, or possibly None once exhausted
        gi_running  set to 1 while the generator executes, 0 otherwise"""
    return isinstance(object, types.GeneratorType)
def istraceback(object):
    """Return true if the object is a traceback object.

    Traceback objects provide tb_frame (frame object at this level),
    tb_lasti (index of the last attempted instruction in the bytecode),
    tb_lineno (current line number in the source) and tb_next (the next
    inner traceback, called by this level)."""
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Return true if the object is a frame object.

    Frame objects provide, among others:
        f_back      next outer frame (this frame's caller)
        f_builtins  built-in namespace seen by this frame
        f_code      code object being executed in this frame
        f_globals   global namespace seen by this frame
        f_lasti     index of last attempted instruction in bytecode
        f_lineno    current line number in Python source code
        f_locals    local namespace seen by this frame
        f_trace     tracing function for this frame, or None"""
    return isinstance(object, types.FrameType)
def iscode(object):
    """Return true if the object is a code object.

    Code objects provide the co_* attributes: co_argcount, co_code,
    co_consts, co_filename, co_firstlineno, co_flags (bitmap:
    1=optimized | 2=newlocals | 4=*arg | 8=**arg), co_lnotab, co_name,
    co_names, co_nlocals, co_stacksize and co_varnames."""
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Return true if the object is a built-in function or method.

    Built-in callables provide __doc__ (documentation string), __name__
    (original name of the function or method) and __self__ (instance a
    method is bound to, or None)."""
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return true if the object is any kind of function or method,
    whether implemented in Python or in C."""
    return (isbuiltin(object) or isfunction(object)
            or ismethod(object) or ismethoddescriptor(object))
def isabstract(object):
    """Return true if the object is an abstract base class (ABC), i.e.
    a class whose type flags carry TPFLAGS_IS_ABSTRACT."""
    if not isinstance(object, type):
        return False
    return bool(object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.

    Optionally, only return members that satisfy a given predicate."""
    # For classes, consult the raw __dict__ of every class in the MRO
    # first: some descriptors dislike having their __get__ invoked via
    # getattr() (see bug #1785), so prefer the class-dictionary value.
    mro = (object,) + getmro(object) if isclass(object) else ()
    results = []
    for key in dir(object):
        for base in mro:
            if key in base.__dict__:
                value = base.__dict__[key]
                break
        else:
            try:
                value = getattr(object, key)
            except AttributeError:
                # dir() may report names that are not actually retrievable.
                continue
        if not predicate or predicate(value):
            results.append((key, value))
    results.sort()
    return results
# Result record for classify_class_attrs(): the attribute name, its kind
# string, the class that defines it, and the raw object taken from that
# class's __dict__.
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:
        0. The name (a string).
        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'       created via property()
               'method'         any other flavor of method
               'data'           not a method
        2. The class which defined this attribute (a class).
        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes: C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        # Furthermore, some objects may raise an Exception when fetched with
        # getattr(). This is the case with some descriptors (bug #1785).
        # Thus, we only use getattr() as a last resort.
        homecls = None
        for base in (cls,) + mro:
            if name in base.__dict__:
                obj = base.__dict__[name]
                homecls = base
                break
        else:
            # Not in any __dict__: fall back to getattr(); a descriptor's
            # __objclass__, when present, identifies the defining class.
            obj = getattr(cls, name)
            homecls = getattr(obj, "__objclass__", homecls)
        # Classify the object.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif ismethoddescriptor(obj):
            kind = "method"
        elif isdatadescriptor(obj):
            kind = "data"
        else:
            # Unrecognized raw object: classify what the attribute lookup
            # machinery actually returns for it.
            obj_via_getattr = getattr(cls, name)
            if (isfunction(obj_via_getattr) or
                ismethoddescriptor(obj_via_getattr)):
                kind = "method"
            else:
                kind = "data"
            obj = obj_via_getattr
        result.append(Attribute(name, kind, homecls, obj))
    return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
    """Return the tuple of base classes of *cls* -- including cls
    itself -- in method resolution order."""
    return cls.__mro__
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text.

    Tabs are expanded (8-column stops) before measuring."""
    expanded = line.expandtabs()
    return len(expanded) - len(expanded.lstrip())
def getdoc(object):
    """Get the documentation string for an object, cleaned with cleandoc().

    All tabs are expanded to spaces and whitespace that can be uniformly
    stripped from the second line onwards is removed.  Returns None when
    the object has no usable (string) docstring."""
    doc = getattr(object, '__doc__', None)
    if not isinstance(doc, str):
        return None
    return cleandoc(doc)
def cleandoc(doc):
    """Clean up indentation from docstrings.

    Tabs are expanded, the first line loses its leading whitespace, and
    any indentation common to every subsequent non-blank line is
    removed.  Leading and trailing blank lines are dropped.  Returns
    None when the text cannot be expanded (UnicodeError)."""
    try:
        lines = doc.expandtabs().split('\n')
    except UnicodeError:
        return None
    # Smallest indentation of the non-blank lines after the first.
    margin = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            margin = min(margin, len(line) - len(stripped))
    if lines:
        lines[0] = lines[0].lstrip()
    if margin < sys.maxsize:
        lines[1:] = [line[margin:] for line in lines[1:]]
    # Drop trailing, then leading, blank lines.
    while lines and not lines[-1]:
        lines.pop()
    while lines and not lines[0]:
        lines.pop(0)
    return '\n'.join(lines)
def getfile(object):
    """Work out which source or compiled file an object was defined in.

    Accepts a module, class, method, function, traceback, frame, or code
    object; anything else -- or a built-in with no __file__ -- raises
    TypeError."""
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        # Classes don't record their file; look it up via their module.
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    # Unwrap step by step: method -> function -> code; traceback ->
    # frame -> code.  The code object knows its file name.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))
# Result record for getmoduleinfo() (deprecated).
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Deprecated (warns DeprecationWarning); relies on the obsolete imp
    module.  Returns None implicitly when no known suffix matches."""
    warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning,
                  2)
    filename = os.path.basename(path)
    suffixes = [(-len(suffix), suffix, mode, mtype)
                    for suffix, mode, mtype in imp.get_suffixes()]
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
    """Return the module name for a given file, or None.

    A name is returned only when the file's extension is one of the
    import system's recognized module suffixes."""
    fname = os.path.basename(path)
    # Try longest suffixes first, in case they overlap.
    for suffix in sorted(importlib.machinery.all_suffixes(),
                         key=len, reverse=True):
        if fname.endswith(suffix):
            return fname[:-len(suffix)]
    return None
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.

    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    # Map byte-compiled (.pyc/.pyo-style) names back to the .py source.
    all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
    all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
    if any(filename.endswith(s) for s in all_bytecode_suffixes):
        filename = (os.path.splitext(filename)[0] +
                    importlib.machinery.SOURCE_SUFFIXES[0])
    elif any(filename.endswith(s) for s in
                 importlib.machinery.EXTENSION_SUFFIXES):
        # Extension modules (compiled code) have no Python source.
        return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
    # or it is in the linecache
    if filename in linecache.cache:
        return filename
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.

    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    filename = _filename
    if filename is None:
        filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(filename))
# Caches filled lazily by getmodule() while scanning sys.modules:
# absolute file name -> module name, and module name -> raw __file__.
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found.

    _filename is an internal optimization: the object's file name, when
    already known, lets the cache be consulted before the (expensive)
    sys.modules scan."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in list(sys.modules.items()):
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['builtins']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getfile(object)
    sourcefile = getsourcefile(object)
    # '<...>' file names (e.g. interactively compiled code) may still be
    # present in the linecache even without a real source file.
    if not sourcefile and file[:1] + file[-1:] != '<>':
        raise IOError('source code not available')
    file = sourcefile if sourcefile else file
    module = getmodule(object, file)
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')
    if ismodule(object):
        return lines, 0
    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')
    # Unwrap to the underlying code object, then scan backwards from
    # co_firstlineno for the 'def', 'lambda' or decorator line.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (IOError, TypeError):
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        # Skip blank lines and bare '#' lines before the first real comment.
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                # Walk upwards collecting contiguous same-indent comments.
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Trim bare '#' lines from both ends of the block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
# Raised by BlockFinder.tokeneater() to signal that the block has ended.
class EndOfBlock(Exception): pass
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0          # current INDENT/DEDENT nesting depth
        self.islambda = False    # True once a 'lambda' token was seen
        self.started = False     # True once def/class/lambda was seen
        self.passline = False    # True while skipping to end of line
        self.last = 1            # last source row belonging to the block
    def tokeneater(self, type, token, srowcol, erowcol, line):
        # Fed one token at a time (tokenize-style); raises EndOfBlock
        # when the block that starts the token stream is complete.
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines.

    Tokenizes the lines and stops at the end of the first def/class/
    lambda block (or at the first IndentationError in partial input)."""
    finder = BlockFinder()
    try:
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            finder.tokeneater(*tok)
    except (EndOfBlock, IndentationError):
        pass
    return lines[:finder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback,
    frame, or code object.  Returns (lines, lineno) where the lines
    cover just the object (the whole file for a module) and lineno is
    the 1-based first line in the original file (0 for a module).  An
    IOError is raised if the source code cannot be retrieved."""
    lines, lnum = findsource(object)
    if ismodule(object):
        return lines, 0
    return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.

    The argument may be a module, class, method, function, traceback,
    frame, or code object.  The source code is returned as a single
    string; an IOError is raised if it cannot be retrieved."""
    source_lines = getsourcelines(object)[0]
    return ''.join(source_lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree().

    Sorts *classes* in place by (module, name) and returns a nested
    list of (class, bases) pairs, recursing into *children*."""
    classes.sort(key=attrgetter('__module__', '__name__'))
    results = []
    for klass in classes:
        results.append((klass, klass.__bases__))
        if klass in children:
            results.append(walktree(children[klass], children, klass))
    return results
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}
    roots = []
    for c in classes:
        bases = c.__bases__
        if not bases:
            # No bases at all (i.e. object): it is a root itself.
            if c not in roots:
                roots.append(c)
            continue
        for parent in bases:
            children.setdefault(parent, [])
            if c not in children[parent]:
                children[parent].append(c)
            if unique and parent in classes:
                break
    # Parents that were referenced but not listed become extra roots.
    for parent in children:
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# Result record for getargs().
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
    """Get information about the arguments accepted by a code object.

    Three things are returned: (args, varargs, varkw), where
    'args' is the list of argument names.  Keyword-only arguments are
    appended.  'varargs' and 'varkw' are the names of the * and **
    arguments or None."""
    args, varargs, kwonlyargs, varkw = _getfullargs(co)
    return Arguments(args + kwonlyargs, varargs, varkw)
def _getfullargs(co):
    """Get information about the arguments accepted by a code object.

    Four things are returned: (args, varargs, kwonlyargs, varkw), where
    'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
    and 'varkw' are the names of the * and ** arguments or None.

    Raises TypeError if *co* is not a code object."""
    if not iscode(co):
        raise TypeError('{!r} is not a code object'.format(co))
    nargs = co.co_argcount
    names = co.co_varnames
    nkwargs = co.co_kwonlyargcount
    # co_varnames lists positional args first, then keyword-only args,
    # then (when the corresponding flag is set) the * and ** names.
    args = list(names[:nargs])
    kwonlyargs = list(names[nargs:nargs+nkwargs])
    nargs += nkwargs
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return args, varargs, kwonlyargs, varkw
# Result record for getargspec() (legacy API).
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names.
    'args' will include keyword-only argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.

    Use the getfullargspec() API for Python-3000 code, as annotations
    and keyword arguments are supported.  getargspec() will raise ValueError
    if the func has either annotations or keyword arguments.
    """
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
        getfullargspec(func)
    if kwonlyargs or ann:
        raise ValueError("Function has keyword-only arguments or annotations"
                         ", use getfullargspec() API which can support them")
    return ArgSpec(args, varargs, varkw, defaults)
# Result record for getfullargspec().
FullArgSpec = namedtuple('FullArgSpec',
    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    'kwonlyargs' is a list of keyword-only argument names.
    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
    'annotations' is a dictionary mapping argument names to annotations.

    The first four items in the tuple correspond to getargspec().
    """
    if ismethod(func):
        # Unwrap bound methods to the underlying function object.
        func = func.__func__
    if not isfunction(func):
        raise TypeError('{!r} is not a Python function'.format(func))
    args, varargs, kwonlyargs, varkw = _getfullargs(func.__code__)
    return FullArgSpec(args, varargs, varkw, func.__defaults__,
            kwonlyargs, func.__kwdefaults__, func.__annotations__)
# Result record for getargvalues().
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    args, varargs, varkw = getargs(frame.f_code)
    return ArgInfo(args, varargs, varkw, frame.f_locals)
def formatannotation(annotation, base_module=None):
    """Format an annotation for display.

    Classes are rendered by name, qualified with their module unless
    they come from builtins or from *base_module*; anything else is
    rendered with repr()."""
    if not isinstance(annotation, type):
        return repr(annotation)
    if annotation.__module__ in ('builtins', base_module):
        return annotation.__name__
    return '{}.{}'.format(annotation.__module__, annotation.__name__)
def formatannotationrelativeto(object):
    """Return a one-argument annotation formatter that treats *object*'s
    module as the base module, so classes from that module are rendered
    unqualified by formatannotation()."""
    module = getattr(object, '__module__', None)
    def _formatannotation(annotation):
        return formatannotation(annotation, module)
    return _formatannotation
#brython fix me
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation):
    """Format an argument spec from the values returned by getargspec
    or getfullargspec.

    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations).  The other five arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings.  The last argument is an optional
    function to format the sequence of arguments."""
    # NOTE(review): the mutable default values ({} and ()) are read-only
    # here, so sharing them across calls is harmless, if unconventional.
    def formatargandannotation(arg):
        # Render "name" or "name: annotation".
        result = formatarg(arg)
        if arg in annotations:
            result += ': ' + formatannotation(annotations[arg])
        return result
    specs = []
    if defaults:
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = formatargandannotation(arg)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(formatargandannotation(varargs)))
    else:
        if kwonlyargs:
            # A bare '*' separates keyword-only args when there is no *args.
            specs.append('*')
    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatargandannotation(kwonlyarg)
            if kwonlydefaults and kwonlyarg in kwonlydefaults:
                spec += formatvalue(kwonlydefaults[kwonlyarg])
            specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(formatargandannotation(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        result += formatreturns(formatannotation(annotations['return']))
    return result
#brython fix me
#def formatargvalues(args, varargs, varkw, locals,
# formatarg=str,
# formatvarargs=lambda name: '*' + name,
# formatvarkw=lambda name: '**' + name,
# formatvalue=lambda value: '=' + repr(value)):
# """Format an argument spec from the 4 values returned by getargvalues.
# The first four arguments are (args, varargs, varkw, locals). The
# next four arguments are the corresponding optional formatting functions
# that are called to turn names and values into strings. The ninth
# argument is an optional function to format the sequence of arguments."""
# def convert(name, locals=locals,
# formatarg=formatarg, formatvalue=formatvalue):
# return formatarg(name) + formatvalue(locals[name])
# specs = []
# for i in range(len(args)):
# specs.append(convert(args[i]))
# if varargs:
# specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
# if varkw:
# specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
# return '(' + ', '.join(specs) + ')'
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
def getcallargs(func, *positional, **named):
    """Get the mapping of arguments to values.

    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'.

    Raises TypeError in the same situations the interpreter would when
    actually calling 'func' with these arguments."""
    spec = getfullargspec(func)
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
    f_name = func.__name__
    arg2value = {}
    if ismethod(func) and func.__self__ is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.__self__,) + positional
    num_pos = len(positional)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # Bind positional values to the leading parameter names.
    n = min(num_pos, num_args)
    for i in range(n):
        arg2value[args[i]] = positional[i]
    if varargs:
        arg2value[varargs] = tuple(positional[n:])
    possible_kwargs = set(args + kwonlyargs)
    if varkw:
        arg2value[varkw] = {}
    for kw, value in named.items():
        if kw not in possible_kwargs:
            if not varkw:
                raise TypeError("%s() got an unexpected keyword argument %r" %
                                (f_name, kw))
            arg2value[varkw][kw] = value
            continue
        if kw in arg2value:
            raise TypeError("%s() got multiple values for argument %r" %
                            (f_name, kw))
        arg2value[kw] = value
    if num_pos > num_args and not varargs:
        _too_many(f_name, args, kwonlyargs, varargs, num_defaults,
                  num_pos, arg2value)
    if num_pos < num_args:
        req = args[:num_args - num_defaults]
        for arg in req:
            if arg not in arg2value:
                _missing_arguments(f_name, req, True, arg2value)
    # Fill in defaults for optional positional parameters left unbound.
    # (No-op when all of them were already bound above.)
    for i, arg in enumerate(args[num_args - num_defaults:]):
        if arg not in arg2value:
            arg2value[arg] = defaults[i]
    missing = 0
    for kwarg in kwonlyargs:
        if kwarg not in arg2value:
            # Bug fix: func.__kwdefaults__ (and hence kwonlydefaults) is
            # None when no keyword-only defaults exist; the old code then
            # crashed with "argument of type 'NoneType' is not iterable".
            if kwonlydefaults and kwarg in kwonlydefaults:
                arg2value[kwarg] = kwonlydefaults[kwarg]
            else:
                missing += 1
    if missing:
        _missing_arguments(f_name, kwonlyargs, False, arg2value)
    return arg2value
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.

    Returns a named tuple of dicts mapping the current nonlocal, global
    and builtin references as seen by the body of the function. A final
    set of unbound names that could not be resolved is also provided.
    """
    if ismethod(func):
        func = func.__func__
    if not isfunction(func):
        raise TypeError("'{!r}' is not a Python function".format(func))
    code = func.__code__
    # co_freevars names the nonlocals; their values live in __closure__
    # cells, in the same order.
    if func.__closure__ is None:
        nonlocal_vars = {}
    else:
        cells = (cell.cell_contents for cell in func.__closure__)
        nonlocal_vars = dict(zip(code.co_freevars, cells))
    # co_names covers globals and builtins; resolve each against the
    # function's module namespace first, then the builtins namespace.
    global_ns = func.__globals__
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in code.co_names:
        if name in ("None", "True", "False"):
            # Because these used to be builtins instead of keywords, they
            # may still show up as name references. We ignore them.
            continue
        if name in global_ns:
            global_vars[name] = global_ns[name]
        elif name in builtin_ns:
            builtin_vars[name] = builtin_ns[name]
        else:
            unbound_names.add(name)
    return ClosureVars(nonlocal_vars, global_vars,
                       builtin_vars, unbound_names)
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.

    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # A traceback remembers the line that was executing when the
        # exception occurred; the frame's own f_lineno may have moved on.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line, then clamp it
        # to the bounds of the source listing.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            # Source unavailable (e.g. compiled or interactive code).
            lines = index = None
        else:
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Return the line number *frame* is currently executing.

    f_lineno is computed lazily by the interpreter (a descriptor over
    the code object's line table), so delegating is always correct."""
    return frame.f_lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = frame
    while current:
        records.append((current,) + getframeinfo(current, context))
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = tb
    while current:
        records.append((current.tb_frame,) + getframeinfo(current, context))
        current = current.tb_next
    return records
def currentframe():
    """Return the frame of the caller, or None when the interpreter does
    not expose sys._getframe (it is a CPython implementation detail)."""
    getframe = getattr(sys, "_getframe", None)
    return None if getframe is None else getframe(1)
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = sys._getframe(1)
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    current_tb = sys.exc_info()[2]
    return getinnerframes(current_tb, context)
# ------------------------------------------------ static version of getattr
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
    """Fetch *attr* from obj's instance __dict__ only, bypassing
    descriptors; returns _sentinel when the object has no __dict__ or
    the key is absent.  dict.get is called unbound so a subclassed
    __dict__ cannot intercept the lookup."""
    try:
        instance_dict = object.__getattribute__(obj, "__dict__")
    except AttributeError:
        instance_dict = {}
    return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
    """Statically look up *attr* through klass's MRO class dicts,
    returning _sentinel when not found.  Classes whose '__dict__' is
    shadowed by a non-standard descriptor are skipped entirely."""
    for base in _static_getmro(klass):
        if _shadowed_dict(type(base)) is not _sentinel:
            continue
        try:
            return base.__dict__[attr]
        except KeyError:
            pass
    return _sentinel
def _is_type(obj):
    """Return True when *obj* is a class: the type.__mro__ descriptor
    only accepts type instances, raising TypeError otherwise."""
    try:
        _static_getmro(obj)
    except TypeError:
        return False
    else:
        return True
def _shadowed_dict(klass):
    """Scan klass's MRO for a '__dict__' entry that is NOT the standard
    getset descriptor; return it if found, else _sentinel.

    Such a shadowing entry means instance __dict__ access cannot be
    trusted for static attribute lookup."""
    dict_descriptor = type.__dict__["__dict__"]
    for base in _static_getmro(klass):
        try:
            class_dict = dict_descriptor.__get__(base)["__dict__"]
        except KeyError:
            continue
        is_standard = (type(class_dict) is types.GetSetDescriptorType and
                       class_dict.__name__ == "__dict__" and
                       class_dict.__objclass__ is base)
        if not is_standard:
            return class_dict
    return _sentinel
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
       descriptor protocol, __getattr__ or __getattribute__.

       Note: this function may not be able to retrieve all attributes
       that getattr can fetch (like dynamically created attributes)
       and may find attributes that getattr can't (like descriptors
       that raise AttributeError). It can also return descriptor objects
       instead of instance members in some cases. See the
       documentation for details.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        klass = type(obj)
        # Only trust the instance __dict__ when the class exposes the
        # plain __dict__ slot (or a member descriptor); anything else
        # could lie about the instance's attributes.
        dict_attr = _shadowed_dict(klass)
        if (dict_attr is _sentinel or
            type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        klass = obj
    klass_result = _check_class(klass, attr)
    if instance_result is not _sentinel and klass_result is not _sentinel:
        # A data descriptor (has both __get__ and __set__) on the class
        # takes precedence over the instance attribute, mirroring the
        # interpreter's normal lookup rules.
        if (_check_class(type(klass_result), '__get__') is not _sentinel and
            _check_class(type(klass_result), '__set__') is not _sentinel):
            return klass_result
    if instance_result is not _sentinel:
        return instance_result
    if klass_result is not _sentinel:
        return klass_result
    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr]
                except KeyError:
                    pass
    if default is not _sentinel:
        return default
    raise AttributeError(attr)
# ------------------------------------------------ generator introspection
# Generator life-cycle states, returned by getgeneratorstate().
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
    """Get current state of a generator-iterator.

    Possible states are:
      GEN_CREATED: Waiting to start execution.
      GEN_RUNNING: Currently being executed by the interpreter.
      GEN_SUSPENDED: Currently suspended at a yield expression.
      GEN_CLOSED: Execution has completed.
    """
    if generator.gi_running:
        return GEN_RUNNING
    frame = generator.gi_frame
    if frame is None:
        # The frame is dropped once the generator finishes or is closed.
        return GEN_CLOSED
    # f_lasti is -1 before the first instruction has executed.
    return GEN_CREATED if frame.f_lasti == -1 else GEN_SUSPENDED
def getgeneratorlocals(generator):
    """
    Get the mapping of generator local variables to their current values.

    A dict is returned, with the keys the local variable names and values
    the bound values."""
    if not isgenerator(generator):
        raise TypeError("'{!r}' is not a Python generator".format(generator))
    frame = getattr(generator, "gi_frame", None)
    # Once the generator finishes, gi_frame is None and no locals remain.
    return frame.f_locals if frame is not None else {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def _get_user_defined_method(cls, method_name):
try:
meth = getattr(cls, method_name)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
    '''Get a signature object for the passed callable.

    Resolution order: explicit __signature__ attribute, decorator
    __wrapped__ chain, plain functions, functools.partial objects,
    classes (metaclass __call__, then __new__, then __init__), and
    finally arbitrary callable instances via their type's __call__.
    Raises TypeError for non-callables and ValueError when no
    signature can be determined.'''
    if not callable(obj):
        raise TypeError('{!r} is not a callable object'.format(obj))
    if isinstance(obj, types.MethodType):
        # In this case we skip the first parameter of the underlying
        # function (usually `self` or `cls`).
        sig = signature(obj.__func__)
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])
    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig
    try:
        # Was this function wrapped by a decorator?
        wrapped = obj.__wrapped__
    except AttributeError:
        pass
    else:
        return signature(wrapped)
    if isinstance(obj, types.FunctionType):
        return Signature.from_function(obj)
    if isinstance(obj, functools.partial):
        # Start from the wrapped callable's signature and remove/adjust
        # the parameters the partial object has already bound.
        sig = signature(obj.func)
        new_params = OrderedDict(sig.parameters.items())
        partial_args = obj.args or ()
        partial_keywords = obj.keywords or {}
        try:
            ba = sig.bind_partial(*partial_args, **partial_keywords)
        except TypeError as ex:
            msg = 'partial object {!r} has incorrect arguments'.format(obj)
            raise ValueError(msg) from ex
        for arg_name, arg_value in ba.arguments.items():
            param = new_params[arg_name]
            if arg_name in partial_keywords:
                # We set a new default value, because the following code
                # is correct:
                #
                #   >>> def foo(a): print(a)
                #   >>> print(partial(partial(foo, a=10), a=20)())
                #   20
                #   >>> print(partial(partial(foo, a=10), a=20)(a=30))
                #   30
                #
                # So, with 'partial' objects, passing a keyword argument is
                # like setting a new default value for the corresponding
                # parameter
                #
                # We also mark this parameter with '_partial_kwarg'
                # flag. Later, in '_bind', the 'default' value of this
                # parameter will be added to 'kwargs', to simulate
                # the 'functools.partial' real call.
                new_params[arg_name] = param.replace(default=arg_value,
                                                     _partial_kwarg=True)
            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
                  not param._partial_kwarg):
                # Positionally bound parameter: drop it from the signature.
                new_params.pop(arg_name)
        return sig.replace(parameters=new_params.values())
    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass
        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = signature(new)
            else:
                # Finally, we should have at least __init__ implemented
                init = _get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = signature(init)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])
    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {!r}'.format(obj)
        raise ValueError(msg)
    raise ValueError('callable {!r} is not supported by signature'.format(obj))
class _void:
    '''A private marker - used in Parameter & Signature to mean
    "argument not supplied" in replace()-style methods, where None
    and _empty are both legitimate values.'''
class _empty:
    '''Marker for the absence of a default value or annotation.'''
    pass
class _ParameterKind(int):
def __new__(self, *args, name):
obj = int.__new__(self, *args)
obj._name = name
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter:
    '''Represents a parameter in a function signature.

    Has the following public attributes:

    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified. If the
        parameter has no default value, this attribute is not set.
    * annotation
        The annotation for the parameter if specified. If the
        parameter has no annotation, this attribute is not set.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''
    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD
    empty = _empty
    def __init__(self, name, kind, *, default=_empty, annotation=_empty,
                 _partial_kwarg=False):
        """Validate and store the parameter description.

        Raises ValueError for an unknown kind, a default on a *args/
        **kwargs parameter, or an invalid name."""
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind
        if default is not _empty:
            # *args / **kwargs parameters can never carry defaults.
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation
        if name is None:
            # Unnamed (None) is tolerated only for positional-only
            # parameters; Signature.__init__ renames them by index.
            if kind != _POSITIONAL_ONLY:
                raise ValueError("None is not a valid name for a "
                                 "non-positional-only parameter")
            self._name = name
        else:
            name = str(name)
            if kind != _POSITIONAL_ONLY and not name.isidentifier():
                msg = '{!r} is not a valid parameter name'.format(name)
                raise ValueError(msg)
            self._name = name
        # Set by signature() for functools.partial keyword bindings;
        # consulted by Signature._bind.
        self._partial_kwarg = _partial_kwarg
    @property
    def name(self):
        return self._name
    @property
    def default(self):
        return self._default
    @property
    def annotation(self):
        return self._annotation
    @property
    def kind(self):
        return self._kind
    def replace(self, *, name=_void, kind=_void, annotation=_void,
                default=_void, _partial_kwarg=_void):
        '''Creates a customized copy of the Parameter.'''
        if name is _void:
            name = self._name
        if kind is _void:
            kind = self._kind
        if annotation is _void:
            annotation = self._annotation
        if default is _void:
            default = self._default
        if _partial_kwarg is _void:
            _partial_kwarg = self._partial_kwarg
        return type(self)(name, kind, default=default, annotation=annotation,
                          _partial_kwarg=_partial_kwarg)
    def __str__(self):
        # Render like source code: "name: annotation = default", with
        # '*' / '**' prefixes for variadic kinds and '<name>' for
        # positional-only parameters.
        kind = self.kind
        formatted = self._name
        if kind == _POSITIONAL_ONLY:
            if formatted is None:
                formatted = ''
            formatted = '<{}>'.format(formatted)
        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{}:{}'.format(formatted,
                                       formatannotation(self._annotation))
        if self._default is not _empty:
            formatted = '{}={}'.format(formatted, repr(self._default))
        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted
        return formatted
    def __repr__(self):
        return '<{} at {:#x} {!r}>'.format(self.__class__.__name__,
                                           id(self), self.name)
    def __eq__(self, other):
        # NOTE: compares all fields except _partial_kwarg, which is an
        # implementation detail of functools.partial support.
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)
    def __ne__(self, other):
        return not self.__eq__(other)
class BoundArguments:
    '''Result of `Signature.bind` call.  Holds the mapping of arguments
    to the function's parameters.

    Has the following public attributes:

    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    '''
    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature
    @property
    def signature(self):
        return self._signature
    @property
    def args(self):
        # Walk parameters in declaration order, collecting values until
        # the first one that must be passed by keyword; everything from
        # that point on belongs in 'kwargs' instead.
        args = []
        for param_name, param in self._signature.parameters.items():
            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                param._partial_kwarg):
                # Keyword arguments mapped by 'functools.partial'
                # (Parameter._partial_kwarg is True) are mapped
                # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
                # KEYWORD_ONLY
                break
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)
        return tuple(args)
    @property
    def kwargs(self):
        # Mirror image of 'args': skip the leading positionally-passable
        # parameters, then collect the rest as keyword arguments.
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                    param._partial_kwarg):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        # A gap in the positional values forces all
                        # subsequent bindings to be keyword arguments.
                        kwargs_started = True
                        continue
            if not kwargs_started:
                continue
            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg
        return kwargs
    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)
    def __ne__(self, other):
        return not self.__eq__(other)
class Signature:
    '''A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.

    A Signature object has the following public attributes and methods:

    * parameters : OrderedDict
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    * return_annotation : object
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is not set.
    * bind(*args, **kwargs) -> BoundArguments
        Creates a mapping from positional and keyword arguments to
        parameters.
    * bind_partial(*args, **kwargs) -> BoundArguments
        Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior.)
    '''
    __slots__ = ('_return_annotation', '_parameters')
    # Hooks so subclasses can substitute their own Parameter /
    # BoundArguments implementations.
    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments
    empty = _empty
    def __init__(self, parameters=None, *, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'.  All arguments are optional.

        With __validate_parameters__ (the default) the parameters are
        checked for legal ordering and duplicate names; internal
        callers that already guarantee validity pass False to skip it.
        '''
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                top_kind = _POSITIONAL_ONLY
                for idx, param in enumerate(parameters):
                    kind = param.kind
                    # Parameter kinds must be non-decreasing (positional
                    # before *args before keyword-only before **kwargs).
                    if kind < top_kind:
                        msg = 'wrong parameter order: {} before {}'
                        msg = msg.format(top_kind, param.kind)
                        raise ValueError(msg)
                    else:
                        top_kind = kind
                    name = param.name
                    if name is None:
                        # Unnamed positional-only parameters get their
                        # index as a stand-in name.
                        name = str(idx)
                        param = param.replace(name=name)
                    if name in params:
                        msg = 'duplicate parameter name: {!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                params = OrderedDict(((param.name, param)
                                                for param in parameters))
        # Read-only view: Signature objects are conceptually immutable.
        self._parameters = types.MappingProxyType(params)
        self._return_annotation = return_annotation
    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function.

        Reads the code object's counts/flags and the function's
        __defaults__/__kwdefaults__/__annotations__ directly.'''
        if not isinstance(func, types.FunctionType):
            raise TypeError('{!r} is not a Python function'.format(func))
        Parameter = cls._parameter_cls
        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        keyword_only_count = func_code.co_kwonlyargcount
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = func.__annotations__
        defaults = func.__defaults__
        kwdefaults = func.__kwdefaults__
        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0
        parameters = []
        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))
        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))
        # *args (CO_VARARGS flag)
        if func_code.co_flags & 0x04:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))
        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs (CO_VARKEYWORDS flag); its name follows *args in
        # co_varnames when both are present.
        if func_code.co_flags & 0x08:
            index = pos_count + keyword_only_count
            if func_code.co_flags & 0x04:
                index += 1
            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))
        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=False)
    @property
    def parameters(self):
        return self._parameters
    @property
    def return_annotation(self):
        return self._return_annotation
    def replace(self, *, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''
        if parameters is _void:
            parameters = self.parameters.values()
        if return_annotation is _void:
            return_annotation = self._return_annotation
        return type(self)(parameters,
                          return_annotation=return_annotation)
    def __eq__(self, other):
        # Signatures are equal when keyword-only parameters match by
        # name (order irrelevant) and all other parameters match by
        # both name and position.
        if (not issubclass(type(other), Signature) or
                    self.return_annotation != other.return_annotation or
                    len(self.parameters) != len(other.parameters)):
            return False
        other_positions = {param: idx
                           for idx, param in enumerate(other.parameters.keys())}
        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                                    param != other.parameters[param_name]):
                        return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _bind(self, args, kwargs, *, partial=False):
        '''Private method.  Don't use directly.

        Core of bind()/bind_partial(): maps *args/*kwargs onto this
        signature's parameters, raising TypeError exactly where a real
        call would fail.  With partial=True, missing arguments without
        defaults are tolerated.'''
        arguments = OrderedDict()
        parameters = iter(self.parameters.values())
        parameters_ex = ()
        arg_vals = iter(args)
        if partial:
            # Support for binding arguments to 'functools.partial' objects.
            # See 'functools.partial' case in 'signature()' implementation
            # for details.
            for param_name, param in self.parameters.items():
                if (param._partial_kwarg and param_name not in kwargs):
                    # Simulating 'functools.partial' behavior
                    kwargs[param_name] = param.default
        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
                        # Re-queue this parameter for the keyword phase.
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                                                param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments') from None
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')
                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break
                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))
                    arguments[param.name] = arg_val
        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _POSITIONAL_ONLY:
                # This should never happen in case of a properly built
                # Signature object (but let's have this check here
                # to ensure correct behaviour just in case)
                raise TypeError('{arg!r} parameter is positional only, '
                                'but was passed as a keyword'. \
                                format(arg=param.name))
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue
            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter.  It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                                                param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name)) from None
            else:
                arguments[param_name] = arg_val
        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')
        return self._bound_arguments_cls(self, arguments)
    def bind(__bind_self, *args, **kwargs):
        '''Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature.  Raises `TypeError`
        if the passed arguments can not be bound.
        '''
        return __bind_self._bind(args, kwargs)
    def bind_partial(__bind_self, *args, **kwargs):
        '''Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        '''
        return __bind_self._bind(args, kwargs, partial=True)
    def __str__(self):
        # Render like a def statement's parameter list, inserting a bare
        # '*' before keyword-only parameters when no *args is present.
        result = []
        render_kw_only_separator = True
        for idx, param in enumerate(self.parameters.values()):
            formatted = str(param)
            kind = param.kind
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False
            result.append(formatted)
        rendered = '({})'.format(', '.join(result))
        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {}'.format(anno)
        return rendered
| gpl-3.0 |
mfnch/pyrtist | pyrtist/examples/planes.py | 1 | 2659 | #!PYRTIST:VERSION:0:0:1
from pyrtist.lib2d import Point, Tri
#!PYRTIST:REFPOINTS:BEGIN
bbox1 = Point(-37.5826573705, 64.8614862385)
bbox2 = Point(146.843632129, -72.3863575296)
bg1 = Point(54.63065, -25.8746116667)
bg2 = Point(132.618249036, -25.8746116667)
fg1 = Point(25.9196635922, -64.5110530769)
fg2 = Point(127.536394984, -65.2736212928)
gui1 = Point(-35.0423175368, -17.7413972927)
gui2 = Point(-3.16141042462, -15.7586425439)
gui3 = Point(17.4404749672, -9.35403111111)
gui4 = Point(18.3549837973, -3.50828722222)
gui5 = Point(17.7961162534, 3.099945)
#!PYRTIST:REFPOINTS:END
# The REFPOINTS block above is machine-managed by the Pyrtist GUI
# (draggable reference points); do not edit it by hand.
from pyrtist.lib2d import *
from pyrtist.lib2d.geom import intersection
from pyrtist.lib2d.tools import Bar, Sphere
w = Window()
w.BBox(bbox1, bbox2)
# Colors of the planes.
c1 = Color.yellow
c2 = Color.red
c3 = Color.green
c4 = Color.blue
# Some controls to let you see through the planes.
plane1 = Bar(pos=gui1, cursor=gui2, fg_color=c1) >> w
plane2 = Bar(cursor=gui3, fg_color=c2) >> w
plane3 = Bar(cursor=gui4, fg_color=c3) >> w
plane4 = Bar(cursor=gui5, fg_color=c4) >> w
# Each bar's current value drives the alpha (opacity) of one color.
c1.a = plane1.value
c2.a = plane2.value
c3.a = plane3.value
c4.a = plane4.value
# Build the front square fg1..fg4 from its bottom edge (fg1-fg2):
# force the edge horizontal, then offset vertically by its own length.
fg2.y = fg1.y
lx = fg2 - fg1
ly = Point(0, lx.x)
fg3 = fg1 + ly
fg4 = fg2 + ly
# Same construction for the rear square bg1..bg4.
bg2.y = bg1.y
lx = bg2 - bg1
ly = Point(0, lx.x)
bg3 = bg1 + ly
bg4 = bg2 + ly
# Key points: the box center and the centers of four of its faces,
# each obtained as the intersection of two diagonals.
center = intersection((fg1, bg4), (fg3, bg2))
down = intersection((fg1, bg2), (bg1, fg2))
up = intersection((fg3, bg4), (bg3, fg4))
front = intersection((fg1, fg4), (fg2, fg3))
rear = intersection((bg1, bg4), (bg2, bg3))
# Small markers on the rear square's corners.
sph = Sphere(Color.blue, 2)
w << Args(Sphere(sph, bg1), Sphere(sph, bg2),
          Sphere(sph, bg3), Sphere(sph, bg4))
s1 = Style(Border(Color.black, 0.2, Join.round))
# Triangular facets of the intersecting planes. NOTE(review): the list
# appears ordered roughly back-to-front so the translucent colors layer
# correctly (painter's algorithm) -- confirm before reordering.
w << Args(
  Poly(c4, s1, bg2, rear, center),
  Poly(c3, s1, bg4, rear, center),
  Poly(c3, s1, bg1, rear, center),
  Poly(c1, s1, bg2, down, center),
  Poly(c4, s1, fg2, center, bg2),
  Poly(c1, s1, bg2, center, bg4),
  Poly(c4, s1, bg3, rear, center),
  Poly(c2, s1, bg1, center, bg3),
  Poly(c2, s1, fg2, center, fg4),
  Poly(c2, s1, bg1, down, center),
  Poly(c2, s1, fg2, down, center),
  Poly(c1, s1, bg4, up, center),
  Poly(c3, s1, fg1, center, bg1),
  Poly(c3, s1, fg4, center, bg4),
  Poly(c2, s1, bg3, up, center),
  Poly(c4, s1, fg3, center, bg3),
  Poly(c2, s1, fg4, up, center),
  Poly(c1, s1, fg1, center, fg3),
  Poly(c1, s1, fg1, down, center),
  Poly(c1, s1, fg3, up, center),
  Poly(c4, s1, fg2, front, center),
  Poly(c3, s1, fg1, front, center),
  Poly(c3, s1, fg4, front, center),
  Poly(c4, s1, fg3, front, center)
)
# Larger markers on the front square's corners.
sph = Sphere(sph, 3)
w << Args(Sphere(sph, fg1), Sphere(sph, fg2),
          Sphere(sph, fg3), Sphere(sph, fg4))
gui(w)
| lgpl-2.1 |
yangsensen/Crawler | X500/bs4/builder/_html5lib.py | 423 | 10647 | __all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree."""
    features = ['html5lib', PERMISSIVE, HTML_5, HTML]
    def prepare_markup(self, markup, user_specified_encoding):
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding
        # Yield a single candidate: (markup, try_encoding,
        # document_declared_encoding, is_rewrapped). html5lib does its
        # own encoding sniffing, so no alternatives are offered.
        yield (markup, None, None, False)
    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        """Parse `markup` into self.soup via html5lib."""
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)
        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, unicode):  # Python 2 module: `unicode` builtin
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
    def create_treebuilder(self, namespaceHTMLElements):
        # Factory handed to html5lib.HTMLParser; a reference is kept so
        # the builder remains reachable after parsing.
        self.underlying_builder = TreeBuilderForHtml5lib(
            self.soup, namespaceHTMLElements)
        return self.underlying_builder
    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """html5lib TreeBuilder that materializes nodes directly into a
    BeautifulSoup object."""
    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
    def documentClass(self):
        # The soup object itself doubles as the document root node.
        self.soup.reset()
        return Element(self.soup, self.soup, None)
    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)
    def elementClass(self, name, namespace):
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)
    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)
    def fragmentClass(self):
        # NOTE(review): `BeautifulSoup` is not imported in this module, so
        # this would raise NameError if html5lib ever called it -- confirm
        # against upstream before relying on fragment parsing.
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)
    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)
    def getDocument(self):
        return self.soup
    def getFragment(self):
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Mapping-style view of a tag's attributes, as html5lib expects.

    Reads are served from a snapshot taken at construction time, while
    writes are forwarded to the underlying element (and therefore do not
    show up in the snapshot) -- this mirrors the original behaviour.
    """
    def __init__(self, element):
        self.element = element
        # Snapshot of the attributes at wrap time.
        self.attrs = dict(self.element.attrs)
    def __iter__(self):
        return list(self.attrs.items()).__iter__()
    def __setitem__(self, name, value):
        # Forward the write to the real element. (A leftover debug
        # expression -- the no-op tuple `"set attr", name, value` --
        # used to precede this line; it evaluated to a discarded tuple
        # and has been removed.)
        self.element[name] = value
    def items(self):
        return list(self.attrs.items())
    def keys(self):
        return list(self.attrs.keys())
    def __len__(self):
        return len(self.attrs)
    def __getitem__(self, name):
        return self.attrs[name]
    def __contains__(self, name):
        return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
    """html5lib Node adapter wrapping a Beautiful Soup Tag."""
    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace
    def appendChild(self, node):
        """Append a node (Element/TextNode/Tag or bare string) to this
        element, coalescing adjacent strings into one NavigableString."""
        string_child = child = None
        if isinstance(node, basestring):
            # Some other piece of code decided to pass in a string
            # instead of creating a TextElement object to contain the
            # string.
            string_child = child = node
        elif isinstance(node, Tag):
            # Some other piece of code decided to pass in a Tag
            # instead of creating an Element object to contain the
            # Tag.
            child = node
        elif node.element.__class__ == NavigableString:
            string_child = child = node.element
        else:
            child = node.element
        if not isinstance(child, basestring) and child.parent is not None:
            node.element.extract()
        if (string_child and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + string_child)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            if isinstance(node, basestring):
                # Create a brand new NavigableString from this string.
                child = self.soup.new_string(node)
            # Tell Beautiful Soup to act as if it parsed this element
            # immediately after the parent's last descendant. (Or
            # immediately after the parent, if it has no children.)
            if self.element.contents:
                most_recent_element = self.element._last_descendant(False)
            else:
                most_recent_element = self.element
            self.soup.object_was_parsed(
                child, parent=self.element,
                most_recent_element=most_recent_element)
    def getAttributes(self):
        return AttrList(self.element)
    def setAttributes(self, attributes):
        """Replace the tag's attributes, converting html5lib's tuple keys
        into NamespacedAttribute keys and re-running the builder's
        CDATA-list and substitution handling."""
        if attributes is not None and len(attributes) > 0:
            # (Removed an unused local: `converted_attributes = []` was
            # assigned but never read.)
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value
            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in attributes.items():
                self.element[name] = value
            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)
    attributes = property(getAttributes, setAttributes)
    def insertText(self, data, insertBefore=None):
        """Insert string data, optionally before a reference node."""
        if insertBefore:
            text = TextNode(self.soup.new_string(data), self.soup)
            # BUG FIX: pass the freshly created TextNode, not the raw
            # string `data`. insertBefore() dereferences `.element` on its
            # argument and would raise AttributeError on a str (this
            # matches the upstream Beautiful Soup fix).
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(data)
    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self
    def removeChild(self, node):
        node.element.extract()
    def reparentChildren(self, new_parent):
        """Move all of this tag's children into another tag."""
        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
        # are removed.
        final_next_element = element.next_sibling
        new_parents_last_descendant = new_parent_element._last_descendant(False, False)
        if len(new_parent_element.contents) > 0:
            # The new parent already contains children. We will be
            # appending this tag's children to the end.
            new_parents_last_child = new_parent_element.contents[-1]
            new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
        else:
            # The new parent contains no children.
            new_parents_last_child = None
            new_parents_last_descendant_next_element = new_parent_element.next_element
        to_append = element.contents
        # (Removed an unused local: `append_after = new_parent.element.contents`
        # was assigned but never read.)
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent
            first_child = to_append[0]
            first_child.previous_element = new_parents_last_descendant
            first_child.previous_sibling = new_parents_last_child
            # Fix the last child's next_element and next_sibling
            last_child = to_append[-1]
            last_child.next_element = new_parents_last_descendant_next_element
            last_child.next_sibling = None
        for child in to_append:
            child.parent = new_parent_element
            new_parent_element.contents.append(child)
        # Now that this element has no children, change its .next_element.
        element.contents = []
        element.next_element = final_next_element
    def cloneNode(self):
        """Shallow clone: same name/namespace with copied attributes."""
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key,value in self.attributes:
            node.attributes[key] = value
        return node
    def hasContent(self):
        return self.element.contents
    def getNameTuple(self):
        if self.namespace == None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name
    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Element wrapper for leaf strings (NavigableString / Comment)."""
    def __init__(self, element, soup):
        # Deliberately skips Element.__init__: text nodes have no tag name.
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.element = element
        self.soup = soup
    def cloneNode(self):
        raise NotImplementedError
| gpl-2.0 |
j-carl/ansible | test/support/integration/plugins/modules/cs_role.py | 31 | 5384 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_role
short_description: Manages user roles on Apache CloudStack based clouds.
description:
- Create, update, delete user roles.
version_added: '2.3'
author: René Moser (@resmo)
options:
name:
description:
- Name of the role.
type: str
required: true
uuid:
description:
- ID of the role.
- If provided, I(uuid) is used as key.
type: str
aliases: [ id ]
role_type:
description:
- Type of the role.
- Only considered for creation.
type: str
default: User
choices: [ User, DomainAdmin, ResourceAdmin, Admin ]
description:
description:
- Description of the role.
type: str
state:
description:
- State of the role.
type: str
default: present
choices: [ present, absent ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Ensure an user role is present
cs_role:
name: myrole_user
delegate_to: localhost
- name: Ensure a role having particular ID is named as myrole_user
cs_role:
name: myrole_user
id: 04589590-ac63-4ffc-93f5-b698b8ac38b6
delegate_to: localhost
- name: Ensure a role is absent
cs_role:
name: myrole_user
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the role.
returned: success
type: str
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the role.
returned: success
type: str
sample: myrole
description:
description: Description of the role.
returned: success
type: str
sample: "This is my role description"
role_type:
description: Type of the role.
returned: success
type: str
sample: User
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackRole(AnsibleCloudStack):
    """CloudStack role management, built on the shared AnsibleCloudStack
    helper (query_api, has_changed, result dict, ...)."""
    def __init__(self, module):
        super(AnsibleCloudStackRole, self).__init__(module)
        # Map CloudStack API keys to the names returned to Ansible.
        self.returns = {
            'type': 'role_type',
        }
    def get_role(self):
        """Look up the role by uuid if given, otherwise by name.

        Returns the raw role dict from the API, or None if not found.
        """
        uuid = self.module.params.get('uuid')
        if uuid:
            args = {
                'id': uuid,
            }
            roles = self.query_api('listRoles', **args)
            if roles:
                return roles['role'][0]
        else:
            args = {
                'name': self.module.params.get('name'),
            }
            roles = self.query_api('listRoles', **args)
            if roles:
                return roles['role'][0]
        return None
    def present_role(self):
        """Ensure the role exists: create it or update it as needed."""
        role = self.get_role()
        if role:
            role = self._update_role(role)
        else:
            role = self._create_role(role)
        return role
    def _create_role(self, role):
        # `role` is always None on entry (kept for signature symmetry
        # with _update_role); in check mode it is returned unchanged.
        self.result['changed'] = True
        args = {
            'name': self.module.params.get('name'),
            'type': self.module.params.get('role_type'),
            'description': self.module.params.get('description'),
        }
        if not self.module.check_mode:
            res = self.query_api('createRole', **args)
            role = res['role']
        return role
    def _update_role(self, role):
        """Update name/description when they differ from module params."""
        args = {
            'id': role['id'],
            'name': self.module.params.get('name'),
            'description': self.module.params.get('description'),
        }
        if self.has_changed(args, role):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('updateRole', **args)
                # The API as in 4.9 does not return an updated role yet
                if 'role' not in res:
                    role = self.get_role()
                else:
                    role = res['role']
        return role
    def absent_role(self):
        """Delete the role if present; returns the (pre-delete) role."""
        role = self.get_role()
        if role:
            self.result['changed'] = True
            args = {
                'id': role['id'],
            }
            if not self.module.check_mode:
                self.query_api('deleteRole', **args)
        return role
def main():
    """Ansible module entry point: parse arguments, apply the desired
    role state and report the result via exit_json."""
    spec = cs_argument_spec()
    spec.update(dict(
        uuid=dict(aliases=['id']),
        name=dict(required=True),
        description=dict(),
        role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'),
        state=dict(choices=['present', 'absent'], default='present'),
    ))
    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )
    handler = AnsibleCloudStackRole(module)
    # Dispatch on the requested state; 'present' covers create + update.
    if module.params.get('state') == 'absent':
        role = handler.absent_role()
    else:
        role = handler.present_role()
    module.exit_json(**handler.get_result(role))
| gpl-3.0 |
dockerhn/compose | compose/container.py | 13 | 6779 | from __future__ import absolute_import
from __future__ import unicode_literals
from functools import reduce
import six
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
class Container(object):
    """
    Represents a Docker container, constructed from the output of
    GET /containers/:id:/json.
    """
    def __init__(self, client, dictionary, has_been_inspected=False):
        # `dictionary` is the raw API payload; `has_been_inspected` marks
        # whether it came from the full inspect endpoint or only from ps.
        self.client = client
        self.dictionary = dictionary
        self.has_been_inspected = has_been_inspected
    @classmethod
    def from_ps(cls, client, dictionary, **kwargs):
        """
        Construct a container object from the output of GET /containers/json.
        """
        name = get_container_name(dictionary)
        if name is None:
            return None
        # Normalize the ps payload to the inspect shape ('/'-prefixed Name).
        new_dictionary = {
            'Id': dictionary['Id'],
            'Image': dictionary['Image'],
            'Name': '/' + name,
        }
        return cls(client, new_dictionary, **kwargs)
    @classmethod
    def from_id(cls, client, id):
        """Construct a fully-inspected container from its id."""
        return cls(client, client.inspect_container(id))
    @classmethod
    def create(cls, client, **options):
        """Create a new container and return its Container object."""
        response = client.create_container(**options)
        return cls.from_id(client, response['Id'])
    @property
    def id(self):
        return self.dictionary['Id']
    @property
    def image(self):
        return self.dictionary['Image']
    @property
    def image_config(self):
        return self.client.inspect_image(self.image)
    @property
    def short_id(self):
        # Abbreviated id, as shown by `docker ps`.
        return self.id[:10]
    @property
    def name(self):
        # Strip the leading '/' the API puts on container names.
        return self.dictionary['Name'][1:]
    @property
    def service(self):
        # Compose service name, stored as a container label.
        return self.labels.get(LABEL_SERVICE)
    @property
    def name_without_project(self):
        # "<service>_<number>" when the name follows the compose
        # convention, otherwise the raw container name.
        project = self.labels.get(LABEL_PROJECT)
        if self.name.startswith('{0}_{1}'.format(project, self.service)):
            return '{0}_{1}'.format(self.service, self.number)
        else:
            return self.name
    @property
    def number(self):
        """Per-service instance number, read from a compose label."""
        number = self.labels.get(LABEL_CONTAINER_NUMBER)
        if not number:
            raise ValueError("Container {0} does not have a {1} label".format(
                self.short_id, LABEL_CONTAINER_NUMBER))
        return int(number)
    @property
    def ports(self):
        # Port data is only present in inspect output, so force one.
        self.inspect_if_not_inspected()
        return self.get('NetworkSettings.Ports') or {}
    @property
    def human_readable_ports(self):
        def format_port(private, public):
            # `public` is a list of {HostIp, HostPort} dicts, or falsy when
            # the port is exposed but not published.
            if not public:
                return private
            return '{HostIp}:{HostPort}->{private}'.format(
                private=private, **public[0])
        return ', '.join(format_port(*item)
                         for item in sorted(six.iteritems(self.ports)))
    @property
    def labels(self):
        return self.get('Config.Labels') or {}
    @property
    def log_config(self):
        return self.get('HostConfig.LogConfig') or None
    @property
    def human_readable_state(self):
        if self.is_paused:
            return 'Paused'
        if self.is_running:
            return 'Ghost' if self.get('State.Ghost') else 'Up'
        else:
            return 'Exit %s' % self.get('State.ExitCode')
    @property
    def human_readable_command(self):
        entrypoint = self.get('Config.Entrypoint') or []
        cmd = self.get('Config.Cmd') or []
        return ' '.join(entrypoint + cmd)
    @property
    def environment(self):
        # "KEY=VALUE" strings -> dict; split only on the first '='.
        return dict(var.split("=", 1) for var in self.get('Config.Env') or [])
    @property
    def is_running(self):
        return self.get('State.Running')
    @property
    def is_paused(self):
        return self.get('State.Paused')
    @property
    def log_driver(self):
        return self.get('HostConfig.LogConfig.Type')
    @property
    def has_api_logs(self):
        # Only the json-file driver (or an unset one) supports the logs API.
        log_type = self.log_driver
        return not log_type or log_type == 'json-file'
    def get(self, key):
        """Return a value from the container or None if the value is not set.

        :param key: a string using dotted notation for nested dictionary
            lookups, e.g. 'NetworkSettings.Ports'
        """
        self.inspect_if_not_inspected()
        def get_value(dictionary, key):
            # Treat missing intermediate dicts as empty so the whole
            # chain degrades to None instead of raising.
            return (dictionary or {}).get(key)
        return reduce(get_value, key.split('.'), self.dictionary)
    def get_local_port(self, port, protocol='tcp'):
        # Returns "HostIp:HostPort" for the first binding, or None.
        port = self.ports.get("%s/%s" % (port, protocol))
        return "{HostIp}:{HostPort}".format(**port[0]) if port else None
    def start(self, **options):
        return self.client.start(self.id, **options)
    def stop(self, **options):
        return self.client.stop(self.id, **options)
    def pause(self, **options):
        return self.client.pause(self.id, **options)
    def unpause(self, **options):
        return self.client.unpause(self.id, **options)
    def kill(self, **options):
        return self.client.kill(self.id, **options)
    def restart(self, **options):
        return self.client.restart(self.id, **options)
    def remove(self, **options):
        return self.client.remove_container(self.id, **options)
    def inspect_if_not_inspected(self):
        # Lazily upgrade a ps-derived object to full inspect data.
        if not self.has_been_inspected:
            self.inspect()
    def wait(self):
        return self.client.wait(self.id)
    def logs(self, *args, **kwargs):
        return self.client.logs(self.id, *args, **kwargs)
    def inspect(self):
        """Refresh this object from GET /containers/:id/json."""
        self.dictionary = self.client.inspect_container(self.id)
        self.has_been_inspected = True
        return self.dictionary
    # TODO: only used by tests, move to test module
    def links(self):
        # Names look like "/parent/alias"; collect aliases that point at
        # this container.
        links = []
        for container in self.client.containers():
            for name in container['Names']:
                bits = name.split('/')
                if len(bits) > 2 and bits[1] == self.name:
                    links.append(bits[2])
        return links
    def attach(self, *args, **kwargs):
        return self.client.attach(self.id, *args, **kwargs)
    def attach_socket(self, **kwargs):
        return self.client.attach_socket(self.id, **kwargs)
    def __repr__(self):
        return '<Container: %s (%s)>' % (self.name, self.id[:6])
    def __eq__(self, other):
        # Identity is the full container id; objects of different classes
        # never compare equal.
        if type(self) != type(other):
            return False
        return self.id == other.id
    def __hash__(self):
        return self.id.__hash__()
return self.id.__hash__()
def get_container_name(container):
    """Extract a container name from either an inspect or a ps payload.

    Inspect payloads carry a single 'Name' (returned verbatim, slash
    included); ps payloads carry a 'Names' list of aliases, from which
    the least-nested one is chosen and its last path segment returned.
    Returns None when neither key holds anything truthy.
    """
    if not (container.get('Name') or container.get('Names')):
        return None
    # inspect: GET /containers/:id/json exposes a single 'Name' key.
    if 'Name' in container:
        return container['Name']
    # ps: pick the alias with the fewest '/'-separated segments.
    best_alias = min(container['Names'], key=lambda alias: len(alias.split('/')))
    return best_alias.split('/')[-1]
| apache-2.0 |
4eek/edx-platform | lms/djangoapps/shoppingcart/processors/__init__.py | 215 | 2574 | """
Public API for payment processor implementations.
The specific implementation is determined at runtime using Django settings:
CC_PROCESSOR_NAME: The name of the Python module (in `shoppingcart.processors`) to use.
CC_PROCESSOR: Dictionary of configuration options for specific processor implementations,
keyed to processor names.
"""
from django.conf import settings
# Import the processor implementation, using `CC_PROCESSOR_NAME`
# as the name of the Python module in `shoppingcart.processors`
PROCESSOR_MODULE = __import__(
'shoppingcart.processors.' + settings.CC_PROCESSOR_NAME,
fromlist=[
'render_purchase_form_html',
'process_postpay_callback',
'get_purchase_endpoint',
'get_signed_purchase_params',
]
)
def render_purchase_form_html(cart, **kwargs):
    """
    Render an HTML form with POSTs to the hosted payment processor.

    Args:
        cart (Order): The order model representing items in the user's cart.

    Keyword Args:
        Forwarded unchanged to the configured processor implementation.

    Returns:
        unicode: the rendered HTML form
    """
    return PROCESSOR_MODULE.render_purchase_form_html(cart, **kwargs)
def process_postpay_callback(params, **kwargs):
    """
    Handle a response from the payment processor.

    Delegates to the implementation selected by ``CC_PROCESSOR_NAME``.
    Concrete implementations should:
        1) Verify the parameters and determine if the payment was successful.
        2) If successful, mark the order as purchased and call `purchased_callbacks` of the cart items.
        3) If unsuccessful, try to figure out why and generate a helpful error message.
        4) Return a dictionary of the form:
            {'success': bool, 'order': Order, 'error_html': str}

    Args:
        params (dict): Dictionary of parameters received from the payment processor.

    Keyword Args:
        Can be used to provide additional information to concrete implementations.

    Returns:
        dict
    """
    return PROCESSOR_MODULE.process_postpay_callback(params, **kwargs)
def get_purchase_endpoint():
    """
    Return the URL of the current payment processor's endpoint
    (the processor selected by ``CC_PROCESSOR_NAME``).

    Returns:
        unicode
    """
    return PROCESSOR_MODULE.get_purchase_endpoint()
def get_signed_purchase_params(cart, **kwargs):
    """
    Return the (signed) parameters to send to the current payment processor.

    Args:
        cart (Order): The order model representing items in the user's cart.

    Keyword Args:
        Can be used to provide additional information to concrete implementations.

    Returns:
        dict
    """
    return PROCESSOR_MODULE.get_signed_purchase_params(cart, **kwargs)
| agpl-3.0 |
remcoboerma/pyfpdf | fpdf/php.py | 24 | 1516 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
from .py3k import PY3K, basestring, unicode
# fpdf php helpers:
def substr(s, start, length=-1):
    """PHP-style substr: slice of *s* beginning at *start*.

    A negative *length* (the default) means "through the end of the
    string".
    """
    if length < 0:
        return s[start:]
    return s[start:start + length]
def sprintf(fmt, *args):
    """PHP-style sprintf: apply %-formatting to *fmt* with *args*."""
    return fmt % args
def print_r(array):
    """Rough PHP print_r: dump a mapping as "[key] => value" lines.

    Non-dict iterables are first turned into a mapping of each item to
    itself, matching the original behaviour.
    """
    mapping = array if isinstance(array, dict) else dict((k, k) for k in array)
    for key, value in mapping.items():
        print("[%s] => %s " % (key, value))
def UTF8ToUTF16BE(instr, setbom=True):
    "Converts UTF-8 strings to UTF16-BE."
    # NOTE(review): relies on `PY3K` and `unicode` from the sibling
    # .py3k compat module; on Python 3 the result is a latin-1-decoded
    # "fake unicode" carrier for the UTF-16BE bytes -- confirm callers
    # expect that.
    outstr = "".encode()
    if (setbom):
        # Byte-order mark for big-endian UTF-16.
        outstr += "\xFE\xFF".encode("latin1")
    if not isinstance(instr, unicode):
        instr = instr.decode('UTF-8')
    outstr += instr.encode('UTF-16BE')
    # convert bytes back to fake unicode string until PEP461-like is implemented
    if PY3K:
        outstr = outstr.decode("latin1")
    return outstr
def UTF8StringToArray(instr):
    """Return the list of Unicode code points of *instr*'s characters."""
    return list(map(ord, instr))
# ttfints php helpers:
def die(msg):
    """PHP-style die(): abort execution by raising RuntimeError(msg)."""
    raise RuntimeError(msg)
def str_repeat(s, count):
    """Return *s* repeated *count* times (PHP str_repeat)."""
    return count * s
def str_pad(s, pad_length=0, pad_char=" ", pad_type=+1):
    """Pad *s* with *pad_char* to *pad_length* characters.

    Mirrors PHP's str_pad: pad_type < 0 pads on the left
    (STR_PAD_LEFT), > 0 on the right (STR_PAD_RIGHT, the default)
    and 0 on both sides (STR_PAD_BOTH).
    """
    if pad_type > 0:
        return s.ljust(pad_length, pad_char)
    if pad_type < 0:
        return s.rjust(pad_length, pad_char)
    return s.center(pad_length, pad_char)
# PHP-style strlen()/count(): both are simple aliases for Python's len().
strlen = count = lambda s: len(s)
| lgpl-3.0 |
pixelated-project/pixelated-user-agent | service/pixelated/register.py | 2 | 3662 | #
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
import getpass
import re
from collections import namedtuple
from leap.bitmask.bonafide.provider import Api
from leap.bitmask.bonafide.session import Session
from pixelated.bitmask_libraries.certs import LeapCertificate
from pixelated.bitmask_libraries.provider import LeapProvider
from pixelated.config import arguments
from pixelated.config import leap_config
from pixelated.config import logger as logger_config
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.logger import Logger
Credentials = namedtuple('Credentials', 'username, password')
logger = Logger()
def _validate(username, password):
    """Run both credential checks; each raises ValueError on failure."""
    validate_username(username)
    validate_password(password)
def _set_provider(provider_cert, provider_cert_fingerprint, server_name, leap_home=None):
    """Configure and return a LeapProvider for *server_name*.

    Pins the provider certificate (path and fingerprint), optionally
    overrides the LEAP home directory, then sets up the provider CA and
    downloads its settings.
    """
    if leap_home:
        leap_config.set_leap_home(leap_home)
    LeapCertificate.set_cert_and_fingerprint(provider_cert, provider_cert_fingerprint)
    provider = LeapProvider(server_name)
    provider.setup_ca()
    provider.download_settings()
    return provider
def _set_leap_provider(args):
    # Thin adapter: pull the provider settings out of the parsed CLI args.
    return _set_provider(args.leap_provider_cert, args.leap_provider_cert_fingerprint, args.provider, args.leap_home)
def _bonafide_session(username, password, provider):
    """Build a bonafide SRP Session against the provider's API."""
    srp_provider = Api(provider.api_uri)
    credentials = Credentials(username, password)
    return Session(credentials, srp_provider, provider.local_ca_crt)
def log_results(created, username):
    # `created` is the boolean returned by Session.signup().
    if created:
        logger.info('User %s successfully registered' % username)
    else:
        logger.error("Register failed")
@inlineCallbacks
def register(username, password, leap_provider, invite=None):
    """Register *username* with the LEAP provider (Twisted coroutine).

    Prompts for the password when none was given, validates both
    credentials (ValueError on bad input), performs the SRP signup and
    logs the outcome.
    """
    if not password:
        password = getpass.getpass('Please enter password for %s: ' % username)
    _validate(username, password)
    logger.info('password validated...')
    srp_auth = _bonafide_session(username, password, leap_provider)
    created, user = yield srp_auth.signup(username, password, invite)
    log_results(created, username)
def validate_username(username):
    """Check that *username* uses only the allowed characters.

    Raises:
        ValueError: if it contains anything outside lowercase letters,
            digits, '.', '-' or '_'.

    Note: the '*' quantifier means an empty username passes this check
    (unchanged from the original behaviour); callers are presumably
    expected to reject empty names -- TODO confirm.
    """
    # Raw string replaces the old pattern's invalid '\-', '\_' and '\.'
    # escape sequences (a DeprecationWarning on modern Pythons); the
    # accepted character set is identical.
    accepted_characters = r'^[a-z0-9._-]*$'
    if not re.match(accepted_characters, username):
        raise ValueError('Only lowercase letters, digits, . - and _ allowed.')
def validate_password(password):
    """Reject passwords shorter than 8 characters with a ValueError."""
    if len(password) >= 8:
        return
    logger.info('password not validated...')
    raise ValueError('The password must have at least 8 characters')
def initialize():
    """CLI entry point: parse the register args, then run the signup
    inside the Twisted reactor (blocks until the reactor stops)."""
    logger_config.init(debug=False)
    args = arguments.parse_register_args()
    leap_provider = _set_leap_provider(args)
    def show_error(err):
        logger.info('error: %s' % err)
    def shut_down(_):
        # Attached via addBoth, so the process exits on success or failure.
        reactor.stop()
    def _register():
        d = register(
            args.username,
            args.password,
            leap_provider,
            args.invite_code)
        d.addErrback(show_error)
        d.addBoth(shut_down)
    reactor.callWhenRunning(_register)
    reactor.run()
pantech-msm8960/android_kernel_pantech_msm8960 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Main window of the scheduler trace viewer.

    Renders per-lane scheduler activity as horizontal rectangles on a
    zoomable, scrollable virtual canvas.  The horizontal axis is time;
    us_to_px()/px_to_us() convert between trace timestamps and pixel
    positions at the current zoom level.

    NOTE(review): this is Python 2 era code -- several `/` divisions below
    (scrollbar units, lane lookup) rely on integer division; porting to
    Python 3 would need `//` there.
    """

    # Pixel offset of the first lane from the top of the canvas.
    Y_OFFSET = 100
    # Height of one rectangle lane, in pixels.
    RECT_HEIGHT = 100
    # Vertical gap between consecutive lanes, in pixels.
    RECT_SPACE = 50
    # Height of the strip drawn along a lane's top edge to mark an event.
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        """Build the window sized to the display and register with the tracer.

        sched_tracer: trace data provider; must expose interval(),
            nr_rectangles(), fill_zone(), mouse_down() and set_root_win().
        title: window title string.
        """
        wx.Frame.__init__(self, parent, id, title)

        # Fit the window to the display, minus a small margin.
        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10

        # Horizontal zoom factor; doubled/halved by zoom_in()/zoom_out().
        self.zoom = 0.5

        # One scrollbar unit corresponds to this many pixels.
        self.scroll_scale = 20

        self.sched_tracer = sched_tracer

        # Let the tracer push repaints and summary text back to this window.
        self.sched_tracer.set_root_win(self)

        # Time span covered by the whole trace, in trace time units.
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        # Virtual canvas height: top offset plus one lane+gap per rectangle row.
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        # Bind the same handlers on the container too, so input reaches us
        # whichever of the two windows has focus / receives the event.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # Summary StaticText widget, created lazily by update_summary().
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        """Convert a time delta to a pixel distance at the current zoom.

        NOTE(review): the 10**3 divisor suggests `val` is in nanoseconds
        despite the method name -- confirm against sched_tracer's unit.
        """
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        """Inverse of us_to_px(): pixel distance back to a time delta."""
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        """Current scroll origin in pixels (scrollbar units * scale)."""
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        """Time offset (relative to ts_start) of the left visible edge."""
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one activity rectangle in lane `nr` spanning [start, end].

        color: (r, g, b) fill for the body of the rectangle.
        top_color: optional (r, g, b) for a thin event-marking strip drawn
            along the rectangle's top edge; None draws no strip.
        start/end: absolute trace timestamps.
        """
        offset_px = self.us_to_px(start - self.ts_start)
        # NOTE(review): width is measured from ts_start rather than from
        # `start`, so it includes the leading offset -- looks suspicious,
        # confirm whether the overdraw is intentional.
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            # Event strip along the top edge; the body below is shrunk and
            # shifted so the two don't overlap.
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        """Ask the tracer to (re)draw everything in [start, end].

        start/end arrive relative to ts_start and are converted to
        absolute timestamps before being handed to fill_zone(), which
        calls back into paint_rectangle_zone() above.
        """
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        """EVT_PAINT handler: repaint only the currently visible time span."""
        dc = wx.PaintDC(self.scroll_panel)
        # Stashed for paint_rectangle_zone(), which is invoked indirectly
        # via the tracer during update_rectangles().
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a canvas y coordinate to a lane index, or -1 if the click
        landed outside every lane (above the first, past the last, or in
        the gap between two lanes)."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        """Replace the summary text shown below the drawing area."""
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        """EVT_LEFT_DOWN handler: resolve the clicked lane and timestamp
        and forward them to the tracer for detail display."""
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)

        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        """Recompute the virtual canvas width from the trace span and zoom."""
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        """Apply the new zoom level, keeping time offset `x` anchored at
        the left edge of the viewport."""
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        """Double the zoom factor around the current left edge."""
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        """Halve the zoom factor around the current left edge."""
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        """EVT_KEY_DOWN handler: '+'/'-' zoom, arrow keys scroll by one
        scrollbar unit."""
        # Raw key code is needed to distinguish the '+'/'-' characters.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        # Translated key code for the cursor keys.
        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
from ccdc import cassandra
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.sql.types import IntegerType
from pyspark.sql.types import StringType
from pyspark.sql.types import StructField
from pyspark.sql.types import StructType
def table():
    """Name of the Cassandra table holding tile results."""
    return 'tile'
def schema():
    """Schema of a tile dataframe: tile keys (tx, ty) plus model metadata.

    All fields are non-nullable.
    """
    specs = [
        ('tx', IntegerType()),
        ('ty', IntegerType()),
        ('name', StringType()),
        ('model', StringType()),
        ('updated', StringType()),
    ]
    return StructType([StructField(col, typ, nullable=False)
                       for col, typ in specs])
def dataframe(ctx, tx, ty, name, model, updated):
    """Create a one-row tile dataframe describing a trained model.

    Args:
        ctx: Spark context
        tx: tile x
        ty: tile y
        name: model name
        model: trained model
        updated: iso format timestamp

    Returns:
        Dataframe matching tile.schema()
    """
    record = Row(tx=tx, ty=ty, name=name, model=model, updated=updated)
    session = SparkSession(ctx)
    return session.createDataFrame([record])
def read(ctx, ids):
    """Read tile results for the requested tile ids.

    Args:
        ctx: spark context
        ids: dataframe of (tx, ty)

    Returns:
        dataframe conforming to tile.schema(); only rows whose (tx, ty)
        appear in `ids` are kept (inner join).
    """
    stored = cassandra.read(ctx, table())
    return ids.join(stored, on=['tx', 'ty'], how='inner')
def write(ctx, df):
    """Write tile

    Args:
        ctx: spark context
        df: dataframe conforming to tile.schema()

    Returns:
        df
    """
    cassandra.write(ctx, df, table())
    # Return the input frame unchanged so calls can be chained/composed.
    return df
| unlicense |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.