repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
was4444/chromium.src
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
628
31933
# Copyright 2012, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """This file provides classes and helper functions for parsing/building frames of the WebSocket protocol (RFC 6455). 
Specification: http://tools.ietf.org/html/rfc6455 """ from collections import deque import logging import os import struct import time from mod_pywebsocket import common from mod_pywebsocket import util from mod_pywebsocket._stream_base import BadOperationException from mod_pywebsocket._stream_base import ConnectionTerminatedException from mod_pywebsocket._stream_base import InvalidFrameException from mod_pywebsocket._stream_base import InvalidUTF8Exception from mod_pywebsocket._stream_base import StreamBase from mod_pywebsocket._stream_base import UnsupportedFrameException _NOOP_MASKER = util.NoopMasker() class Frame(object): def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0, opcode=None, payload=''): self.fin = fin self.rsv1 = rsv1 self.rsv2 = rsv2 self.rsv3 = rsv3 self.opcode = opcode self.payload = payload # Helper functions made public to be used for writing unittests for WebSocket # clients. def create_length_header(length, mask): """Creates a length header. Args: length: Frame length. Must be less than 2^63. mask: Mask bit. Must be boolean. Raises: ValueError: when bad data is given. """ if mask: mask_bit = 1 << 7 else: mask_bit = 0 if length < 0: raise ValueError('length must be non negative integer') elif length <= 125: return chr(mask_bit | length) elif length < (1 << 16): return chr(mask_bit | 126) + struct.pack('!H', length) elif length < (1 << 63): return chr(mask_bit | 127) + struct.pack('!Q', length) else: raise ValueError('Payload is too big for one frame') def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask): """Creates a frame header. Raises: Exception: when bad data is given. 
""" if opcode < 0 or 0xf < opcode: raise ValueError('Opcode out of range') if payload_length < 0 or (1 << 63) <= payload_length: raise ValueError('payload_length out of range') if (fin | rsv1 | rsv2 | rsv3) & ~1: raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1') header = '' first_byte = ((fin << 7) | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4) | opcode) header += chr(first_byte) header += create_length_header(payload_length, mask) return header def _build_frame(header, body, mask): if not mask: return header + body masking_nonce = os.urandom(4) masker = util.RepeatedXorMasker(masking_nonce) return header + masking_nonce + masker.mask(body) def _filter_and_format_frame_object(frame, mask, frame_filters): for frame_filter in frame_filters: frame_filter.filter(frame) header = create_header( frame.opcode, len(frame.payload), frame.fin, frame.rsv1, frame.rsv2, frame.rsv3, mask) return _build_frame(header, frame.payload, mask) def create_binary_frame( message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]): """Creates a simple binary frame with no extension, reserved bit.""" frame = Frame(fin=fin, opcode=opcode, payload=message) return _filter_and_format_frame_object(frame, mask, frame_filters) def create_text_frame( message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]): """Creates a simple text frame with no extension, reserved bit.""" encoded_message = message.encode('utf-8') return create_binary_frame(encoded_message, opcode, fin, mask, frame_filters) def parse_frame(receive_bytes, logger=None, ws_version=common.VERSION_HYBI_LATEST, unmask_receive=True): """Parses a frame. Returns a tuple containing each header field and payload. Args: receive_bytes: a function that reads frame data from a stream or something similar. The function takes length of the bytes to be read. The function must raise ConnectionTerminatedException if there is not enough data to be read. logger: a logging object. 
ws_version: the version of WebSocket protocol. unmask_receive: unmask received frames. When received unmasked frame, raises InvalidFrameException. Raises: ConnectionTerminatedException: when receive_bytes raises it. InvalidFrameException: when the frame contains invalid data. """ if not logger: logger = logging.getLogger() logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame') received = receive_bytes(2) first_byte = ord(received[0]) fin = (first_byte >> 7) & 1 rsv1 = (first_byte >> 6) & 1 rsv2 = (first_byte >> 5) & 1 rsv3 = (first_byte >> 4) & 1 opcode = first_byte & 0xf second_byte = ord(received[1]) mask = (second_byte >> 7) & 1 payload_length = second_byte & 0x7f logger.log(common.LOGLEVEL_FINE, 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, ' 'Mask=%s, Payload_length=%s', fin, rsv1, rsv2, rsv3, opcode, mask, payload_length) if (mask == 1) != unmask_receive: raise InvalidFrameException( 'Mask bit on the received frame did\'nt match masking ' 'configuration for received frames') # The HyBi and later specs disallow putting a value in 0x0-0xFFFF # into the 8-octet extended payload length field (or 0x0-0xFD in # 2-octet field). 
valid_length_encoding = True length_encoding_bytes = 1 if payload_length == 127: logger.log(common.LOGLEVEL_FINE, 'Receive 8-octet extended payload length') extended_payload_length = receive_bytes(8) payload_length = struct.unpack( '!Q', extended_payload_length)[0] if payload_length > 0x7FFFFFFFFFFFFFFF: raise InvalidFrameException( 'Extended payload length >= 2^63') if ws_version >= 13 and payload_length < 0x10000: valid_length_encoding = False length_encoding_bytes = 8 logger.log(common.LOGLEVEL_FINE, 'Decoded_payload_length=%s', payload_length) elif payload_length == 126: logger.log(common.LOGLEVEL_FINE, 'Receive 2-octet extended payload length') extended_payload_length = receive_bytes(2) payload_length = struct.unpack( '!H', extended_payload_length)[0] if ws_version >= 13 and payload_length < 126: valid_length_encoding = False length_encoding_bytes = 2 logger.log(common.LOGLEVEL_FINE, 'Decoded_payload_length=%s', payload_length) if not valid_length_encoding: logger.warning( 'Payload length is not encoded using the minimal number of ' 'bytes (%d is encoded using %d bytes)', payload_length, length_encoding_bytes) if mask == 1: logger.log(common.LOGLEVEL_FINE, 'Receive mask') masking_nonce = receive_bytes(4) masker = util.RepeatedXorMasker(masking_nonce) logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce) else: masker = _NOOP_MASKER logger.log(common.LOGLEVEL_FINE, 'Receive payload data') if logger.isEnabledFor(common.LOGLEVEL_FINE): receive_start = time.time() raw_payload_bytes = receive_bytes(payload_length) if logger.isEnabledFor(common.LOGLEVEL_FINE): logger.log( common.LOGLEVEL_FINE, 'Done receiving payload data at %s MB/s', payload_length / (time.time() - receive_start) / 1000 / 1000) logger.log(common.LOGLEVEL_FINE, 'Unmask payload data') if logger.isEnabledFor(common.LOGLEVEL_FINE): unmask_start = time.time() unmasked_bytes = masker.mask(raw_payload_bytes) if logger.isEnabledFor(common.LOGLEVEL_FINE): logger.log( common.LOGLEVEL_FINE, 'Done 
unmasking payload data at %s MB/s', payload_length / (time.time() - unmask_start) / 1000 / 1000) return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 class FragmentedFrameBuilder(object): """A stateful class to send a message as fragments.""" def __init__(self, mask, frame_filters=[], encode_utf8=True): """Constructs an instance.""" self._mask = mask self._frame_filters = frame_filters # This is for skipping UTF-8 encoding when building text type frames # from compressed data. self._encode_utf8 = encode_utf8 self._started = False # Hold opcode of the first frame in messages to verify types of other # frames in the message are all the same. self._opcode = common.OPCODE_TEXT def build(self, payload_data, end, binary): if binary: frame_type = common.OPCODE_BINARY else: frame_type = common.OPCODE_TEXT if self._started: if self._opcode != frame_type: raise ValueError('Message types are different in frames for ' 'the same message') opcode = common.OPCODE_CONTINUATION else: opcode = frame_type self._opcode = frame_type if end: self._started = False fin = 1 else: self._started = True fin = 0 if binary or not self._encode_utf8: return create_binary_frame( payload_data, opcode, fin, self._mask, self._frame_filters) else: return create_text_frame( payload_data, opcode, fin, self._mask, self._frame_filters) def _create_control_frame(opcode, body, mask, frame_filters): frame = Frame(opcode=opcode, payload=body) for frame_filter in frame_filters: frame_filter.filter(frame) if len(frame.payload) > 125: raise BadOperationException( 'Payload data size of control frames must be 125 bytes or less') header = create_header( frame.opcode, len(frame.payload), frame.fin, frame.rsv1, frame.rsv2, frame.rsv3, mask) return _build_frame(header, frame.payload, mask) def create_ping_frame(body, mask=False, frame_filters=[]): return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters) def create_pong_frame(body, mask=False, frame_filters=[]): return 
_create_control_frame(common.OPCODE_PONG, body, mask, frame_filters) def create_close_frame(body, mask=False, frame_filters=[]): return _create_control_frame( common.OPCODE_CLOSE, body, mask, frame_filters) def create_closing_handshake_body(code, reason): body = '' if code is not None: if (code > common.STATUS_USER_PRIVATE_MAX or code < common.STATUS_NORMAL_CLOSURE): raise BadOperationException('Status code is out of range') if (code == common.STATUS_NO_STATUS_RECEIVED or code == common.STATUS_ABNORMAL_CLOSURE or code == common.STATUS_TLS_HANDSHAKE): raise BadOperationException('Status code is reserved pseudo ' 'code') encoded_reason = reason.encode('utf-8') body = struct.pack('!H', code) + encoded_reason return body class StreamOptions(object): """Holds option values to configure Stream objects.""" def __init__(self): """Constructs StreamOptions.""" # Filters applied to frames. self.outgoing_frame_filters = [] self.incoming_frame_filters = [] # Filters applied to messages. Control frames are not affected by them. self.outgoing_message_filters = [] self.incoming_message_filters = [] self.encode_text_message_to_utf8 = True self.mask_send = False self.unmask_receive = True class Stream(StreamBase): """A class for parsing/building frames of the WebSocket protocol (RFC 6455). """ def __init__(self, request, options): """Constructs an instance. Args: request: mod_python request. """ StreamBase.__init__(self, request) self._logger = util.get_class_logger(self) self._options = options self._request.client_terminated = False self._request.server_terminated = False # Holds body of received fragments. self._received_fragments = [] # Holds the opcode of the first fragment. 
self._original_opcode = None self._writer = FragmentedFrameBuilder( self._options.mask_send, self._options.outgoing_frame_filters, self._options.encode_text_message_to_utf8) self._ping_queue = deque() def _receive_frame(self): """Receives a frame and return data in the frame as a tuple containing each header field and payload separately. Raises: ConnectionTerminatedException: when read returns empty string. InvalidFrameException: when the frame contains invalid data. """ def _receive_bytes(length): return self.receive_bytes(length) return parse_frame(receive_bytes=_receive_bytes, logger=self._logger, ws_version=self._request.ws_version, unmask_receive=self._options.unmask_receive) def _receive_frame_as_frame_object(self): opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame() return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3, opcode=opcode, payload=unmasked_bytes) def receive_filtered_frame(self): """Receives a frame and applies frame filters and message filters. The frame to be received must satisfy following conditions: - The frame is not fragmented. - The opcode of the frame is TEXT or BINARY. DO NOT USE this method except for testing purpose. """ frame = self._receive_frame_as_frame_object() if not frame.fin: raise InvalidFrameException( 'Segmented frames must not be received via ' 'receive_filtered_frame()') if (frame.opcode != common.OPCODE_TEXT and frame.opcode != common.OPCODE_BINARY): raise InvalidFrameException( 'Control frames must not be received via ' 'receive_filtered_frame()') for frame_filter in self._options.incoming_frame_filters: frame_filter.filter(frame) for message_filter in self._options.incoming_message_filters: frame.payload = message_filter.filter(frame.payload) return frame def send_message(self, message, end=True, binary=False): """Send message. Args: message: text in unicode or binary in str to send. binary: send message as binary frame. 
Raises: BadOperationException: when called on a server-terminated connection or called with inconsistent message type or binary parameter. """ if self._request.server_terminated: raise BadOperationException( 'Requested send_message after sending out a closing handshake') if binary and isinstance(message, unicode): raise BadOperationException( 'Message for binary frame must be instance of str') for message_filter in self._options.outgoing_message_filters: message = message_filter.filter(message, end, binary) try: # Set this to any positive integer to limit maximum size of data in # payload data of each frame. MAX_PAYLOAD_DATA_SIZE = -1 if MAX_PAYLOAD_DATA_SIZE <= 0: self._write(self._writer.build(message, end, binary)) return bytes_written = 0 while True: end_for_this_frame = end bytes_to_write = len(message) - bytes_written if (MAX_PAYLOAD_DATA_SIZE > 0 and bytes_to_write > MAX_PAYLOAD_DATA_SIZE): end_for_this_frame = False bytes_to_write = MAX_PAYLOAD_DATA_SIZE frame = self._writer.build( message[bytes_written:bytes_written + bytes_to_write], end_for_this_frame, binary) self._write(frame) bytes_written += bytes_to_write # This if must be placed here (the end of while block) so that # at least one frame is sent. if len(message) <= bytes_written: break except ValueError, e: raise BadOperationException(e) def _get_message_from_frame(self, frame): """Gets a message from frame. If the message is composed of fragmented frames and the frame is not the last fragmented frame, this method returns None. The whole message will be returned when the last fragmented frame is passed to this method. Raises: InvalidFrameException: when the frame doesn't match defragmentation context, or the frame contains invalid data. 
""" if frame.opcode == common.OPCODE_CONTINUATION: if not self._received_fragments: if frame.fin: raise InvalidFrameException( 'Received a termination frame but fragmentation ' 'not started') else: raise InvalidFrameException( 'Received an intermediate frame but ' 'fragmentation not started') if frame.fin: # End of fragmentation frame self._received_fragments.append(frame.payload) message = ''.join(self._received_fragments) self._received_fragments = [] return message else: # Intermediate frame self._received_fragments.append(frame.payload) return None else: if self._received_fragments: if frame.fin: raise InvalidFrameException( 'Received an unfragmented frame without ' 'terminating existing fragmentation') else: raise InvalidFrameException( 'New fragmentation started without terminating ' 'existing fragmentation') if frame.fin: # Unfragmented frame self._original_opcode = frame.opcode return frame.payload else: # Start of fragmentation frame if common.is_control_opcode(frame.opcode): raise InvalidFrameException( 'Control frames must not be fragmented') self._original_opcode = frame.opcode self._received_fragments.append(frame.payload) return None def _process_close_message(self, message): """Processes close message. Args: message: close message. Raises: InvalidFrameException: when the message is invalid. """ self._request.client_terminated = True # Status code is optional. We can have status reason only if we # have status code. Status reason can be empty string. 
So, # allowed cases are # - no application data: no code no reason # - 2 octet of application data: has code but no reason # - 3 or more octet of application data: both code and reason if len(message) == 0: self._logger.debug('Received close frame (empty body)') self._request.ws_close_code = ( common.STATUS_NO_STATUS_RECEIVED) elif len(message) == 1: raise InvalidFrameException( 'If a close frame has status code, the length of ' 'status code must be 2 octet') elif len(message) >= 2: self._request.ws_close_code = struct.unpack( '!H', message[0:2])[0] self._request.ws_close_reason = message[2:].decode( 'utf-8', 'replace') self._logger.debug( 'Received close frame (code=%d, reason=%r)', self._request.ws_close_code, self._request.ws_close_reason) # As we've received a close frame, no more data is coming over the # socket. We can now safely close the socket without worrying about # RST sending. if self._request.server_terminated: self._logger.debug( 'Received ack for server-initiated closing handshake') return self._logger.debug( 'Received client-initiated closing handshake') code = common.STATUS_NORMAL_CLOSURE reason = '' if hasattr(self._request, '_dispatcher'): dispatcher = self._request._dispatcher code, reason = dispatcher.passive_closing_handshake( self._request) if code is None and reason is not None and len(reason) > 0: self._logger.warning( 'Handler specified reason despite code being None') reason = '' if reason is None: reason = '' self._send_closing_handshake(code, reason) self._logger.debug( 'Acknowledged closing handshake initiated by the peer ' '(code=%r, reason=%r)', code, reason) def _process_ping_message(self, message): """Processes ping message. Args: message: ping message. """ try: handler = self._request.on_ping_handler if handler: handler(self._request, message) return except AttributeError, e: pass self._send_pong(message) def _process_pong_message(self, message): """Processes pong message. Args: message: pong message. 
""" # TODO(tyoshino): Add ping timeout handling. inflight_pings = deque() while True: try: expected_body = self._ping_queue.popleft() if expected_body == message: # inflight_pings contains pings ignored by the # other peer. Just forget them. self._logger.debug( 'Ping %r is acked (%d pings were ignored)', expected_body, len(inflight_pings)) break else: inflight_pings.append(expected_body) except IndexError, e: # The received pong was unsolicited pong. Keep the # ping queue as is. self._ping_queue = inflight_pings self._logger.debug('Received a unsolicited pong') break try: handler = self._request.on_pong_handler if handler: handler(self._request, message) except AttributeError, e: pass def receive_message(self): """Receive a WebSocket frame and return its payload as a text in unicode or a binary in str. Returns: payload data of the frame - as unicode instance if received text frame - as str instance if received binary frame or None iff received closing handshake. Raises: BadOperationException: when called on a client-terminated connection. ConnectionTerminatedException: when read returns empty string. InvalidFrameException: when the frame contains invalid data. UnsupportedFrameException: when the received frame has flags, opcode we cannot handle. You can ignore this exception and continue receiving the next frame. """ if self._request.client_terminated: raise BadOperationException( 'Requested receive_message after receiving a closing ' 'handshake') while True: # mp_conn.read will block if no bytes are available. # Timeout is controlled by TimeOut directive of Apache. frame = self._receive_frame_as_frame_object() # Check the constraint on the payload size for control frames # before extension processes the frame. 
# See also http://tools.ietf.org/html/rfc6455#section-5.5 if (common.is_control_opcode(frame.opcode) and len(frame.payload) > 125): raise InvalidFrameException( 'Payload data size of control frames must be 125 bytes or ' 'less') for frame_filter in self._options.incoming_frame_filters: frame_filter.filter(frame) if frame.rsv1 or frame.rsv2 or frame.rsv3: raise UnsupportedFrameException( 'Unsupported flag is set (rsv = %d%d%d)' % (frame.rsv1, frame.rsv2, frame.rsv3)) message = self._get_message_from_frame(frame) if message is None: continue for message_filter in self._options.incoming_message_filters: message = message_filter.filter(message) if self._original_opcode == common.OPCODE_TEXT: # The WebSocket protocol section 4.4 specifies that invalid # characters must be replaced with U+fffd REPLACEMENT # CHARACTER. try: return message.decode('utf-8') except UnicodeDecodeError, e: raise InvalidUTF8Exception(e) elif self._original_opcode == common.OPCODE_BINARY: return message elif self._original_opcode == common.OPCODE_CLOSE: self._process_close_message(message) return None elif self._original_opcode == common.OPCODE_PING: self._process_ping_message(message) elif self._original_opcode == common.OPCODE_PONG: self._process_pong_message(message) else: raise UnsupportedFrameException( 'Opcode %d is not supported' % self._original_opcode) def _send_closing_handshake(self, code, reason): body = create_closing_handshake_body(code, reason) frame = create_close_frame( body, mask=self._options.mask_send, frame_filters=self._options.outgoing_frame_filters) self._request.server_terminated = True self._write(frame) def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='', wait_response=True): """Closes a WebSocket connection. Args: code: Status code for close frame. If code is None, a close frame with empty body will be sent. reason: string representing close reason. wait_response: True when caller want to wait the response. 
Raises: BadOperationException: when reason is specified with code None or reason is not an instance of both str and unicode. """ if self._request.server_terminated: self._logger.debug( 'Requested close_connection but server is already terminated') return if code is None: if reason is not None and len(reason) > 0: raise BadOperationException( 'close reason must not be specified if code is None') reason = '' else: if not isinstance(reason, str) and not isinstance(reason, unicode): raise BadOperationException( 'close reason must be an instance of str or unicode') self._send_closing_handshake(code, reason) self._logger.debug( 'Initiated closing handshake (code=%r, reason=%r)', code, reason) if (code == common.STATUS_GOING_AWAY or code == common.STATUS_PROTOCOL_ERROR) or not wait_response: # It doesn't make sense to wait for a close frame if the reason is # protocol error or that the server is going away. For some of # other reasons, it might not make sense to wait for a close frame, # but it's not clear, yet. return # TODO(ukai): 2. wait until the /client terminated/ flag has been set, # or until a server-defined timeout expires. # # For now, we expect receiving closing handshake right after sending # out closing handshake. message = self.receive_message() if message is not None: raise ConnectionTerminatedException( 'Didn\'t receive valid ack for closing handshake') # TODO: 3. close the WebSocket connection. # note: mod_python Connection (mp_conn) doesn't have close method. def send_ping(self, body=''): frame = create_ping_frame( body, self._options.mask_send, self._options.outgoing_frame_filters) self._write(frame) self._ping_queue.append(body) def _send_pong(self, body): frame = create_pong_frame( body, self._options.mask_send, self._options.outgoing_frame_filters) self._write(frame) def get_last_received_opcode(self): """Returns the opcode of the WebSocket message which the last received frame belongs to. 
The return value is valid iff immediately after receive_message call. """ return self._original_opcode # vi:sts=4 sw=4 et
bsd-3-clause
bgris/ODL_bgris
lib/python3.5/site-packages/future/backports/email/_parseaddr.py
82
17389
# Copyright (C) 2002-2007 Python Software Foundation # Contact: email-sig@python.org """Email address parsing code. Lifted directly from rfc822.py. This should eventually be rewritten. """ from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import from future.builtins import int __all__ = [ 'mktime_tz', 'parsedate', 'parsedate_tz', 'quote', ] import time, calendar SPACE = ' ' EMPTYSTRING = '' COMMASPACE = ', ' # Parse a date field _monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december'] _daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] # The timezone table does not include the military time zones defined # in RFC822, other than Z. According to RFC1123, the description in # RFC822 gets the signs wrong, so we can't rely on any such time # zones. RFC1123 recommends that numeric timezone indicators be used # instead of timezone names. _timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0, 'AST': -400, 'ADT': -300, # Atlantic (used in Canada) 'EST': -500, 'EDT': -400, # Eastern 'CST': -600, 'CDT': -500, # Central 'MST': -700, 'MDT': -600, # Mountain 'PST': -800, 'PDT': -700 # Pacific } def parsedate_tz(data): """Convert a date string to a time tuple. Accounts for military timezones. """ res = _parsedate_tz(data) if not res: return if res[9] is None: res[9] = 0 return tuple(res) def _parsedate_tz(data): """Convert date to extended time tuple. The last (additional) element is the time zone offset in seconds, except if the timezone was specified as -0000. In that case the last element is None. This indicates a UTC timestamp that explicitly declaims knowledge of the source timezone, as opposed to a +0000 timestamp that indicates the source timezone really was UTC. 
""" if not data: return data = data.split() # The FWS after the comma after the day-of-week is optional, so search and # adjust for this. if data[0].endswith(',') or data[0].lower() in _daynames: # There's a dayname here. Skip it del data[0] else: i = data[0].rfind(',') if i >= 0: data[0] = data[0][i+1:] if len(data) == 3: # RFC 850 date, deprecated stuff = data[0].split('-') if len(stuff) == 3: data = stuff + data[1:] if len(data) == 4: s = data[3] i = s.find('+') if i == -1: i = s.find('-') if i > 0: data[3:] = [s[:i], s[i:]] else: data.append('') # Dummy tz if len(data) < 5: return None data = data[:5] [dd, mm, yy, tm, tz] = data mm = mm.lower() if mm not in _monthnames: dd, mm = mm, dd.lower() if mm not in _monthnames: return None mm = _monthnames.index(mm) + 1 if mm > 12: mm -= 12 if dd[-1] == ',': dd = dd[:-1] i = yy.find(':') if i > 0: yy, tm = tm, yy if yy[-1] == ',': yy = yy[:-1] if not yy[0].isdigit(): yy, tz = tz, yy if tm[-1] == ',': tm = tm[:-1] tm = tm.split(':') if len(tm) == 2: [thh, tmm] = tm tss = '0' elif len(tm) == 3: [thh, tmm, tss] = tm elif len(tm) == 1 and '.' in tm[0]: # Some non-compliant MUAs use '.' to separate time elements. tm = tm[0].split('.') if len(tm) == 2: [thh, tmm] = tm tss = 0 elif len(tm) == 3: [thh, tmm, tss] = tm else: return None try: yy = int(yy) dd = int(dd) thh = int(thh) tmm = int(tmm) tss = int(tss) except ValueError: return None # Check for a yy specified in two-digit format, then convert it to the # appropriate four-digit format, according to the POSIX standard. RFC 822 # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822) # mandates a 4-digit yy. For more information, see the documentation for # the time module. if yy < 100: # The year is between 1969 and 1999 (inclusive). if yy > 68: yy += 1900 # The year is between 2000 and 2068 (inclusive). 
else: yy += 2000 tzoffset = None tz = tz.upper() if tz in _timezones: tzoffset = _timezones[tz] else: try: tzoffset = int(tz) except ValueError: pass if tzoffset==0 and tz.startswith('-'): tzoffset = None # Convert a timezone offset into seconds ; -0500 -> -18000 if tzoffset: if tzoffset < 0: tzsign = -1 tzoffset = -tzoffset else: tzsign = 1 tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60) # Daylight Saving Time flag is set to -1, since DST is unknown. return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset] def parsedate(data): """Convert a time string to a time tuple.""" t = parsedate_tz(data) if isinstance(t, tuple): return t[:9] else: return t def mktime_tz(data): """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp.""" if data[9] is None: # No zone info, so localtime is better assumption than GMT return time.mktime(data[:8] + (-1,)) else: t = calendar.timegm(data) return t - data[9] def quote(str): """Prepare string to be used in a quoted string. Turns backslash and double quote characters into quoted pairs. These are the only characters that need to be quoted inside a quoted string. Does not add the surrounding double quotes. """ return str.replace('\\', '\\\\').replace('"', '\\"') class AddrlistClass(object): """Address parser class by Ben Escoto. To understand what this class does, it helps to have a copy of RFC 2822 in front of you. Note: this class interface is deprecated and may be removed in the future. Use email.utils.AddressList instead. """ def __init__(self, field): """Initialize a new instance. `field' is an unparsed address header field, containing one or more addresses. """ self.specials = '()<>@,:;.\"[]' self.pos = 0 self.LWS = ' \t' self.CR = '\r\n' self.FWS = self.LWS + self.CR self.atomends = self.specials + self.LWS + self.CR # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it # is obsolete syntax. RFC 2822 requires that we recognize obsolete # syntax, so allow dots in phrases. 
self.phraseends = self.atomends.replace('.', '') self.field = field self.commentlist = [] def gotonext(self): """Skip white space and extract comments.""" wslist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS + '\n\r': if self.field[self.pos] not in '\n\r': wslist.append(self.field[self.pos]) self.pos += 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) else: break return EMPTYSTRING.join(wslist) def getaddrlist(self): """Parse all addresses. Returns a list containing all of the addresses. """ result = [] while self.pos < len(self.field): ad = self.getaddress() if ad: result += ad else: result.append(('', '')) return result def getaddress(self): """Parse the next address.""" self.commentlist = [] self.gotonext() oldpos = self.pos oldcl = self.commentlist plist = self.getphraselist() self.gotonext() returnlist = [] if self.pos >= len(self.field): # Bad email address technically, no domain. if plist: returnlist = [(SPACE.join(self.commentlist), plist[0])] elif self.field[self.pos] in '.@': # email address is just an addrspec # this isn't very efficient since we start over self.pos = oldpos self.commentlist = oldcl addrspec = self.getaddrspec() returnlist = [(SPACE.join(self.commentlist), addrspec)] elif self.field[self.pos] == ':': # address is a group returnlist = [] fieldlen = len(self.field) self.pos += 1 while self.pos < len(self.field): self.gotonext() if self.pos < fieldlen and self.field[self.pos] == ';': self.pos += 1 break returnlist = returnlist + self.getaddress() elif self.field[self.pos] == '<': # Address is a phrase then a route addr routeaddr = self.getrouteaddr() if self.commentlist: returnlist = [(SPACE.join(plist) + ' (' + ' '.join(self.commentlist) + ')', routeaddr)] else: returnlist = [(SPACE.join(plist), routeaddr)] else: if plist: returnlist = [(SPACE.join(self.commentlist), plist[0])] elif self.field[self.pos] in self.specials: self.pos += 1 self.gotonext() if self.pos < len(self.field) 
and self.field[self.pos] == ',': self.pos += 1 return returnlist def getrouteaddr(self): """Parse a route address (Return-path value). This method just skips all the route stuff and returns the addrspec. """ if self.field[self.pos] != '<': return expectroute = False self.pos += 1 self.gotonext() adlist = '' while self.pos < len(self.field): if expectroute: self.getdomain() expectroute = False elif self.field[self.pos] == '>': self.pos += 1 break elif self.field[self.pos] == '@': self.pos += 1 expectroute = True elif self.field[self.pos] == ':': self.pos += 1 else: adlist = self.getaddrspec() self.pos += 1 break self.gotonext() return adlist def getaddrspec(self): """Parse an RFC 2822 addr-spec.""" aslist = [] self.gotonext() while self.pos < len(self.field): preserve_ws = True if self.field[self.pos] == '.': if aslist and not aslist[-1].strip(): aslist.pop() aslist.append('.') self.pos += 1 preserve_ws = False elif self.field[self.pos] == '"': aslist.append('"%s"' % quote(self.getquote())) elif self.field[self.pos] in self.atomends: if aslist and not aslist[-1].strip(): aslist.pop() break else: aslist.append(self.getatom()) ws = self.gotonext() if preserve_ws and ws: aslist.append(ws) if self.pos >= len(self.field) or self.field[self.pos] != '@': return EMPTYSTRING.join(aslist) aslist.append('@') self.pos += 1 self.gotonext() return EMPTYSTRING.join(aslist) + self.getdomain() def getdomain(self): """Get the complete domain name from an address.""" sdlist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS: self.pos += 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] == '[': sdlist.append(self.getdomainliteral()) elif self.field[self.pos] == '.': self.pos += 1 sdlist.append('.') elif self.field[self.pos] in self.atomends: break else: sdlist.append(self.getatom()) return EMPTYSTRING.join(sdlist) def getdelimited(self, beginchar, endchars, allowcomments=True): """Parse a header fragment 
delimited by special characters. `beginchar' is the start character for the fragment. If self is not looking at an instance of `beginchar' then getdelimited returns the empty string. `endchars' is a sequence of allowable end-delimiting characters. Parsing stops when one of these is encountered. If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed within the parsed fragment. """ if self.field[self.pos] != beginchar: return '' slist = [''] quote = False self.pos += 1 while self.pos < len(self.field): if quote: slist.append(self.field[self.pos]) quote = False elif self.field[self.pos] in endchars: self.pos += 1 break elif allowcomments and self.field[self.pos] == '(': slist.append(self.getcomment()) continue # have already advanced pos from getcomment elif self.field[self.pos] == '\\': quote = True else: slist.append(self.field[self.pos]) self.pos += 1 return EMPTYSTRING.join(slist) def getquote(self): """Get a quote-delimited fragment from self's field.""" return self.getdelimited('"', '"\r', False) def getcomment(self): """Get a parenthesis-delimited fragment from self's field.""" return self.getdelimited('(', ')\r', True) def getdomainliteral(self): """Parse an RFC 2822 domain-literal.""" return '[%s]' % self.getdelimited('[', ']\r', False) def getatom(self, atomends=None): """Parse an RFC 2822 atom. Optional atomends specifies a different set of end token delimiters (the default is to use self.atomends). This is used e.g. in getphraselist() since phrase endings must not include the `.' (which is legal in phrases).""" atomlist = [''] if atomends is None: atomends = self.atomends while self.pos < len(self.field): if self.field[self.pos] in atomends: break else: atomlist.append(self.field[self.pos]) self.pos += 1 return EMPTYSTRING.join(atomlist) def getphraselist(self): """Parse a sequence of RFC 2822 phrases. A phrase is a sequence of words, which are in turn either RFC 2822 atoms or quoted-strings. 
Phrases are canonicalized by squeezing all runs of continuous whitespace into one space. """ plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.FWS: self.pos += 1 elif self.field[self.pos] == '"': plist.append(self.getquote()) elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] in self.phraseends: break else: plist.append(self.getatom(self.phraseends)) return plist class AddressList(AddrlistClass): """An AddressList encapsulates a list of parsed RFC 2822 addresses.""" def __init__(self, field): AddrlistClass.__init__(self, field) if field: self.addresslist = self.getaddrlist() else: self.addresslist = [] def __len__(self): return len(self.addresslist) def __add__(self, other): # Set union newaddr = AddressList(None) newaddr.addresslist = self.addresslist[:] for x in other.addresslist: if not x in self.addresslist: newaddr.addresslist.append(x) return newaddr def __iadd__(self, other): # Set union, in-place for x in other.addresslist: if not x in self.addresslist: self.addresslist.append(x) return self def __sub__(self, other): # Set difference newaddr = AddressList(None) for x in self.addresslist: if not x in other.addresslist: newaddr.addresslist.append(x) return newaddr def __isub__(self, other): # Set difference, in-place for x in other.addresslist: if x in self.addresslist: self.addresslist.remove(x) return self def __getitem__(self, index): # Make indexing, slices, and 'in' work return self.addresslist[index]
gpl-3.0
sikmir/QGIS
tests/src/python/test_qgslocator.py
3
16200
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsLocator. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = '(C) 2017 by Nyall Dawson' __date__ = '6/05/2017' __copyright__ = 'Copyright 2017, The QGIS Project' import qgis # NOQA import os from qgis.core import (QgsLocator, QgsLocatorFilter, QgsLocatorContext, QgsLocatorResult, QgsLocatorModel, QgsLocatorProxyModel, QgsLocatorAutomaticModel, QgsSettings) from qgis.PyQt.QtCore import QVariant, pyqtSignal, QCoreApplication from time import sleep from qgis.testing import start_app, unittest from qgis.PyQt import sip start_app() class test_filter(QgsLocatorFilter): def __init__(self, identifier, prefix=None, groupResult=False, parent=None): super().__init__(parent) self.identifier = identifier self._prefix = prefix self.groupResult = groupResult def clone(self): return test_filter(self.identifier, self.prefix, self.groupResult) def name(self): return 'test_' + self.identifier def displayName(self): return 'test_' + self.identifier def prefix(self): return self._prefix def fetchResults(self, string, context, feedback): n = 3 if not self.groupResult else 9 for i in range(n): if feedback.isCanceled(): return sleep(0.001) result = QgsLocatorResult() result.displayString = self.identifier + str(i) if self.groupResult: if i < 6: result.group = 'first group' elif i < 8: result.group = 'second group' self.resultFetched.emit(result) def triggerResult(self, result): pass def priority(self): if self.identifier == 'a': return QgsLocatorFilter.High elif self.identifier == 'b': return QgsLocatorFilter.Medium elif self.identifier == 'c': return QgsLocatorFilter.Low else: return QgsLocatorFilter.Medium class TestQgsLocator(unittest.TestCase): def testRegisteringFilters(self): l = QgsLocator() filter_a = test_filter('a') 
filter_b = test_filter('b') l.registerFilter(filter_a) l.registerFilter(filter_b) self.assertEqual(set(l.filters()), {filter_a, filter_b}) # ownership should be transferred to locator del l self.assertTrue(sip.isdeleted(filter_a)) self.assertTrue(sip.isdeleted(filter_b)) # try manually deregistering l = QgsLocator() filter_c = test_filter('c') filter_d = test_filter('d') l.registerFilter(filter_c) l.registerFilter(filter_d) self.assertEqual(set(l.filters()), {filter_c, filter_d}) l.deregisterFilter(filter_c) self.assertTrue(sip.isdeleted(filter_c)) self.assertFalse(sip.isdeleted(filter_d)) self.assertEqual(l.filters(), [filter_d]) del l self.assertTrue(sip.isdeleted(filter_c)) self.assertTrue(sip.isdeleted(filter_d)) def testFetchingResults(self): def got_hit(result): got_hit._results_.append(result.displayString) got_hit._results_ = [] context = QgsLocatorContext() # one filter l = QgsLocator() filter_a = test_filter('a') l.registerFilter(filter_a) l.foundResult.connect(got_hit) l.fetchResults('a', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2'}) # two filters filter_b = test_filter('b') l.registerFilter(filter_b) got_hit._results_ = [] l.fetchResults('a', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2', 'b0', 'b1', 'b2'}) def testDeleteWhileFetchingResults(self): """ Delete locator whilst fetching results """ def got_hit(result): got_hit._results_.append(result.displayString) got_hit._results_ = [] context = QgsLocatorContext() l = QgsLocator() filter_a = test_filter('a') l.registerFilter(filter_a) l.foundResult.connect(got_hit) l.fetchResults('a', context) del l def testCancelWhileFetchingResults(self): """ Cancel locator whilst fetching results """ def got_hit(result): got_hit._results_.append(result.displayString) got_hit._results_ = [] context = QgsLocatorContext() l = QgsLocator() 
filter_a = test_filter('a') l.registerFilter(filter_a) l.foundResult.connect(got_hit) l.fetchResults('a', context) l.cancel() def testPrefixes(self): """ Test custom (active) prefixes """ def got_hit(result): got_hit._results_.append(result.displayString) got_hit._results_ = [] context = QgsLocatorContext() l = QgsLocator() # filter with prefix filter_a = test_filter('a', 'aaa') l.registerFilter(filter_a) self.assertEqual(filter_a.prefix(), 'aaa') self.assertEqual(filter_a.activePrefix(), 'aaa') self.assertEqual(filter_a.useWithoutPrefix(), True) l.foundResult.connect(got_hit) l.fetchResults('aaa a', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2'}) got_hit._results_ = [] l.fetchResults('bbb b', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2'}) got_hit._results_ = [] filter_a.setUseWithoutPrefix(False) self.assertEqual(filter_a.useWithoutPrefix(), False) l.fetchResults('bbb b', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(got_hit._results_, []) got_hit._results_ = [] # test with two filters filter_b = test_filter('b', 'bbb') l.registerFilter(filter_b) self.assertEqual(filter_b.prefix(), 'bbb') self.assertEqual(filter_b.activePrefix(), 'bbb') got_hit._results_ = [] l.fetchResults('bbb b', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(set(got_hit._results_), {'b0', 'b1', 'b2'}) l.deregisterFilter(filter_b) # test with two filters with same prefix filter_b = test_filter('b', 'aaa') l.registerFilter(filter_b) self.assertEqual(filter_b.prefix(), 'aaa') self.assertEqual(filter_b.activePrefix(), 'aaa') got_hit._results_ = [] l.fetchResults('aaa b', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2', 'b0', 'b1', 'b2'}) 
l.deregisterFilter(filter_b) # filter with invalid prefix (less than 3 char) filter_c = test_filter('c', 'bb') l.registerFilter(filter_c) self.assertEqual(filter_c.prefix(), 'bb') self.assertEqual(filter_c.activePrefix(), '') got_hit._results_ = [] l.fetchResults('b', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(set(got_hit._results_), {'c0', 'c1', 'c2'}) l.deregisterFilter(filter_c) # filter with custom prefix QgsSettings().setValue("locator_filters/prefix_test_custom", 'xyz', QgsSettings.Gui) filter_c = test_filter('custom', 'abc') l.registerFilter(filter_c) self.assertEqual(filter_c.prefix(), 'abc') self.assertEqual(filter_c.activePrefix(), 'xyz') got_hit._results_ = [] l.fetchResults('b', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(set(got_hit._results_), {'custom0', 'custom1', 'custom2'}) l.deregisterFilter(filter_c) del l def testModel(self): m = QgsLocatorModel() p = QgsLocatorProxyModel(m) p.setSourceModel(m) l = QgsLocator() filter_a = test_filter('a') l.registerFilter(filter_a) l.foundResult.connect(m.addResult) context = QgsLocatorContext() l.fetchResults('a', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() # 4 results - one is locator name self.assertEqual(p.rowCount(), 4) self.assertEqual(p.data(p.index(0, 0)), 'test_a') self.assertEqual(p.data(p.index(0, 0), QgsLocatorModel.ResultTypeRole), 0) self.assertEqual(p.data(p.index(0, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a') self.assertEqual(p.data(p.index(1, 0)), 'a0') self.assertEqual(p.data(p.index(1, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) self.assertEqual(p.data(p.index(1, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a') self.assertEqual(p.data(p.index(2, 0)), 'a1') self.assertEqual(p.data(p.index(2, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) self.assertEqual(p.data(p.index(2, 0), QgsLocatorModel.ResultFilterNameRole), 
'test_a') self.assertEqual(p.data(p.index(3, 0)), 'a2') self.assertEqual(p.data(p.index(3, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) self.assertEqual(p.data(p.index(3, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a') m.clear() self.assertEqual(p.rowCount(), 0) l.fetchResults('b', context) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(p.rowCount(), 4) self.assertEqual(p.data(p.index(1, 0)), 'a0') self.assertEqual(p.data(p.index(2, 0)), 'a1') self.assertEqual(p.data(p.index(3, 0)), 'a2') m.deferredClear() # should not be immediately cleared! self.assertEqual(p.rowCount(), 4) for i in range(100): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(p.rowCount(), 0) m.clear() # test with groups self.assertEqual(p.rowCount(), 0) filter_b = test_filter('b', None, True) l.registerFilter(filter_b) l.fetchResults('c', context) for i in range(200): sleep(0.002) QCoreApplication.processEvents() self.assertEqual(p.rowCount(), 16) # 1 title a + 3 results + 1 title b + 2 groups + 9 results self.assertEqual(p.data(p.index(0, 0)), 'test_a') self.assertEqual(p.data(p.index(0, 0), QgsLocatorModel.ResultTypeRole), 0) self.assertEqual(p.data(p.index(1, 0)), 'a0') self.assertEqual(p.data(p.index(1, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) self.assertEqual(p.data(p.index(2, 0)), 'a1') self.assertEqual(p.data(p.index(2, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) self.assertEqual(p.data(p.index(3, 0)), 'a2') self.assertEqual(p.data(p.index(3, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) self.assertEqual(p.data(p.index(4, 0)), 'test_b') self.assertEqual(p.data(p.index(4, 0), QgsLocatorModel.ResultTypeRole), 0) self.assertEqual(p.data(p.index(4, 0), QgsLocatorModel.ResultFilterNameRole), 'test_b') self.assertEqual(p.data(p.index(5, 0)).strip(), 'first group') self.assertEqual(p.data(p.index(5, 0), QgsLocatorModel.ResultTypeRole), 1) 
self.assertEqual(p.data(p.index(6, 0)), 'b0') self.assertEqual(p.data(p.index(6, 0), QgsLocatorModel.ResultTypeRole), 1) self.assertEqual(p.data(p.index(7, 0)), 'b1') self.assertEqual(p.data(p.index(7, 0), QgsLocatorModel.ResultTypeRole), 1) self.assertEqual(p.data(p.index(8, 0)), 'b2') self.assertEqual(p.data(p.index(8, 0), QgsLocatorModel.ResultTypeRole), 1) self.assertEqual(p.data(p.index(9, 0)), 'b3') self.assertEqual(p.data(p.index(9, 0), QgsLocatorModel.ResultTypeRole), 1) self.assertEqual(p.data(p.index(10, 0)), 'b4') self.assertEqual(p.data(p.index(10, 0), QgsLocatorModel.ResultTypeRole), 1) self.assertEqual(p.data(p.index(11, 0)), 'b5') self.assertEqual(p.data(p.index(11, 0), QgsLocatorModel.ResultTypeRole), 1) self.assertEqual(p.data(p.index(12, 0)).strip(), 'second group') self.assertEqual(p.data(p.index(12, 0), QgsLocatorModel.ResultTypeRole), 2) self.assertEqual(p.data(p.index(13, 0)), 'b6') self.assertEqual(p.data(p.index(13, 0), QgsLocatorModel.ResultTypeRole), 2) self.assertEqual(p.data(p.index(14, 0)), 'b7') self.assertEqual(p.data(p.index(14, 0), QgsLocatorModel.ResultTypeRole), 2) self.assertEqual(p.data(p.index(15, 0)), 'b8') self.assertEqual(p.data(p.index(15, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) def testAutoModel(self): """ Test automatic model, QgsLocatorAutomaticModel - should be no need for any manual connections """ l = QgsLocator() m = QgsLocatorAutomaticModel(l) filter_a = test_filter('a') l.registerFilter(filter_a) m.search('a') for i in range(100): sleep(0.002) QCoreApplication.processEvents() # 4 results - one is locator name self.assertEqual(m.rowCount(), 4) self.assertEqual(m.data(m.index(0, 0)), 'test_a') self.assertEqual(m.data(m.index(0, 0), QgsLocatorModel.ResultTypeRole), 0) self.assertEqual(m.data(m.index(0, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a') self.assertEqual(m.data(m.index(1, 0)), 'a0') self.assertEqual(m.data(m.index(1, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) 
self.assertEqual(m.data(m.index(1, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a') self.assertEqual(m.data(m.index(2, 0)), 'a1') self.assertEqual(m.data(m.index(2, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) self.assertEqual(m.data(m.index(2, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a') self.assertEqual(m.data(m.index(3, 0)), 'a2') self.assertEqual(m.data(m.index(3, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup) self.assertEqual(m.data(m.index(3, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a') m.search('a') for i in range(100): sleep(0.002) QCoreApplication.processEvents() # 4 results - one is locator name self.assertEqual(m.rowCount(), 4) self.assertEqual(m.data(m.index(0, 0)), 'test_a') self.assertEqual(m.data(m.index(1, 0)), 'a0') self.assertEqual(m.data(m.index(2, 0)), 'a1') self.assertEqual(m.data(m.index(3, 0)), 'a2') def testStringMatches(self): self.assertFalse(QgsLocatorFilter.stringMatches('xxx', 'yyyy')) self.assertTrue(QgsLocatorFilter.stringMatches('axxxy', 'xxx')) self.assertTrue(QgsLocatorFilter.stringMatches('aXXXXy', 'xxx')) self.assertFalse(QgsLocatorFilter.stringMatches('aXXXXy', '')) if __name__ == '__main__': unittest.main()
gpl-2.0
helixyte/everest
everest/repositories/filesystem/repository.py
1
3673
""" File system repository. This file is part of the everest project. See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information. Created on Jan 7, 2013. """ from everest.mime import CsvMime from everest.repositories.memory.repository import MemoryRepository from everest.repositories.memory.repository import MemorySessionFactory from everest.resources.storing import dump_resource from everest.resources.storing import get_read_collection_path from everest.resources.storing import get_write_collection_path from everest.resources.storing import load_collection_from_url from everest.resources.utils import get_collection_class from everest.resources.utils import get_root_collection import os __all__ = ['FileSystemRepository', ] class FileSystemRepository(MemoryRepository): """ Repository using the file system as storage. On initialization, this repository loads resource representations from files into the root repository. Each commit operation writes the specified resource back to file. """ _configurables = MemoryRepository._configurables \ + ['directory', 'content_type'] def __init__(self, name, aggregate_class=None, join_transaction=True, autocommit=False): MemoryRepository.__init__(self, name, aggregate_class=aggregate_class, join_transaction=join_transaction, autocommit=autocommit) self.configure(directory=os.getcwd(), content_type=CsvMime, cache_loader=self.__load_entities) def commit(self, unit_of_work): """ Dump all resources that were modified by the given session back into the repository. 
""" MemoryRepository.commit(self, unit_of_work) if self.is_initialized: entity_classes_to_dump = set() for state in unit_of_work.iterator(): entity_classes_to_dump.add(type(state.entity)) for entity_cls in entity_classes_to_dump: self.__dump_entities(entity_cls) def _make_session_factory(self): return MemorySessionFactory(self) def __load_entities(self, entity_class): coll_cls = get_collection_class(entity_class) fn = get_read_collection_path(coll_cls, self._config['content_type'], directory=self._config['directory']) if not fn is None: url = 'file://%s' % fn coll = load_collection_from_url(coll_cls, url, content_type= self._config['content_type']) ents = [mb.get_entity() for mb in coll] else: ents = [] return ents def __dump_entities(self, entity_class): coll = get_root_collection(entity_class) # coll_cls = get_collection_class(entity_class) fn = get_write_collection_path(coll, #_cls, self._config['content_type'], directory=self._config['directory']) # # Wrap the entities in a temporary collection. # coll = create_staging_collection(coll_cls) # mb_cls = get_member_class(entity_class) # for ent in self.retrieve(entity_class): # coll.add(mb_cls.create_from_entity(ent)) # Open stream for writing and dump the collection. stream = open(fn, 'w') with stream: dump_resource(coll, stream, content_type=self._config['content_type'])
mit
hzlf/openbroadcast
website/apps/__rework_in_progress/importer/migrations/0003_auto__add_importfile.py
2
7260
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'ImportFile' db.create_table('importer_importfile', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)), ('updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)), ('filename', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)), ('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)), )) db.send_create_signal('importer', ['ImportFile']) def backwards(self, orm): # Deleting model 'ImportFile' db.delete_table('importer_importfile') models = { 'actstream.action': { 'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'}, 'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}), 'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': 
"orm['contenttypes.ContentType']"}), 'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'importer.import': { 'Meta': {'ordering': "('-created',)", 'object_name': 'Import'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'web'", 'max_length': "'10'"}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'import_user'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}) }, 'importer.importfile': { 'Meta': {'ordering': "('-created',)", 'object_name': 'ImportFile'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'file': 
('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}) } } complete_apps = ['importer']
gpl-3.0
mancoast/CPythonPyc_test
cpython/211_test_fork1.py
15
1688
"""This test checks for correct fork() behavior. We want fork1() semantics -- only the forking thread survives in the child after a fork(). On some systems (e.g. Solaris without posix threads) we find that all active threads survive in the child after a fork(); this is an error. While BeOS doesn't officially support fork and native threading in the same application, the present example should work just fine. DC """ import os, sys, time, thread from test_support import verify, verbose, TestSkipped try: os.fork except AttributeError: raise TestSkipped, "os.fork not defined -- skipping test_fork1" LONGSLEEP = 2 SHORTSLEEP = 0.5 NUM_THREADS = 4 alive = {} stop = 0 def f(id): while not stop: alive[id] = os.getpid() try: time.sleep(SHORTSLEEP) except IOError: pass def main(): for i in range(NUM_THREADS): thread.start_new(f, (i,)) time.sleep(LONGSLEEP) a = alive.keys() a.sort() verify(a == range(NUM_THREADS)) prefork_lives = alive.copy() if sys.platform in ['unixware7']: cpid = os.fork1() else: cpid = os.fork() if cpid == 0: # Child time.sleep(LONGSLEEP) n = 0 for key in alive.keys(): if alive[key] != prefork_lives[key]: n = n+1 os._exit(n) else: # Parent spid, status = os.waitpid(cpid, 0) verify(spid == cpid) verify(status == 0, "cause = %d, exit = %d" % (status&0xff, status>>8) ) global stop # Tell threads to die stop = 1 time.sleep(2*SHORTSLEEP) # Wait for threads to die main()
gpl-3.0
llooker/python_sdk
test/test_render_task_api.py
1
3241
# coding: utf-8 """ Looker API 3.0 Reference ### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. 
For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning) OpenAPI spec version: 3.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import os import sys import unittest import swagger_client from swagger_client.rest import ApiException from swagger_client.apis.render_task_api import RenderTaskApi class TestRenderTaskApi(unittest.TestCase): """ RenderTaskApi unit test stubs """ def setUp(self): self.api = swagger_client.apis.render_task_api.RenderTaskApi() def tearDown(self): pass def test_create_dashboard_render_task(self): """ Test case for create_dashboard_render_task Create Dashboard Render Task """ pass def test_create_look_render_task(self): """ Test case for create_look_render_task Create Look Render Task """ pass def test_create_lookml_dashboard_render_task(self): """ Test case for create_lookml_dashboard_render_task Create Lookml Dashboard Render Task """ pass def test_create_query_render_task(self): """ Test case for create_query_render_task Create Query Render Task """ pass def test_render_task(self): """ Test case for render_task Get Render Task """ pass def test_render_task_results(self): """ Test case for render_task_results Render Task Results """ pass if __name__ == '__main__': unittest.main()
mit
AndrewSallans/osf.io
framework/mongo/handlers.py
1
2660
# -*- coding: utf-8 -*-

import logging

from flask import g
from pymongo import MongoClient
from werkzeug.local import LocalProxy

from website import settings

logger = logging.getLogger(__name__)


def get_mongo_client():
    """Build a ``MongoClient`` and authenticate the configured database.

    Returns the client (not the database handle) so callers can manage its
    lifecycle, e.g. close it at the end of a request.
    """
    uri = 'mongodb://localhost:{port}'.format(port=settings.DB_PORT)
    mongo_client = MongoClient(uri)
    # Authentication happens on the database handle, but the client object
    # is what gets attached to the request / cached at module level.
    db_handle = mongo_client[settings.DB_NAME]
    if settings.DB_USER and settings.DB_PASS:
        db_handle.authenticate(settings.DB_USER, settings.DB_PASS)
    return mongo_client


def connection_before_request():
    """Attach a fresh MongoDB client to the request context (``g``)."""
    g._mongo_client = get_mongo_client()


def connection_teardown_request(error=None):
    """Close the request-scoped MongoDB client, if one was attached."""
    try:
        g._mongo_client.close()
    except AttributeError:
        # Outside of debug mode, a missing client indicates a real problem.
        if not settings.DEBUG_MODE:
            logger.error('MongoDB client not attached to request.')


handlers = {
    'before_request': connection_before_request,
    'teardown_request': connection_teardown_request,
}

# Fallback client, used when no request-scoped client is available.
_mongo_client = get_mongo_client()


def _get_current_client():
    """Return the request's client, falling back to the module default.

    ``AttributeError`` means nothing was attached to ``g``; ``RuntimeError``
    means there is no request context at all.
    """
    try:
        return g._mongo_client
    except (AttributeError, RuntimeError):
        return _mongo_client


def _get_current_database():
    """Return the configured database from the current client."""
    return _get_current_client()[settings.DB_NAME]


# Proxies that always resolve to the "current" client/database.
client = LocalProxy(_get_current_client)
database = LocalProxy(_get_current_database)


def set_up_storage(schemas, storage_class, prefix='', addons=None, **kwargs):
    '''Attach a ``storage_class`` backend to every schema in ``schemas``.

    Addon models, when given, are configured the same way. Extra keyword
    arguments are forwarded to the ``storage_class`` constructor.

    Example usage with modular-odm and pymongo: ::

        >>> from pymongo import MongoClient
        >>> from modularodm.storage import MongoStorage
        >>> from models import User, ApiKey, Node, Tag
        >>> client = MongoClient(port=20771)
        >>> db = client['mydb']
        >>> models = [User, ApiKey, Node, Tag]
        >>> set_up_storage(models, MongoStorage)
    '''
    all_schemas = list(schemas)
    for addon in (addons or []):
        all_schemas.extend(addon.models)
    for schema in all_schemas:
        schema.set_storage(
            storage_class(
                db=database,
                collection='{0}{1}'.format(prefix, schema._name),
                **kwargs
            )
        )
apache-2.0
js0701/chromium-crosswalk
tools/cr/cr/commands/shell.py
103
1724
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A module for the shell command.""" import os import tempfile import cr class ShellCommand(cr.Command): """The implementation of the shell command. The shell command is the escape hatch that lets user run any program in the same environment that cr would use if it were running it. """ def __init__(self): super(ShellCommand, self).__init__() self.help = 'Launch a shell' self.description = (""" If no arguments are present, this launches an interactive system shell (ie bash) with the environment modified to that used for the build systems. If any arguments are present, they are used as a command line to run in that shell. This allows you to run commands that are not yet available natively in cr. """) def AddArguments(self, subparsers): parser = super(ShellCommand, self).AddArguments(subparsers) self.ConsumeArgs(parser, 'the shell') return parser def Run(self): if cr.context.remains: cr.Host.Shell(*cr.context.remains) return # If we get here, we are trying to launch an interactive shell shell = os.environ.get('SHELL', None) if shell is None: print 'Don\'t know how to run a shell on this system' elif shell.endswith('bash'): ps1 = '[CR] ' + os.environ.get('PS1', '') with tempfile.NamedTemporaryFile() as rcfile: rcfile.write('source ~/.bashrc\nPS1="'+ps1+'"') rcfile.flush() cr.Host.Execute(shell, '--rcfile', rcfile.name) else: cr.Host.Execute(shell)
bsd-3-clause
emergebtc/muddery
templates/example/server/conf/connection_screens.py
1
1277
# -*- coding: utf-8 -*- """ Connection screen Texts in this module will be shown to the user at login-time. Muddery will look at global string variables (variables defined at the "outermost" scope of this module and use it as the connection screen. If there are more than one, Muddery will randomize which one it displays. The commands available to the user when the connection screen is shown are defined in commands.default_cmdsets.UnloggedinCmdSet and the screen is read and displayed by the unlogged-in "look" command. """ from django.conf import settings from muddery.utils import utils CONNECTION_SCREEN = \ """{b=============================================================={n Welcome to the demo game of Muddery! This version is created on Sep. 1, 2015. This demo game is based on Muddery, an open-source online text game server. If you are interested in it, please visit our website www.muddery.org. The map of this game is developed from Evennia's tutorial world. Please register or login! {rNotice! This game is for demo only, all players' register info and game data may be lost frequently!{n {b=============================================================={n""" # % (settings.SERVERNAME, utils.get_muddery_version())
bsd-3-clause
blueboxgroup/nova
nova/api/openstack/compute/plugins/v3/pause_server.py
12
3487
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
#   Licensed under the Apache License, Version 2.0 (the "License"); you may
#   not use this file except in compliance with the License. You may obtain
#   a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#   License for the specific language governing permissions and limitations
#   under the License.

from webob import exc

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _

ALIAS = "os-pause-server"

authorize = extensions.os_compute_authorizer(ALIAS)


class PauseServerController(wsgi.Controller):
    """Controller exposing the ``pause``/``unpause`` server actions."""

    def __init__(self, *args, **kwargs):
        super(PauseServerController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API(skip_policy_check=True)

    @wsgi.response(202)
    @extensions.expected_errors((404, 409, 501))
    @wsgi.action('pause')
    def _pause(self, req, id, body):
        """Permit Admins to pause the server.

        :raises: HTTPConflict if the instance is locked or in an invalid
            state, HTTPNotFound if it does not exist, HTTPNotImplemented if
            the virt driver lacks pause support.
        """
        ctxt = req.environ['nova.context']
        authorize(ctxt, action='pause')
        server = common.get_instance(self.compute_api, ctxt, id)
        try:
            self.compute_api.pause(ctxt, server)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'pause', id)
        except exception.InstanceNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except NotImplementedError:
            msg = _("Virt driver does not implement pause function.")
            raise exc.HTTPNotImplemented(explanation=msg)

    @wsgi.response(202)
    @extensions.expected_errors((404, 409, 501))
    @wsgi.action('unpause')
    def _unpause(self, req, id, body):
        """Permit Admins to unpause the server.

        :raises: HTTPConflict if the instance is locked or in an invalid
            state, HTTPNotFound if it does not exist, HTTPNotImplemented if
            the virt driver lacks unpause support.
        """
        ctxt = req.environ['nova.context']
        authorize(ctxt, action='unpause')
        server = common.get_instance(self.compute_api, ctxt, id)
        try:
            self.compute_api.unpause(ctxt, server)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'unpause', id)
        except exception.InstanceNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except NotImplementedError:
            # FIX: this previously said "pause function" (copy-paste from
            # _pause above); the unpause action must report unpause.
            msg = _("Virt driver does not implement unpause function.")
            raise exc.HTTPNotImplemented(explanation=msg)


class PauseServer(extensions.V3APIExtensionBase):
    """Enable pause/unpause server actions."""

    name = "PauseServer"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        controller = PauseServerController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]

    def get_resources(self):
        return []
apache-2.0
lupyuen/RaspberryPiImage
home/pi/GrovePi/Software/Python/others/temboo/Library/Amazon/IAM/PutUserPolicy.py
5
4374
# -*- coding: utf-8 -*-

###############################################################################
#
# PutUserPolicy
# Adds or updates a policy document associated with a specified user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json


class PutUserPolicy(Choreography):
    """Choreo wrapper for the Amazon IAM PutUserPolicy operation."""

    def __init__(self, temboo_session):
        """Bind this Choreo to *temboo_session*, a valid TembooSession object."""
        super(PutUserPolicy, self).__init__(temboo_session, '/Library/Amazon/IAM/PutUserPolicy')

    def new_input_set(self):
        return PutUserPolicyInputSet()

    def _make_result_set(self, result, path):
        return PutUserPolicyResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return PutUserPolicyChoreographyExecution(session, exec_id, path)


class PutUserPolicyInputSet(InputSet):
    """Setters for the inputs accepted by the PutUserPolicy Choreo.

    Use these to specify input parameters before executing the Choreo.
    """

    def set_AWSAccessKeyId(self, value):
        """(required, string) The Access Key ID provided by Amazon Web Services."""
        super(PutUserPolicyInputSet, self)._set_input('AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """(required, string) The Secret Key ID provided by Amazon Web Services."""
        super(PutUserPolicyInputSet, self)._set_input('AWSSecretKeyId', value)

    def set_PolicyDocument(self, value):
        """(required, json) The policy document. See documentation for formatting examples."""
        super(PutUserPolicyInputSet, self)._set_input('PolicyDocument', value)

    def set_PolicyName(self, value):
        """(required, string) The name of the policy document."""
        super(PutUserPolicyInputSet, self)._set_input('PolicyName', value)

    def set_ResponseFormat(self, value):
        """(optional, string) The response format: "xml" (the default) or "json"."""
        super(PutUserPolicyInputSet, self)._set_input('ResponseFormat', value)

    def set_UserName(self, value):
        """(required, string) The name of the user to associate the policy with."""
        super(PutUserPolicyInputSet, self)._set_input('UserName', value)


class PutUserPolicyResultSet(ResultSet):
    """Accessors for the values returned by a PutUserPolicy execution."""

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``; it is
        # kept because it is part of the generated public signature.
        return json.loads(str)

    def get_Response(self):
        """The response from Amazon."""
        return self._output.get('Response', None)


class PutUserPolicyChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        return PutUserPolicyResultSet(response, path)
apache-2.0
jcpowermac/ansible
lib/ansible/plugins/callback/dense.py
38
17316
# (c) 2016, Dag Wieers <dag@wieers.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    callback: dense
    type: stdout
    short_description: minimal stdout output
    extends_documentation_fragment:
      - default_callback
    description:
      - When in verbose mode it will act the same as the default callback
    version_added: "2.3"
    requirements:
      - set as stdout in configuation
'''

# NOTE(review): these ABCs moved to collections.abc in Python 3.3 and this
# import path is removed in 3.10 -- confirm the supported interpreter range
# before modernising.
from collections import MutableMapping, MutableSequence

# OrderedDict is required for per-host bookkeeping; without it the plugin
# disables itself (see CallbackModule_dense.__init__).
HAS_OD = False
try:
    from collections import OrderedDict
    HAS_OD = True
except ImportError:
    pass

from ansible.module_utils.six import binary_type, text_type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
from ansible.utils.color import colorize, hostcolor

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

import sys

# Design goals:
#
# + On screen there should only be relevant stuff
#   - How far are we ? (during run, last line)
#   - What issues occurred
#   - What changes occurred
#   - Diff output (in diff-mode)
#
# + If verbosity increases, act as default output
#   So that users can easily switch to default for troubleshooting
#
# + Rewrite the output during processing
#   - We use the cursor to indicate where in the task we are.
#     Output after the prompt is the output of the previous task.
#   - If we would clear the line at the start of a task, there would often
#     be no information at all, so we leave it until it gets updated
#
# + Use the same color-conventions of Ansible
#
# + Ensure the verbose output (-v) is also dense.
#   Remove information that is not essential (eg. timestamps, status)


# TODO:
#
# + Properly test for terminal capabilities, and fall back to default
# + Modify Ansible mechanism so we don't need to use sys.stdout directly
# + Find an elegant solution for progress bar line wrapping


# FIXME: Importing constants as C simply does not work, beats me :-/
# from ansible import constants as C
class C:
    # Local stand-in for ansible.constants colour names (see FIXME above).
    COLOR_HIGHLIGHT = 'white'
    COLOR_VERBOSE = 'blue'
    COLOR_WARN = 'bright purple'
    COLOR_ERROR = 'red'
    COLOR_DEBUG = 'dark gray'
    COLOR_DEPRECATE = 'purple'
    COLOR_SKIP = 'cyan'
    COLOR_UNREACHABLE = 'bright red'
    COLOR_OK = 'green'
    COLOR_CHANGED = 'yellow'


# Taken from Dstat
class vt100:
    # Raw VT100/ANSI escape sequences used to colour and rewrite the
    # terminal in place.
    black = '\033[0;30m'
    darkred = '\033[0;31m'
    darkgreen = '\033[0;32m'
    darkyellow = '\033[0;33m'
    darkblue = '\033[0;34m'
    darkmagenta = '\033[0;35m'
    darkcyan = '\033[0;36m'
    gray = '\033[0;37m'

    darkgray = '\033[1;30m'
    red = '\033[1;31m'
    green = '\033[1;32m'
    yellow = '\033[1;33m'
    blue = '\033[1;34m'
    magenta = '\033[1;35m'
    cyan = '\033[1;36m'
    white = '\033[1;37m'

    blackbg = '\033[40m'
    redbg = '\033[41m'
    greenbg = '\033[42m'
    yellowbg = '\033[43m'
    bluebg = '\033[44m'
    magentabg = '\033[45m'
    cyanbg = '\033[46m'
    whitebg = '\033[47m'

    reset = '\033[0;0m'
    bold = '\033[1m'
    reverse = '\033[2m'
    underline = '\033[4m'

    clear = '\033[2J'
#    clearline = '\033[K'
    clearline = '\033[2K'
    save = '\033[s'
    restore = '\033[u'
    save_all = '\0337'
    restore_all = '\0338'
    linewrap = '\033[7h'
    nolinewrap = '\033[7l'

    up = '\033[1A'
    down = '\033[1B'
    right = '\033[1C'
    left = '\033[1D'


# Colour per final host state; 'ignored' is a failed task with
# ignore_errors set.
colors = dict(
    ok=vt100.darkgreen,
    changed=vt100.darkyellow,
    skipped=vt100.darkcyan,
    ignored=vt100.cyanbg + vt100.red,
    failed=vt100.darkred,
    unreachable=vt100.red,
)

# Severity ordering: a host's displayed state may only move rightwards
# (see _add_host).
states = ('skipped', 'ok', 'changed', 'failed', 'unreachable')


class CallbackModule_dense(CallbackModule_default):

    '''
    This is the dense callback interface, where screen estate is still valued.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'dense'

    def __init__(self):
        # From CallbackModule
        self._display = display

        if HAS_OD:

            self.disabled = False
            # NOTE: ``CallbackModule`` is the alias bound at the bottom of
            # this file, not this class directly.
            self.super_ref = super(CallbackModule, self)
            self.super_ref.__init__()

            # Attributes to remove from results for more density
            self.removed_attributes = (
                #  'changed',
                'delta',
                #  'diff',
                'end',
                'failed',
                'failed_when_result',
                'invocation',
                'start',
                'stdout_lines',
            )

            # Initiate data structures
            self.hosts = OrderedDict()
            self.keep = False
            self.shown_title = False
            self.count = dict(play=0, handler=0, task=0)
            self.type = 'foo'

            # Start immediately on the first line
            sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
            sys.stdout.flush()
        else:
            display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.")
            self.disabled = True

    def __del__(self):
        # Leave the cursor on a fresh, saved line when the plugin goes away.
        sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)

    def _add_host(self, result, status):
        # Record (or upgrade) a host's state for the current task and
        # redraw the progress line.
        name = result._host.get_name()

        # Add a new status in case a failed task is ignored
        if status == 'failed' and result._task.ignore_errors:
            status = 'ignored'

        # Check if we have to update an existing state (when looping over items)
        if name not in self.hosts:
            self.hosts[name] = dict(state=status)
        elif states.index(self.hosts[name]['state']) < states.index(status):
            self.hosts[name]['state'] = status

        # Store delegated hostname, if needed
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        if delegated_vars:
            self.hosts[name]['delegate'] = delegated_vars['ansible_host']

        # Print progress bar
        self._display_progress(result)

#        # Ensure that tasks with changes/failures stay on-screen, and during diff-mode
#        if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._resultget('diff', False)):
        # Ensure that tasks with changes/failures stay on-screen
        if status in ['changed', 'failed', 'unreachable']:
            self.keep = True

            if self._display.verbosity == 1:
                # Print task title, if needed
                self._display_task_banner()
                self._display_results(result, status)

    def _clean_results(self, result):
        # Remove non-essential atributes
        for attr in self.removed_attributes:
            if attr in result:
                del(result[attr])

        # Remove empty attributes (list, dict, str)
        for attr in result.copy():
            if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)):
                if not result[attr]:
                    del(result[attr])

    def _handle_exceptions(self, result):
        # Replace a verbose traceback with a short pointer at low verbosity.
        if 'exception' in result:
            # Remove the exception from the result so it's not shown every time
            del result['exception']

            if self._display.verbosity == 1:
                return "An exception occurred during task execution. To see the full traceback, use -vvv."

    def _display_progress(self, result=None):
        # Always rewrite the complete line
        sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
        sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
        sys.stdout.write(vt100.reset)
        sys.stdout.flush()

        # Print out each host in its own status-color
        for name in self.hosts:
            sys.stdout.write(' ')
            if self.hosts[name].get('delegate', None):
                sys.stdout.write(self.hosts[name]['delegate'] + '>')
            sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
        sys.stdout.flush()

#        if result._result.get('diff', False):
#            sys.stdout.write('\n' + vt100.linewrap)
        sys.stdout.write(vt100.linewrap)

#        self.keep = True

    def _display_task_banner(self):
        # Print the full task title once per task (only at verbosity 1).
        if not self.shown_title:
            self.shown_title = True
            sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
            sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
            sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
            sys.stdout.flush()
        else:
            sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
        self.keep = False

    def _display_results(self, result, status):
        # Leave the previous task on screen (as it has changes/errors)
        if self._display.verbosity == 0 and self.keep:
            sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
        else:
            sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
        self.keep = False

        self._clean_results(result._result)

        dump = ''
        if result._task.action == 'include':
            return
        elif status == 'ok':
            return
        elif status == 'ignored':
            dump = self._handle_exceptions(result._result)
        elif status == 'failed':
            dump = self._handle_exceptions(result._result)
        elif status == 'unreachable':
            dump = result._result['msg']

        if not dump:
            dump = self._dump_results(result._result)

        if result._task.loop and 'results' in result._result:
            self._process_items(result)
        else:
            sys.stdout.write(colors[status] + status + ': ')

            delegated_vars = result._result.get('_ansible_delegated_vars', None)
            if delegated_vars:
                sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
            else:
                sys.stdout.write(result._host.get_name())

            sys.stdout.write(': ' + dump + '\n')
            sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
            sys.stdout.flush()

        if status == 'changed':
            self._handle_warnings(result._result)

    def v2_playbook_on_play_start(self, play):
        # Leave the previous task on screen (as it has changes/errors)
        if self._display.verbosity == 0 and self.keep:
            sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
        else:
            sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)

        # Reset at the start of each play
        self.keep = False
        self.count.update(dict(handler=0, task=0))
        self.count['play'] += 1
        self.play = play

        # Write the next play on screen IN UPPERCASE, and make it permanent
        name = play.get_name().strip()
        if not name:
            name = 'unnamed'
        sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
        sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
        sys.stdout.flush()

    def v2_playbook_on_task_start(self, task, is_conditional):
        # Leave the previous task on screen (as it has changes/errors)
        if self._display.verbosity == 0 and self.keep:
            sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
        else:
            # Do not clear line, since we want to retain the previous output
            sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)

        # Reset at the start of each task
        self.keep = False
        self.shown_title = False
        self.hosts = OrderedDict()
        self.task = task
        self.type = 'task'

        # Enumerate task if not setup (task names are too long for dense output)
        if task.get_name() != 'setup':
            self.count['task'] += 1

        # Write the next task on screen (behind the prompt is the previous output)
        sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
        sys.stdout.write(vt100.reset)
        sys.stdout.flush()

    def v2_playbook_on_handler_task_start(self, task):
        # Leave the previous task on screen (as it has changes/errors)
        if self._display.verbosity == 0 and self.keep:
            sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
        else:
            sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)

        # Reset at the start of each handler
        self.keep = False
        self.shown_title = False
        self.hosts = OrderedDict()
        self.task = task
        self.type = 'handler'

        # Enumerate handler if not setup (handler names may be too long for dense output)
        if task.get_name() != 'setup':
            self.count[self.type] += 1

        # Write the next task on screen (behind the prompt is the previous output)
        sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
        sys.stdout.write(vt100.reset)
        sys.stdout.flush()

    def v2_playbook_on_cleanup_task_start(self, task):
        # TBD
        sys.stdout.write('cleanup.')
        sys.stdout.flush()

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self._add_host(result, 'failed')

    def v2_runner_on_ok(self, result):
        if result._result.get('changed', False):
            self._add_host(result, 'changed')
        else:
            self._add_host(result, 'ok')

    def v2_runner_on_skipped(self, result):
        self._add_host(result, 'skipped')

    def v2_runner_on_unreachable(self, result):
        self._add_host(result, 'unreachable')

    def v2_runner_on_include(self, included_file):
        pass

    def v2_runner_on_file_diff(self, result, diff):
        sys.stdout.write(vt100.bold)
        self.super_ref.v2_runner_on_file_diff(result, diff)
        sys.stdout.write(vt100.reset)

    def v2_on_file_diff(self, result):
        sys.stdout.write(vt100.bold)
        self.super_ref.v2_on_file_diff(result)
        sys.stdout.write(vt100.reset)

    # Old definition in v2.0
    def v2_playbook_item_on_ok(self, result):
        self.v2_runner_item_on_ok(result)

    def v2_runner_item_on_ok(self, result):
        if result._result.get('changed', False):
            self._add_host(result, 'changed')
        else:
            self._add_host(result, 'ok')

    # Old definition in v2.0
    def v2_playbook_item_on_failed(self, result):
        self.v2_runner_item_on_failed(result)

    def v2_runner_item_on_failed(self, result):
        self._add_host(result, 'failed')

    # Old definition in v2.0
    def v2_playbook_item_on_skipped(self, result):
        self.v2_runner_item_on_skipped(result)

    def v2_runner_item_on_skipped(self, result):
        self._add_host(result, 'skipped')

    def v2_playbook_on_no_hosts_remaining(self):
        if self._display.verbosity == 0 and self.keep:
            sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
        else:
            sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
        self.keep = False

        sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
        sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
        sys.stdout.flush()

    def v2_playbook_on_include(self, included_file):
        pass

    def v2_playbook_on_stats(self, stats):
        if self._display.verbosity == 0 and self.keep:
            sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
        else:
            sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)

        # In normal mode screen output should be sufficient, summary is redundant
        if self._display.verbosity == 0:
            return

        sys.stdout.write(vt100.bold + vt100.underline)
        sys.stdout.write('SUMMARY')

        sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
        sys.stdout.flush()

        hosts = sorted(stats.processed.keys())
        for h in hosts:
            t = stats.summarize(h)
            self._display.display(
                u"%s : %s %s %s %s" % (
                    hostcolor(h, t),
                    colorize(u'ok', t['ok'], C.COLOR_OK),
                    colorize(u'changed', t['changed'], C.COLOR_CHANGED),
                    colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
                    colorize(u'failed', t['failures'], C.COLOR_ERROR)),
                screen_only=True
            )


# When using -vv or higher, simply do the default action
if display.verbosity >= 2 or not HAS_OD:
    CallbackModule = CallbackModule_default
else:
    CallbackModule = CallbackModule_dense
gpl-3.0
arael120/App-Beicor
node_modules/node-gyp/gyp/pylib/gyp/generator/analyzer.py
1382
30567
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way
of the generator flag config_path) the path of a json file that dictates the
files and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
test_targets: unqualified target names to search for. Any target in this list
that depends upon a file in |files| is output regardless of the type of target
or chain of dependencies.
additional_compile_targets: Unqualified targets to search for in addition to
test_targets. Targets in the combined list that depend upon a file in |files|
are not necessarily output. For example, if the target is of type none then the
target is not output (but one of the descendants of the target will be).

The following is output:
error: only supplied if there is an error.
compile_targets: minimal set of targets that directly or indirectly (for
  targets of type none) depend on the files in |files| and is one of the
  supplied targets or a target that one of the supplied targets depends on.
  The expectation is this set of targets is passed into a build step. This list
  always contains the output of test_targets as well.
test_targets: set of targets from the supplied |test_targets| that either
  directly or indirectly depend upon a file in |files|. This list is useful
  if additional processing needs to be done for certain targets after the
  build, such as running tests.
status: outputs one of three values: none of the supplied files were found,
  one of the include files changed so that it should be assumed everything
  changed (in this case test_targets and compile_targets are not output) or at
  least one file was found.
invalid_targets: list of supplied targets that were not found.
Example:
Consider a graph like the following:
  A     D
 / \
B   C
A depends upon both B and C, A is of type none and B and C are executables.
D is an executable, has no dependencies and nothing depends on it.
If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and
files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then
the following is output:
|compile_targets| = ["B"] B must be built as it depends upon the changed file
b.cc and the supplied target A depends upon it. A is not output as a
build_target as it is of type none with no rules and actions.
|test_targets| = ["B"] B directly depends upon the changed file b.cc.

Even though the file d.cc, which D depends upon, has changed D is not output
as it was not supplied by way of |additional_compile_targets| or |test_targets|.

If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.

In Gyp the "all" target is shorthand for the root targets in the files passed
to gyp. For example, if file "a.gyp" contains targets "a1" and "a2", and file
"b.gyp" contains targets "b1" and "b2" and "a2" has a dependency on "b2" and
gyp is supplied "a.gyp" then "all" consists of "a1" and "a2". Notice that "b1"
and "b2" are not in the "all" target as "b.gyp" was not directly supplied to
gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp then the "all"
target includes "b1" and "b2".
"""

import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys

debug = False

found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'

# MatchStatus is used to indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1 # The target has a dependency on another target that contains one of the # supplied paths. MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2 # The target's sources weren't in the supplied paths and none of the target's # dependencies depend upon a target that matched. MATCH_STATUS_DOESNT_MATCH = 3 # The target doesn't contain the source, but the dependent targets have not yet # been visited to determine a more specific status yet. MATCH_STATUS_TBD = 4 generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested() generator_wants_static_library_dependencies_adjusted = False generator_default_variables = { } for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: generator_default_variables[dirname] = '!!!' for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'CONFIGURATION_NAME']: generator_default_variables[unused] = '' def _ToGypPath(path): """Converts a path to the format used by gyp.""" if os.sep == '\\' and os.altsep == '/': return path.replace('\\', '/') return path def _ResolveParent(path, base_path_components): """Resolves |path|, which starts with at least one '../'. Returns an empty string if the path shouldn't be considered. See _AddSources() for a description of |base_path_components|.""" depth = 0 while path.startswith('../'): depth += 1 path = path[3:] # Relative includes may go outside the source tree. For example, an action may # have inputs in /usr/include, which are not in the source tree. 
if depth > len(base_path_components): return '' if depth == len(base_path_components): return path return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \ '/' + path def _AddSources(sources, base_path, base_path_components, result): """Extracts valid sources from |sources| and adds them to |result|. Each source file is relative to |base_path|, but may contain '..'. To make resolving '..' easier |base_path_components| contains each of the directories in |base_path|. Additionally each source may contain variables. Such sources are ignored as it is assumed dependencies on them are expressed and tracked in some other means.""" # NOTE: gyp paths are always posix style. for source in sources: if not len(source) or source.startswith('!!!') or source.startswith('$'): continue # variable expansion may lead to //. org_source = source source = source[0] + source[1:].replace('//', '/') if source.startswith('../'): source = _ResolveParent(source, base_path_components) if len(source): result.append(source) continue result.append(base_path + source) if debug: print 'AddSource', org_source, result[len(result) - 1] def _ExtractSourcesFromAction(action, base_path, base_path_components, results): if 'inputs' in action: _AddSources(action['inputs'], base_path, base_path_components, results) def _ToLocalPath(toplevel_dir, path): """Converts |path| to a path relative to |toplevel_dir|.""" if path == toplevel_dir: return '' if path.startswith(toplevel_dir + '/'): return path[len(toplevel_dir) + len('/'):] return path def _ExtractSources(target, target_dict, toplevel_dir): # |target| is either absolute or relative and in the format of the OS. Gyp # source paths are always posix. Convert |target| to a posix path relative to # |toplevel_dir_|. This is done to make it easy to build source paths. 
base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target))) base_path_components = base_path.split('/') # Add a trailing '/' so that _AddSources() can easily build paths. if len(base_path): base_path += '/' if debug: print 'ExtractSources', target, base_path results = [] if 'sources' in target_dict: _AddSources(target_dict['sources'], base_path, base_path_components, results) # Include the inputs from any actions. Any changes to these affect the # resulting output. if 'actions' in target_dict: for action in target_dict['actions']: _ExtractSourcesFromAction(action, base_path, base_path_components, results) if 'rules' in target_dict: for rule in target_dict['rules']: _ExtractSourcesFromAction(rule, base_path, base_path_components, results) return results class Target(object): """Holds information about a particular target: deps: set of Targets this Target depends upon. This is not recursive, only the direct dependent Targets. match_status: one of the MatchStatus values. back_deps: set of Targets that have a dependency on this Target. visited: used during iteration to indicate whether we've visited this target. This is used for two iterations, once in building the set of Targets and again in _GetBuildTargets(). name: fully qualified name of the target. requires_build: True if the target type is such that it needs to be built. See _DoesTargetTypeRequireBuild for details. added_to_compile_targets: used when determining if the target was added to the set of targets that needs to be built. in_roots: true if this target is a descendant of one of the root nodes. is_executable: true if the type of target is executable. is_static_library: true if the type of target is static_library. 
is_or_has_linked_ancestor: true if the target does a link (eg executable), or if there is a target in back_deps that does a link.""" def __init__(self, name): self.deps = set() self.match_status = MATCH_STATUS_TBD self.back_deps = set() self.name = name # TODO(sky): I don't like hanging this off Target. This state is specific # to certain functions and should be isolated there. self.visited = False self.requires_build = False self.added_to_compile_targets = False self.in_roots = False self.is_executable = False self.is_static_library = False self.is_or_has_linked_ancestor = False class Config(object): """Details what we're looking for files: set of files to search for targets: see file description for details.""" def __init__(self): self.files = [] self.targets = set() self.additional_compile_target_names = set() self.test_target_names = set() def Init(self, params): """Initializes Config. This is a separate method as it raises an exception if there is a parse error.""" generator_flags = params.get('generator_flags', {}) config_path = generator_flags.get('config_path', None) if not config_path: return try: f = open(config_path, 'r') config = json.load(f) f.close() except IOError: raise Exception('Unable to open file ' + config_path) except ValueError as e: raise Exception('Unable to parse config file ' + config_path + str(e)) if not isinstance(config, dict): raise Exception('config_path must be a JSON file containing a dictionary') self.files = config.get('files', []) self.additional_compile_target_names = set( config.get('additional_compile_targets', [])) self.test_target_names = set(config.get('test_targets', [])) def _WasBuildFileModified(build_file, data, files, toplevel_dir): """Returns true if the build file |build_file| is either in |files| or one of the files included by |build_file| is in |files|. 
|toplevel_dir| is the root of the source tree.""" if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files: if debug: print 'gyp file modified', build_file return True # First element of included_files is the file itself. if len(data[build_file]['included_files']) <= 1: return False for include_file in data[build_file]['included_files'][1:]: # |included_files| are relative to the directory of the |build_file|. rel_include_file = \ _ToGypPath(gyp.common.UnrelativePath(include_file, build_file)) if _ToLocalPath(toplevel_dir, rel_include_file) in files: if debug: print 'included gyp file modified, gyp_file=', build_file, \ 'included file=', rel_include_file return True return False def _GetOrCreateTargetByName(targets, target_name): """Creates or returns the Target at targets[target_name]. If there is no Target for |target_name| one is created. Returns a tuple of whether a new Target was created and the Target.""" if target_name in targets: return False, targets[target_name] target = Target(target_name) targets[target_name] = target return True, target def _DoesTargetTypeRequireBuild(target_dict): """Returns true if the target type is such that it needs to be built.""" # If a 'none' target has rules or actions we assume it requires a build. return bool(target_dict['type'] != 'none' or target_dict.get('actions') or target_dict.get('rules')) def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files, build_files): """Returns a tuple of the following: . A dictionary mapping from fully qualified name to Target. . A list of the targets that have a source file in |files|. . Targets that constitute the 'all' target. See description at top of file for details on the 'all' target. This sets the |match_status| of the targets that contain any of the source files in |files| to MATCH_STATUS_MATCHES. |toplevel_dir| is the root of the source tree.""" # Maps from target name to Target. name_to_target = {} # Targets that matched. 
matching_targets = [] # Queue of targets to visit. targets_to_visit = target_list[:] # Maps from build file to a boolean indicating whether the build file is in # |files|. build_file_in_files = {} # Root targets across all files. roots = set() # Set of Targets in |build_files|. build_file_targets = set() while len(targets_to_visit) > 0: target_name = targets_to_visit.pop() created_target, target = _GetOrCreateTargetByName(name_to_target, target_name) if created_target: roots.add(target) elif target.visited: continue target.visited = True target.requires_build = _DoesTargetTypeRequireBuild( target_dicts[target_name]) target_type = target_dicts[target_name]['type'] target.is_executable = target_type == 'executable' target.is_static_library = target_type == 'static_library' target.is_or_has_linked_ancestor = (target_type == 'executable' or target_type == 'shared_library') build_file = gyp.common.ParseQualifiedTarget(target_name)[0] if not build_file in build_file_in_files: build_file_in_files[build_file] = \ _WasBuildFileModified(build_file, data, files, toplevel_dir) if build_file in build_files: build_file_targets.add(target) # If a build file (or any of its included files) is modified we assume all # targets in the file are modified. if build_file_in_files[build_file]: print 'matching target from modified build file', target_name target.match_status = MATCH_STATUS_MATCHES matching_targets.append(target) else: sources = _ExtractSources(target_name, target_dicts[target_name], toplevel_dir) for source in sources: if _ToGypPath(os.path.normpath(source)) in files: print 'target', target_name, 'matches', source target.match_status = MATCH_STATUS_MATCHES matching_targets.append(target) break # Add dependencies to visit as well as updating back pointers for deps. 
for dep in target_dicts[target_name].get('dependencies', []): targets_to_visit.append(dep) created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target, dep) if not created_dep_target: roots.discard(dep_target) target.deps.add(dep_target) dep_target.back_deps.add(target) return name_to_target, matching_targets, roots & build_file_targets def _GetUnqualifiedToTargetMapping(all_targets, to_find): """Returns a tuple of the following: . mapping (dictionary) from unqualified name to Target for all the Targets in |to_find|. . any target names not found. If this is empty all targets were found.""" result = {} if not to_find: return {}, [] to_find = set(to_find) for target_name in all_targets.keys(): extracted = gyp.common.ParseQualifiedTarget(target_name) if len(extracted) > 1 and extracted[1] in to_find: to_find.remove(extracted[1]) result[extracted[1]] = all_targets[target_name] if not to_find: return result, [] return result, [x for x in to_find] def _DoesTargetDependOnMatchingTargets(target): """Returns true if |target| or any of its dependencies is one of the targets containing the files supplied as input to analyzer. This updates |matches| of the Targets as it recurses. target: the Target to look for.""" if target.match_status == MATCH_STATUS_DOESNT_MATCH: return False if target.match_status == MATCH_STATUS_MATCHES or \ target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY: return True for dep in target.deps: if _DoesTargetDependOnMatchingTargets(dep): target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY print '\t', target.name, 'matches by dep', dep.name return True target.match_status = MATCH_STATUS_DOESNT_MATCH return False def _GetTargetsDependingOnMatchingTargets(possible_targets): """Returns the list of Targets in |possible_targets| that depend (either directly on indirectly) on at least one of the targets containing the files supplied as input to analyzer. 
possible_targets: targets to search from.""" found = [] print 'Targets that matched by dependency:' for target in possible_targets: if _DoesTargetDependOnMatchingTargets(target): found.append(target) return found def _AddCompileTargets(target, roots, add_if_no_ancestor, result): """Recurses through all targets that depend on |target|, adding all targets that need to be built (and are in |roots|) to |result|. roots: set of root targets. add_if_no_ancestor: If true and there are no ancestors of |target| then add |target| to |result|. |target| must still be in |roots|. result: targets that need to be built are added here.""" if target.visited: return target.visited = True target.in_roots = target in roots for back_dep_target in target.back_deps: _AddCompileTargets(back_dep_target, roots, False, result) target.added_to_compile_targets |= back_dep_target.added_to_compile_targets target.in_roots |= back_dep_target.in_roots target.is_or_has_linked_ancestor |= ( back_dep_target.is_or_has_linked_ancestor) # Always add 'executable' targets. Even though they may be built by other # targets that depend upon them it makes detection of what is going to be # built easier. # And always add static_libraries that have no dependencies on them from # linkables. This is necessary as the other dependencies on them may be # static libraries themselves, which are not compile time dependencies. 
if target.in_roots and \ (target.is_executable or (not target.added_to_compile_targets and (add_if_no_ancestor or target.requires_build)) or (target.is_static_library and add_if_no_ancestor and not target.is_or_has_linked_ancestor)): print '\t\tadding to compile targets', target.name, 'executable', \ target.is_executable, 'added_to_compile_targets', \ target.added_to_compile_targets, 'add_if_no_ancestor', \ add_if_no_ancestor, 'requires_build', target.requires_build, \ 'is_static_library', target.is_static_library, \ 'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor result.add(target) target.added_to_compile_targets = True def _GetCompileTargets(matching_targets, supplied_targets): """Returns the set of Targets that require a build. matching_targets: targets that changed and need to be built. supplied_targets: set of targets supplied to analyzer to search from.""" result = set() for target in matching_targets: print 'finding compile targets for match', target.name _AddCompileTargets(target, supplied_targets, True, result) return result def _WriteOutput(params, **values): """Writes the output, either to stdout or a file is specified.""" if 'error' in values: print 'Error:', values['error'] if 'status' in values: print values['status'] if 'targets' in values: values['targets'].sort() print 'Supplied targets that depend on changed files:' for target in values['targets']: print '\t', target if 'invalid_targets' in values: values['invalid_targets'].sort() print 'The following targets were not found:' for target in values['invalid_targets']: print '\t', target if 'build_targets' in values: values['build_targets'].sort() print 'Targets that require a build:' for target in values['build_targets']: print '\t', target if 'compile_targets' in values: values['compile_targets'].sort() print 'Targets that need to be built:' for target in values['compile_targets']: print '\t', target if 'test_targets' in values: values['test_targets'].sort() print 'Test targets:' for 
target in values['test_targets']: print '\t', target output_path = params.get('generator_flags', {}).get( 'analyzer_output_path', None) if not output_path: print json.dumps(values) return try: f = open(output_path, 'w') f.write(json.dumps(values) + '\n') f.close() except IOError as e: print 'Error writing to output file', output_path, str(e) def _WasGypIncludeFileModified(params, files): """Returns true if one of the files in |files| is in the set of included files.""" if params['options'].includes: for include in params['options'].includes: if _ToGypPath(os.path.normpath(include)) in files: print 'Include file modified, assuming all changed', include return True return False def _NamesNotIn(names, mapping): """Returns a list of the values in |names| that are not in |mapping|.""" return [name for name in names if name not in mapping] def _LookupTargets(names, mapping): """Returns a list of the mapping[name] for each value in |names| that is in |mapping|.""" return [mapping[name] for name in names if name in mapping] def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" flavor = gyp.common.GetFlavor(params) if flavor == 'mac': default_variables.setdefault('OS', 'mac') elif flavor == 'win': default_variables.setdefault('OS', 'win') # Copy additional generator configuration data from VS, which is shared # by the Windows Ninja generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) else: operating_system = flavor if flavor == 'android': operating_system = 'linux' # Keep this legacy behavior for now. 
default_variables.setdefault('OS', operating_system) class TargetCalculator(object): """Calculates the matching test_targets and matching compile_targets.""" def __init__(self, files, additional_compile_target_names, test_target_names, data, target_list, target_dicts, toplevel_dir, build_files): self._additional_compile_target_names = set(additional_compile_target_names) self._test_target_names = set(test_target_names) self._name_to_target, self._changed_targets, self._root_targets = ( _GenerateTargets(data, target_list, target_dicts, toplevel_dir, frozenset(files), build_files)) self._unqualified_mapping, self.invalid_targets = ( _GetUnqualifiedToTargetMapping(self._name_to_target, self._supplied_target_names_no_all())) def _supplied_target_names(self): return self._additional_compile_target_names | self._test_target_names def _supplied_target_names_no_all(self): """Returns the supplied test targets without 'all'.""" result = self._supplied_target_names(); result.discard('all') return result def is_build_impacted(self): """Returns true if the supplied files impact the build at all.""" return self._changed_targets def find_matching_test_target_names(self): """Returns the set of output test targets.""" assert self.is_build_impacted() # Find the test targets first. 'all' is special cased to mean all the # root targets. To deal with all the supplied |test_targets| are expanded # to include the root targets during lookup. If any of the root targets # match, we remove it and replace it with 'all'. 
test_target_names_no_all = set(self._test_target_names) test_target_names_no_all.discard('all') test_targets_no_all = _LookupTargets(test_target_names_no_all, self._unqualified_mapping) test_target_names_contains_all = 'all' in self._test_target_names if test_target_names_contains_all: test_targets = [x for x in (set(test_targets_no_all) | set(self._root_targets))] else: test_targets = [x for x in test_targets_no_all] print 'supplied test_targets' for target_name in self._test_target_names: print '\t', target_name print 'found test_targets' for target in test_targets: print '\t', target.name print 'searching for matching test targets' matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets) matching_test_targets_contains_all = (test_target_names_contains_all and set(matching_test_targets) & set(self._root_targets)) if matching_test_targets_contains_all: # Remove any of the targets for all that were not explicitly supplied, # 'all' is subsequentely added to the matching names below. matching_test_targets = [x for x in (set(matching_test_targets) & set(test_targets_no_all))] print 'matched test_targets' for target in matching_test_targets: print '\t', target.name matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1] for target in matching_test_targets] if matching_test_targets_contains_all: matching_target_names.append('all') print '\tall' return matching_target_names def find_matching_compile_target_names(self): """Returns the set of output compile targets.""" assert self.is_build_impacted(); # Compile targets are found by searching up from changed targets. # Reset the visited status for _GetBuildTargets. 
for target in self._name_to_target.itervalues(): target.visited = False supplied_targets = _LookupTargets(self._supplied_target_names_no_all(), self._unqualified_mapping) if 'all' in self._supplied_target_names(): supplied_targets = [x for x in (set(supplied_targets) | set(self._root_targets))] print 'Supplied test_targets & compile_targets' for target in supplied_targets: print '\t', target.name print 'Finding compile targets' compile_targets = _GetCompileTargets(self._changed_targets, supplied_targets) return [gyp.common.ParseQualifiedTarget(target.name)[1] for target in compile_targets] def GenerateOutput(target_list, target_dicts, data, params): """Called by gyp as the final stage. Outputs results.""" config = Config() try: config.Init(params) if not config.files: raise Exception('Must specify files to analyze via config_path generator ' 'flag') toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir)) if debug: print 'toplevel_dir', toplevel_dir if _WasGypIncludeFileModified(params, config.files): result_dict = { 'status': all_changed_string, 'test_targets': list(config.test_target_names), 'compile_targets': list( config.additional_compile_target_names | config.test_target_names) } _WriteOutput(params, **result_dict) return calculator = TargetCalculator(config.files, config.additional_compile_target_names, config.test_target_names, data, target_list, target_dicts, toplevel_dir, params['build_files']) if not calculator.is_build_impacted(): result_dict = { 'status': no_dependency_string, 'test_targets': [], 'compile_targets': [] } if calculator.invalid_targets: result_dict['invalid_targets'] = calculator.invalid_targets _WriteOutput(params, **result_dict) return test_target_names = calculator.find_matching_test_target_names() compile_target_names = calculator.find_matching_compile_target_names() found_at_least_one_target = compile_target_names or test_target_names result_dict = { 'test_targets': test_target_names, 'status': 
found_dependency_string if found_at_least_one_target else no_dependency_string, 'compile_targets': list( set(compile_target_names) | set(test_target_names)) } if calculator.invalid_targets: result_dict['invalid_targets'] = calculator.invalid_targets _WriteOutput(params, **result_dict) except Exception as e: _WriteOutput(params, error=str(e))
gpl-3.0
xubenben/scikit-learn
sklearn/cluster/affinity_propagation_.py
224
10733
""" Algorithms for clustering : Meanshift, Affinity propagation and spectral clustering. """ # Author: Alexandre Gramfort alexandre.gramfort@inria.fr # Gael Varoquaux gael.varoquaux@normalesup.org # License: BSD 3 clause import numpy as np from ..base import BaseEstimator, ClusterMixin from ..utils import as_float_array, check_array from ..utils.validation import check_is_fitted from ..metrics import euclidean_distances from ..metrics import pairwise_distances_argmin def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200, damping=0.5, copy=True, verbose=False, return_n_iter=False): """Perform Affinity Propagation Clustering of data Read more in the :ref:`User Guide <affinity_propagation>`. Parameters ---------- S : array-like, shape (n_samples, n_samples) Matrix of similarities between points preference : array-like, shape (n_samples,) or float, optional Preferences for each point - points with larger values of preferences are more likely to be chosen as exemplars. The number of exemplars, i.e. of clusters, is influenced by the input preferences value. If the preferences are not passed as arguments, they will be set to the median of the input similarities (resulting in a moderate number of clusters). For a smaller amount of clusters, this can be set to the minimum value of the similarities. convergence_iter : int, optional, default: 15 Number of iterations with no change in the number of estimated clusters that stops the convergence. max_iter : int, optional, default: 200 Maximum number of iterations damping : float, optional, default: 0.5 Damping factor between 0.5 and 1. copy : boolean, optional, default: True If copy is False, the affinity matrix is modified inplace by the algorithm, for memory efficiency verbose : boolean, optional, default: False The verbosity level return_n_iter : bool, default False Whether or not to return the number of iterations. 
Returns ------- cluster_centers_indices : array, shape (n_clusters,) index of clusters centers labels : array, shape (n_samples,) cluster labels for each point n_iter : int number of iterations run. Returned only if `return_n_iter` is set to True. Notes ----- See examples/cluster/plot_affinity_propagation.py for an example. References ---------- Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages Between Data Points", Science Feb. 2007 """ S = as_float_array(S, copy=copy) n_samples = S.shape[0] if S.shape[0] != S.shape[1]: raise ValueError("S must be a square array (shape=%s)" % repr(S.shape)) if preference is None: preference = np.median(S) if damping < 0.5 or damping >= 1: raise ValueError('damping must be >= 0.5 and < 1') random_state = np.random.RandomState(0) # Place preference on the diagonal of S S.flat[::(n_samples + 1)] = preference A = np.zeros((n_samples, n_samples)) R = np.zeros((n_samples, n_samples)) # Initialize messages # Intermediate results tmp = np.zeros((n_samples, n_samples)) # Remove degeneracies S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) * random_state.randn(n_samples, n_samples)) # Execute parallel affinity propagation updates e = np.zeros((n_samples, convergence_iter)) ind = np.arange(n_samples) for it in range(max_iter): # tmp = A + S; compute responsibilities np.add(A, S, tmp) I = np.argmax(tmp, axis=1) Y = tmp[ind, I] # np.max(A + S, axis=1) tmp[ind, I] = -np.inf Y2 = np.max(tmp, axis=1) # tmp = Rnew np.subtract(S, Y[:, None], tmp) tmp[ind, I] = S[ind, I] - Y2 # Damping tmp *= 1 - damping R *= damping R += tmp # tmp = Rp; compute availabilities np.maximum(R, 0, tmp) tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1] # tmp = -Anew tmp -= np.sum(tmp, axis=0) dA = np.diag(tmp).copy() tmp.clip(0, np.inf, tmp) tmp.flat[::n_samples + 1] = dA # Damping tmp *= 1 - damping A *= damping A -= tmp # Check for convergence E = (np.diag(A) + np.diag(R)) > 0 e[:, it % convergence_iter] = E K = np.sum(E, axis=0) 
if it >= convergence_iter: se = np.sum(e, axis=1) unconverged = (np.sum((se == convergence_iter) + (se == 0)) != n_samples) if (not unconverged and (K > 0)) or (it == max_iter): if verbose: print("Converged after %d iterations." % it) break else: if verbose: print("Did not converge") I = np.where(np.diag(A + R) > 0)[0] K = I.size # Identify exemplars if K > 0: c = np.argmax(S[:, I], axis=1) c[I] = np.arange(K) # Identify clusters # Refine the final set of exemplars and clusters and return results for k in range(K): ii = np.where(c == k)[0] j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0)) I[k] = ii[j] c = np.argmax(S[:, I], axis=1) c[I] = np.arange(K) labels = I[c] # Reduce labels to a sorted, gapless, list cluster_centers_indices = np.unique(labels) labels = np.searchsorted(cluster_centers_indices, labels) else: labels = np.empty((n_samples, 1)) cluster_centers_indices = None labels.fill(np.nan) if return_n_iter: return cluster_centers_indices, labels, it + 1 else: return cluster_centers_indices, labels ############################################################################### class AffinityPropagation(BaseEstimator, ClusterMixin): """Perform Affinity Propagation Clustering of data. Read more in the :ref:`User Guide <affinity_propagation>`. Parameters ---------- damping : float, optional, default: 0.5 Damping factor between 0.5 and 1. convergence_iter : int, optional, default: 15 Number of iterations with no change in the number of estimated clusters that stops the convergence. max_iter : int, optional, default: 200 Maximum number of iterations. copy : boolean, optional, default: True Make a copy of input data. preference : array-like, shape (n_samples,) or float, optional Preferences for each point - points with larger values of preferences are more likely to be chosen as exemplars. The number of exemplars, ie of clusters, is influenced by the input preferences value. 
If the preferences are not passed as arguments, they will be set to the median of the input similarities. affinity : string, optional, default=``euclidean`` Which affinity to use. At the moment ``precomputed`` and ``euclidean`` are supported. ``euclidean`` uses the negative squared euclidean distance between points. verbose : boolean, optional, default: False Whether to be verbose. Attributes ---------- cluster_centers_indices_ : array, shape (n_clusters,) Indices of cluster centers cluster_centers_ : array, shape (n_clusters, n_features) Cluster centers (if affinity != ``precomputed``). labels_ : array, shape (n_samples,) Labels of each point affinity_matrix_ : array, shape (n_samples, n_samples) Stores the affinity matrix used in ``fit``. n_iter_ : int Number of iterations taken to converge. Notes ----- See examples/cluster/plot_affinity_propagation.py for an example. The algorithmic complexity of affinity propagation is quadratic in the number of points. References ---------- Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages Between Data Points", Science Feb. 2007 """ def __init__(self, damping=.5, max_iter=200, convergence_iter=15, copy=True, preference=None, affinity='euclidean', verbose=False): self.damping = damping self.max_iter = max_iter self.convergence_iter = convergence_iter self.copy = copy self.verbose = verbose self.preference = preference self.affinity = affinity @property def _pairwise(self): return self.affinity == "precomputed" def fit(self, X, y=None): """ Create affinity matrix from negative euclidean distances, then apply affinity propagation clustering. Parameters ---------- X: array-like, shape (n_samples, n_features) or (n_samples, n_samples) Data matrix or, if affinity is ``precomputed``, matrix of similarities / affinities. 
""" X = check_array(X, accept_sparse='csr') if self.affinity == "precomputed": self.affinity_matrix_ = X elif self.affinity == "euclidean": self.affinity_matrix_ = -euclidean_distances(X, squared=True) else: raise ValueError("Affinity must be 'precomputed' or " "'euclidean'. Got %s instead" % str(self.affinity)) self.cluster_centers_indices_, self.labels_, self.n_iter_ = \ affinity_propagation( self.affinity_matrix_, self.preference, max_iter=self.max_iter, convergence_iter=self.convergence_iter, damping=self.damping, copy=self.copy, verbose=self.verbose, return_n_iter=True) if self.affinity != "precomputed": self.cluster_centers_ = X[self.cluster_centers_indices_].copy() return self def predict(self, X): """Predict the closest cluster each sample in X belongs to. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data to predict. Returns ------- labels : array, shape (n_samples,) Index of the cluster each sample belongs to. """ check_is_fitted(self, "cluster_centers_indices_") if not hasattr(self, "cluster_centers_"): raise ValueError("Predict method is not supported when " "affinity='precomputed'.") return pairwise_distances_argmin(X, self.cluster_centers_)
bsd-3-clause
KurtDeGreeff/infernal-twin
build/pip/build/lib.linux-i686-2.7/pip/_vendor/html5lib/treewalkers/dom.py
505
1421
from __future__ import absolute_import, division, unicode_literals from xml.dom import Node from . import _base class TreeWalker(_base.NonRecursiveTreeWalker): def getNodeDetails(self, node): if node.nodeType == Node.DOCUMENT_TYPE_NODE: return _base.DOCTYPE, node.name, node.publicId, node.systemId elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): return _base.TEXT, node.nodeValue elif node.nodeType == Node.ELEMENT_NODE: attrs = {} for attr in list(node.attributes.keys()): attr = node.getAttributeNode(attr) if attr.namespaceURI: attrs[(attr.namespaceURI, attr.localName)] = attr.value else: attrs[(None, attr.name)] = attr.value return (_base.ELEMENT, node.namespaceURI, node.nodeName, attrs, node.hasChildNodes()) elif node.nodeType == Node.COMMENT_NODE: return _base.COMMENT, node.nodeValue elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): return (_base.DOCUMENT,) else: return _base.UNKNOWN, node.nodeType def getFirstChild(self, node): return node.firstChild def getNextSibling(self, node): return node.nextSibling def getParentNode(self, node): return node.parentNode
gpl-3.0
annarev/tensorflow
tensorflow/python/debug/wrappers/local_cli_wrapper.py
14
24606
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Debugger Wrapper Session Consisting of a Local Curses-based CLI.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import sys import tempfile # Google-internal import(s). from tensorflow.python.debug.cli import analyzer_cli from tensorflow.python.debug.cli import cli_config from tensorflow.python.debug.cli import cli_shared from tensorflow.python.debug.cli import command_parser from tensorflow.python.debug.cli import debugger_cli_common from tensorflow.python.debug.cli import profile_analyzer_cli from tensorflow.python.debug.cli import ui_factory from tensorflow.python.debug.lib import common from tensorflow.python.debug.lib import debug_data from tensorflow.python.debug.wrappers import framework from tensorflow.python.lib.io import file_io _DUMP_ROOT_PREFIX = "tfdbg_" # TODO(donglin) Remove use_random_config_path after b/137652456 is fixed. class LocalCLIDebugWrapperSession(framework.BaseDebugWrapperSession): """Concrete subclass of BaseDebugWrapperSession implementing a local CLI. This class has all the methods that a `session.Session` object has, in order to support debugging with minimal code changes. Invoking its `run()` method will launch the command-line interface (CLI) of tfdbg. 
""" def __init__(self, sess, dump_root=None, log_usage=True, ui_type="curses", thread_name_filter=None, config_file_path=False): """Constructor of LocalCLIDebugWrapperSession. Args: sess: The TensorFlow `Session` object being wrapped. dump_root: (`str`) optional path to the dump root directory. Must be a directory that does not exist or an empty directory. If the directory does not exist, it will be created by the debugger core during debug `run()` calls and removed afterwards. If `None`, the debug dumps will be at tfdbg_<random_string> under the system temp directory. log_usage: (`bool`) whether the usage of this class is to be logged. ui_type: (`str`) requested UI type. Currently supported: (curses | readline) thread_name_filter: Regular-expression white list for thread name. See the doc of `BaseDebugWrapperSession` for details. config_file_path: Optional override to the default configuration file path, which is at `${HOME}/.tfdbg_config`. Raises: ValueError: If dump_root is an existing and non-empty directory or if dump_root is a file. """ if log_usage: pass # No logging for open-source. framework.BaseDebugWrapperSession.__init__( self, sess, thread_name_filter=thread_name_filter) if not dump_root: self._dump_root = tempfile.mktemp(prefix=_DUMP_ROOT_PREFIX) else: dump_root = os.path.expanduser(dump_root) if os.path.isfile(dump_root): raise ValueError("dump_root path points to a file: %s" % dump_root) elif os.path.isdir(dump_root) and os.listdir(dump_root): raise ValueError("dump_root path points to a non-empty directory: %s" % dump_root) self._dump_root = dump_root self._initialize_argparsers() # Registered tensor filters. self._tensor_filters = {} # Register frequently-used filter(s). self.add_tensor_filter("has_inf_or_nan", debug_data.has_inf_or_nan) # Below are the state variables of this wrapper object. # _active_tensor_filter: what (if any) tensor filter is in effect. 
If such # a filter is in effect, this object will call run() method of the # underlying TensorFlow Session object until the filter passes. This is # activated by the "-f" flag of the "run" command. # _run_through_times: keeps track of how many times the wrapper needs to # run through without stopping at the run-end CLI. It is activated by the # "-t" option of the "run" command. # _skip_debug: keeps track of whether the current run should be executed # without debugging. It is activated by the "-n" option of the "run" # command. # # _run_start_response: keeps track what OnRunStartResponse the wrapper # should return at the next run-start callback. If this information is # unavailable (i.e., is None), the run-start CLI will be launched to ask # the user. This is the case, e.g., right before the first run starts. self._active_tensor_filter = None self._active_filter_exclude_node_names = None self._active_tensor_filter_run_start_response = None self._run_through_times = 1 self._skip_debug = False self._run_start_response = None self._is_run_start = True self._ui_type = ui_type self._config = None if config_file_path: self._config = cli_config.CLIConfig(config_file_path=config_file_path) def _is_disk_usage_reset_each_run(self): # The dumped tensors are all cleaned up after every Session.run # in a command-line wrapper. 
return True def _initialize_argparsers(self): self._argparsers = {} ap = argparse.ArgumentParser( description="Run through, with or without debug tensor watching.", usage=argparse.SUPPRESS) ap.add_argument( "-t", "--times", dest="times", type=int, default=1, help="How many Session.run() calls to proceed with.") ap.add_argument( "-n", "--no_debug", dest="no_debug", action="store_true", help="Run through without debug tensor watching.") ap.add_argument( "-f", "--till_filter_pass", dest="till_filter_pass", type=str, default="", help="Run until a tensor in the graph passes the specified filter.") ap.add_argument( "-fenn", "--filter_exclude_node_names", dest="filter_exclude_node_names", type=str, default="", help="When applying the tensor filter, exclude node with names " "matching the regular expression. Applicable only if --tensor_filter " "or -f is used.") ap.add_argument( "--node_name_filter", dest="node_name_filter", type=str, default="", help="Regular-expression filter for node names to be watched in the " "run, e.g., loss, reshape.*") ap.add_argument( "--op_type_filter", dest="op_type_filter", type=str, default="", help="Regular-expression filter for op type to be watched in the run, " "e.g., (MatMul|Add), Variable.*") ap.add_argument( "--tensor_dtype_filter", dest="tensor_dtype_filter", type=str, default="", help="Regular-expression filter for tensor dtype to be watched in the " "run, e.g., (float32|float64), int.*") ap.add_argument( "-p", "--profile", dest="profile", action="store_true", help="Run and profile TensorFlow graph execution.") self._argparsers["run"] = ap ap = argparse.ArgumentParser( description="Display information about this Session.run() call.", usage=argparse.SUPPRESS) self._argparsers["run_info"] = ap self._argparsers["print_feed"] = command_parser.get_print_tensor_argparser( "Print the value of a feed in feed_dict.") def add_tensor_filter(self, filter_name, tensor_filter): """Add a tensor filter. Args: filter_name: (`str`) name of the filter. 
tensor_filter: (`callable`) the filter callable. See the doc string of `DebugDumpDir.find()` for more details about its signature. """ self._tensor_filters[filter_name] = tensor_filter def on_session_init(self, request): """Overrides on-session-init callback. Args: request: An instance of `OnSessionInitRequest`. Returns: An instance of `OnSessionInitResponse`. """ return framework.OnSessionInitResponse( framework.OnSessionInitAction.PROCEED) def on_run_start(self, request): """Overrides on-run-start callback. Args: request: An instance of `OnRunStartRequest`. Returns: An instance of `OnRunStartResponse`. """ self._is_run_start = True self._update_run_calls_state( request.run_call_count, request.fetches, request.feed_dict, is_callable_runner=request.is_callable_runner) if self._active_tensor_filter: # If we are running until a filter passes, we just need to keep running # with the previous `OnRunStartResponse`. return self._active_tensor_filter_run_start_response self._exit_if_requested_by_user() if self._run_call_count > 1 and not self._skip_debug: if self._run_through_times > 0: # Just run through without debugging. return framework.OnRunStartResponse( framework.OnRunStartAction.NON_DEBUG_RUN, []) elif self._run_through_times == 0: # It is the run at which the run-end CLI will be launched: activate # debugging. return (self._run_start_response or framework.OnRunStartResponse( framework.OnRunStartAction.DEBUG_RUN, self._get_run_debug_urls())) if self._run_start_response is None: self._prep_cli_for_run_start() self._run_start_response = self._launch_cli() if self._active_tensor_filter: self._active_tensor_filter_run_start_response = self._run_start_response if self._run_through_times > 1: self._run_through_times -= 1 self._exit_if_requested_by_user() return self._run_start_response def _exit_if_requested_by_user(self): if self._run_start_response == debugger_cli_common.EXPLICIT_USER_EXIT: # Explicit user "exit" command leads to sys.exit(1). 
print( "Note: user exited from debugger CLI: Calling sys.exit(1).", file=sys.stderr) sys.exit(1) def _prep_cli_for_run_start(self): """Prepare (but not launch) the CLI for run-start.""" self._run_cli = ui_factory.get_ui(self._ui_type, config=self._config) help_intro = debugger_cli_common.RichTextLines([]) if self._run_call_count == 1: # Show logo at the onset of the first run. help_intro.extend(cli_shared.get_tfdbg_logo()) help_intro.extend(debugger_cli_common.get_tensorflow_version_lines()) help_intro.extend(debugger_cli_common.RichTextLines("Upcoming run:")) help_intro.extend(self._run_info) self._run_cli.set_help_intro(help_intro) # Create initial screen output detailing the run. self._title = "run-start: " + self._run_description self._init_command = "run_info" self._title_color = "blue_on_white" def on_run_end(self, request): """Overrides on-run-end callback. Actions taken: 1) Load the debug dump. 2) Bring up the Analyzer CLI. Args: request: An instance of OnSessionInitRequest. Returns: An instance of OnSessionInitResponse. """ self._is_run_start = False if request.performed_action == framework.OnRunStartAction.DEBUG_RUN: partition_graphs = None if request.run_metadata and request.run_metadata.partition_graphs: partition_graphs = request.run_metadata.partition_graphs elif request.client_graph_def: partition_graphs = [request.client_graph_def] if request.tf_error and not os.path.isdir(self._dump_root): # It is possible that the dump root may not exist due to errors that # have occurred prior to graph execution (e.g., invalid device # assignments), in which case we will just raise the exception as the # unwrapped Session does. 
raise request.tf_error debug_dump = debug_data.DebugDumpDir( self._dump_root, partition_graphs=partition_graphs) debug_dump.set_python_graph(self._sess.graph) passed_filter = None passed_filter_exclude_node_names = None if self._active_tensor_filter: if not debug_dump.find( self._tensor_filters[self._active_tensor_filter], first_n=1, exclude_node_names=self._active_filter_exclude_node_names): # No dumped tensor passes the filter in this run. Clean up the dump # directory and move on. self._remove_dump_root() return framework.OnRunEndResponse() else: # Some dumped tensor(s) from this run passed the filter. passed_filter = self._active_tensor_filter passed_filter_exclude_node_names = ( self._active_filter_exclude_node_names) self._active_tensor_filter = None self._active_filter_exclude_node_names = None self._prep_debug_cli_for_run_end( debug_dump, request.tf_error, passed_filter, passed_filter_exclude_node_names) self._run_start_response = self._launch_cli() # Clean up the dump generated by this run. self._remove_dump_root() elif request.performed_action == framework.OnRunStartAction.PROFILE_RUN: self._prep_profile_cli_for_run_end(self._sess.graph, request.run_metadata) self._run_start_response = self._launch_cli() else: # No debug information to show following a non-debug run() call. self._run_start_response = None # Return placeholder response that currently holds no additional # information. return framework.OnRunEndResponse() def _remove_dump_root(self): if os.path.isdir(self._dump_root): file_io.delete_recursively(self._dump_root) def _prep_debug_cli_for_run_end(self, debug_dump, tf_error, passed_filter, passed_filter_exclude_node_names): """Prepare (but not launch) CLI for run-end, with debug dump from the run. Args: debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this run. tf_error: (None or OpError) OpError that happened during the run() call (if any). 
passed_filter: (None or str) Name of the tensor filter that just passed and caused the preparation of this run-end CLI (if any). passed_filter_exclude_node_names: (None or str) Regular expression used with the tensor filter to exclude ops with names matching the regular expression. """ if tf_error: help_intro = cli_shared.get_error_intro(tf_error) self._init_command = "help" self._title_color = "red_on_white" else: help_intro = None self._init_command = "lt" self._title_color = "black_on_white" if passed_filter is not None: # Some dumped tensor(s) from this run passed the filter. self._init_command = "lt -f %s" % passed_filter if passed_filter_exclude_node_names: self._init_command += (" --filter_exclude_node_names %s" % passed_filter_exclude_node_names) self._title_color = "red_on_white" self._run_cli = analyzer_cli.create_analyzer_ui( debug_dump, self._tensor_filters, ui_type=self._ui_type, on_ui_exit=self._remove_dump_root, config=self._config) # Get names of all dumped tensors. dumped_tensor_names = [] for datum in debug_dump.dumped_tensor_data: dumped_tensor_names.append("%s:%d" % (datum.node_name, datum.output_slot)) # Tab completions for command "print_tensors". self._run_cli.register_tab_comp_context(["print_tensor", "pt"], dumped_tensor_names) # Tab completion for commands "node_info", "list_inputs" and # "list_outputs". The list comprehension is used below because nodes() # output can be unicodes and they need to be converted to strs. self._run_cli.register_tab_comp_context( ["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"], [str(node_name) for node_name in debug_dump.nodes()]) # TODO(cais): Reduce API surface area for aliases vis-a-vis tab # completion contexts and registered command handlers. 
self._title = "run-end: " + self._run_description if help_intro: self._run_cli.set_help_intro(help_intro) def _prep_profile_cli_for_run_end(self, py_graph, run_metadata): self._init_command = "lp" self._run_cli = profile_analyzer_cli.create_profiler_ui( py_graph, run_metadata, ui_type=self._ui_type, config=self._run_cli.config) self._title = "run-end (profiler mode): " + self._run_description def _launch_cli(self): """Launch the interactive command-line interface. Returns: The OnRunStartResponse specified by the user using the "run" command. """ self._register_this_run_info(self._run_cli) response = self._run_cli.run_ui( init_command=self._init_command, title=self._title, title_color=self._title_color) return response def _run_info_handler(self, args, screen_info=None): output = debugger_cli_common.RichTextLines([]) if self._run_call_count == 1: output.extend(cli_shared.get_tfdbg_logo()) output.extend(debugger_cli_common.get_tensorflow_version_lines()) output.extend(self._run_info) if (not self._is_run_start and debugger_cli_common.MAIN_MENU_KEY in output.annotations): menu = output.annotations[debugger_cli_common.MAIN_MENU_KEY] if "list_tensors" not in menu.captions(): menu.insert( 0, debugger_cli_common.MenuItem("list_tensors", "list_tensors")) return output def _print_feed_handler(self, args, screen_info=None): np_printoptions = cli_shared.numpy_printoptions_from_screen_info( screen_info) if not self._feed_dict: return cli_shared.error( "The feed_dict of the current run is None or empty.") parsed = self._argparsers["print_feed"].parse_args(args) tensor_name, tensor_slicing = ( command_parser.parse_tensor_name_with_slicing(parsed.tensor_name)) feed_key = None feed_value = None for key in self._feed_dict: key_name = common.get_graph_element_name(key) if key_name == tensor_name: feed_key = key_name feed_value = self._feed_dict[key] break if feed_key is None: return cli_shared.error( "The feed_dict of the current run does not contain the key %s" % tensor_name) else: 
return cli_shared.format_tensor( feed_value, feed_key + " (feed)", np_printoptions, print_all=parsed.print_all, tensor_slicing=tensor_slicing, highlight_options=cli_shared.parse_ranges_highlight(parsed.ranges), include_numeric_summary=parsed.numeric_summary) def _run_handler(self, args, screen_info=None): """Command handler for "run" command during on-run-start.""" del screen_info # Currently unused. parsed = self._argparsers["run"].parse_args(args) parsed.node_name_filter = parsed.node_name_filter or None parsed.op_type_filter = parsed.op_type_filter or None parsed.tensor_dtype_filter = parsed.tensor_dtype_filter or None if parsed.filter_exclude_node_names and not parsed.till_filter_pass: raise ValueError( "The --filter_exclude_node_names (or -feon) flag is valid only if " "the --till_filter_pass (or -f) flag is used.") if parsed.profile: raise debugger_cli_common.CommandLineExit( exit_token=framework.OnRunStartResponse( framework.OnRunStartAction.PROFILE_RUN, [])) self._skip_debug = parsed.no_debug self._run_through_times = parsed.times if parsed.times > 1 or parsed.no_debug: # If requested -t times > 1, the very next run will be a non-debug run. action = framework.OnRunStartAction.NON_DEBUG_RUN debug_urls = [] else: action = framework.OnRunStartAction.DEBUG_RUN debug_urls = self._get_run_debug_urls() run_start_response = framework.OnRunStartResponse( action, debug_urls, node_name_regex_allowlist=parsed.node_name_filter, op_type_regex_allowlist=parsed.op_type_filter, tensor_dtype_regex_allowlist=parsed.tensor_dtype_filter) if parsed.till_filter_pass: # For the run-till-filter-pass (run -f) mode, use the DEBUG_RUN # option to access the intermediate tensors, and set the corresponding # state flag of the class itself to True. 
if parsed.till_filter_pass in self._tensor_filters: action = framework.OnRunStartAction.DEBUG_RUN self._active_tensor_filter = parsed.till_filter_pass self._active_filter_exclude_node_names = ( parsed.filter_exclude_node_names) self._active_tensor_filter_run_start_response = run_start_response else: # Handle invalid filter name. return debugger_cli_common.RichTextLines( ["ERROR: tensor filter \"%s\" does not exist." % parsed.till_filter_pass]) # Raise CommandLineExit exception to cause the CLI to exit. raise debugger_cli_common.CommandLineExit(exit_token=run_start_response) def _register_this_run_info(self, curses_cli): curses_cli.register_command_handler( "run", self._run_handler, self._argparsers["run"].format_help(), prefix_aliases=["r"]) curses_cli.register_command_handler( "run_info", self._run_info_handler, self._argparsers["run_info"].format_help(), prefix_aliases=["ri"]) curses_cli.register_command_handler( "print_feed", self._print_feed_handler, self._argparsers["print_feed"].format_help(), prefix_aliases=["pf"]) if self._tensor_filters: # Register tab completion for the filter names. curses_cli.register_tab_comp_context(["run", "r"], list(self._tensor_filters.keys())) if self._feed_dict and hasattr(self._feed_dict, "keys"): # Register tab completion for feed_dict keys. feed_keys = [common.get_graph_element_name(key) for key in self._feed_dict.keys()] curses_cli.register_tab_comp_context(["print_feed", "pf"], feed_keys) def _get_run_debug_urls(self): """Get the debug_urls value for the current run() call. Returns: debug_urls: (list of str) Debug URLs for the current run() call. Currently, the list consists of only one URL that is a file:// URL. """ return ["file://" + self._dump_root] def _update_run_calls_state(self, run_call_count, fetches, feed_dict, is_callable_runner=False): """Update the internal state with regard to run() call history. Args: run_call_count: (int) Number of run() calls that have occurred. 
fetches: a node/tensor or a list of node/tensor that are the fetches of the run() call. This is the same as the fetches argument to the run() call. feed_dict: None of a dict. This is the feed_dict argument to the run() call. is_callable_runner: (bool) whether a runner returned by Session.make_callable is being run. """ self._run_call_count = run_call_count self._feed_dict = feed_dict self._run_description = cli_shared.get_run_short_description( run_call_count, fetches, feed_dict, is_callable_runner=is_callable_runner) self._run_through_times -= 1 self._run_info = cli_shared.get_run_start_intro( run_call_count, fetches, feed_dict, self._tensor_filters, is_callable_runner=is_callable_runner)
apache-2.0
miselin/grpc
src/python/grpcio/tests/_result.py
20
16552
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import import collections import itertools import traceback import unittest from xml.etree import ElementTree import coverage from six import moves from tests import _loader class CaseResult(collections.namedtuple('CaseResult', [ 'id', 'name', 'kind', 'stdout', 'stderr', 'skip_reason', 'traceback'])): """A serializable result of a single test case. Attributes: id (object): Any serializable object used to denote the identity of this test case. 
name (str or None): A human-readable name of the test case. kind (CaseResult.Kind): The kind of test result. stdout (object or None): Output on stdout, or None if nothing was captured. stderr (object or None): Output on stderr, or None if nothing was captured. skip_reason (object or None): The reason the test was skipped. Must be something if self.kind is CaseResult.Kind.SKIP, else None. traceback (object or None): The traceback of the test. Must be something if self.kind is CaseResult.Kind.{ERROR, FAILURE, EXPECTED_FAILURE}, else None. """ class Kind: UNTESTED = 'untested' RUNNING = 'running' ERROR = 'error' FAILURE = 'failure' SUCCESS = 'success' SKIP = 'skip' EXPECTED_FAILURE = 'expected failure' UNEXPECTED_SUCCESS = 'unexpected success' def __new__(cls, id=None, name=None, kind=None, stdout=None, stderr=None, skip_reason=None, traceback=None): """Helper keyword constructor for the namedtuple. See this class' attributes for information on the arguments.""" assert id is not None assert name is None or isinstance(name, str) if kind is CaseResult.Kind.UNTESTED: pass elif kind is CaseResult.Kind.RUNNING: pass elif kind is CaseResult.Kind.ERROR: assert traceback is not None elif kind is CaseResult.Kind.FAILURE: assert traceback is not None elif kind is CaseResult.Kind.SUCCESS: pass elif kind is CaseResult.Kind.SKIP: assert skip_reason is not None elif kind is CaseResult.Kind.EXPECTED_FAILURE: assert traceback is not None elif kind is CaseResult.Kind.UNEXPECTED_SUCCESS: pass else: assert False return super(cls, CaseResult).__new__( cls, id, name, kind, stdout, stderr, skip_reason, traceback) def updated(self, name=None, kind=None, stdout=None, stderr=None, skip_reason=None, traceback=None): """Get a new validated CaseResult with the fields updated. 
See this class' attributes for information on the arguments.""" name = self.name if name is None else name kind = self.kind if kind is None else kind stdout = self.stdout if stdout is None else stdout stderr = self.stderr if stderr is None else stderr skip_reason = self.skip_reason if skip_reason is None else skip_reason traceback = self.traceback if traceback is None else traceback return CaseResult(id=self.id, name=name, kind=kind, stdout=stdout, stderr=stderr, skip_reason=skip_reason, traceback=traceback) class AugmentedResult(unittest.TestResult): """unittest.Result that keeps track of additional information. Uses CaseResult objects to store test-case results, providing additional information beyond that of the standard Python unittest library, such as standard output. Attributes: id_map (callable): A unary callable mapping unittest.TestCase objects to unique identifiers. cases (dict): A dictionary mapping from the identifiers returned by id_map to CaseResult objects corresponding to those IDs. """ def __init__(self, id_map): """Initialize the object with an identifier mapping. 
Arguments: id_map (callable): Corresponds to the attribute `id_map`.""" super(AugmentedResult, self).__init__() self.id_map = id_map self.cases = None def startTestRun(self): """See unittest.TestResult.startTestRun.""" super(AugmentedResult, self).startTestRun() self.cases = dict() def stopTestRun(self): """See unittest.TestResult.stopTestRun.""" super(AugmentedResult, self).stopTestRun() def startTest(self, test): """See unittest.TestResult.startTest.""" super(AugmentedResult, self).startTest(test) case_id = self.id_map(test) self.cases[case_id] = CaseResult( id=case_id, name=test.id(), kind=CaseResult.Kind.RUNNING) def addError(self, test, error): """See unittest.TestResult.addError.""" super(AugmentedResult, self).addError(test, error) case_id = self.id_map(test) self.cases[case_id] = self.cases[case_id].updated( kind=CaseResult.Kind.ERROR, traceback=error) def addFailure(self, test, error): """See unittest.TestResult.addFailure.""" super(AugmentedResult, self).addFailure(test, error) case_id = self.id_map(test) self.cases[case_id] = self.cases[case_id].updated( kind=CaseResult.Kind.FAILURE, traceback=error) def addSuccess(self, test): """See unittest.TestResult.addSuccess.""" super(AugmentedResult, self).addSuccess(test) case_id = self.id_map(test) self.cases[case_id] = self.cases[case_id].updated( kind=CaseResult.Kind.SUCCESS) def addSkip(self, test, reason): """See unittest.TestResult.addSkip.""" super(AugmentedResult, self).addSkip(test, reason) case_id = self.id_map(test) self.cases[case_id] = self.cases[case_id].updated( kind=CaseResult.Kind.SKIP, skip_reason=reason) def addExpectedFailure(self, test, error): """See unittest.TestResult.addExpectedFailure.""" super(AugmentedResult, self).addExpectedFailure(test, error) case_id = self.id_map(test) self.cases[case_id] = self.cases[case_id].updated( kind=CaseResult.Kind.EXPECTED_FAILURE, traceback=error) def addUnexpectedSuccess(self, test): """See unittest.TestResult.addUnexpectedSuccess.""" 
super(AugmentedResult, self).addUnexpectedSuccess(test) case_id = self.id_map(test) self.cases[case_id] = self.cases[case_id].updated( kind=CaseResult.Kind.UNEXPECTED_SUCCESS) def set_output(self, test, stdout, stderr): """Set the output attributes for the CaseResult corresponding to a test. Args: test (unittest.TestCase): The TestCase to set the outputs of. stdout (str): Output from stdout to assign to self.id_map(test). stderr (str): Output from stderr to assign to self.id_map(test). """ case_id = self.id_map(test) self.cases[case_id] = self.cases[case_id].updated( stdout=stdout.decode(), stderr=stderr.decode()) def augmented_results(self, filter): """Convenience method to retrieve filtered case results. Args: filter (callable): A unary predicate to filter over CaseResult objects. """ return (self.cases[case_id] for case_id in self.cases if filter(self.cases[case_id])) class CoverageResult(AugmentedResult): """Extension to AugmentedResult adding coverage.py support per test.\ Attributes: coverage_context (coverage.Coverage): coverage.py management object. """ def __init__(self, id_map): """See AugmentedResult.__init__.""" super(CoverageResult, self).__init__(id_map=id_map) self.coverage_context = None def startTest(self, test): """See unittest.TestResult.startTest. Additionally initializes and begins code coverage tracking.""" super(CoverageResult, self).startTest(test) self.coverage_context = coverage.Coverage(data_suffix=True) self.coverage_context.start() def stopTest(self, test): """See unittest.TestResult.stopTest. Additionally stops and deinitializes code coverage tracking.""" super(CoverageResult, self).stopTest(test) self.coverage_context.stop() self.coverage_context.save() self.coverage_context = None def stopTestRun(self): """See unittest.TestResult.stopTestRun.""" super(CoverageResult, self).stopTestRun() # TODO(atash): Dig deeper into why the following line fails to properly # combine coverage data from the Cython plugin. 
#coverage.Coverage().combine() class _Colors: """Namespaced constants for terminal color magic numbers.""" HEADER = '\033[95m' INFO = '\033[94m' OK = '\033[92m' WARN = '\033[93m' FAIL = '\033[91m' BOLD = '\033[1m' UNDERLINE = '\033[4m' END = '\033[0m' class TerminalResult(CoverageResult): """Extension to CoverageResult adding basic terminal reporting.""" def __init__(self, out, id_map): """Initialize the result object. Args: out (file-like): Output file to which terminal-colored live results will be written. id_map (callable): See AugmentedResult.__init__. """ super(TerminalResult, self).__init__(id_map=id_map) self.out = out def startTestRun(self): """See unittest.TestResult.startTestRun.""" super(TerminalResult, self).startTestRun() self.out.write( _Colors.HEADER + 'Testing gRPC Python...\n' + _Colors.END) def stopTestRun(self): """See unittest.TestResult.stopTestRun.""" super(TerminalResult, self).stopTestRun() self.out.write(summary(self)) self.out.flush() def addError(self, test, error): """See unittest.TestResult.addError.""" super(TerminalResult, self).addError(test, error) self.out.write( _Colors.FAIL + 'ERROR {}\n'.format(test.id()) + _Colors.END) self.out.flush() def addFailure(self, test, error): """See unittest.TestResult.addFailure.""" super(TerminalResult, self).addFailure(test, error) self.out.write( _Colors.FAIL + 'FAILURE {}\n'.format(test.id()) + _Colors.END) self.out.flush() def addSuccess(self, test): """See unittest.TestResult.addSuccess.""" super(TerminalResult, self).addSuccess(test) self.out.write( _Colors.OK + 'SUCCESS {}\n'.format(test.id()) + _Colors.END) self.out.flush() def addSkip(self, test, reason): """See unittest.TestResult.addSkip.""" super(TerminalResult, self).addSkip(test, reason) self.out.write( _Colors.INFO + 'SKIP {}\n'.format(test.id()) + _Colors.END) self.out.flush() def addExpectedFailure(self, test, error): """See unittest.TestResult.addExpectedFailure.""" super(TerminalResult, self).addExpectedFailure(test, error) 
self.out.write( _Colors.INFO + 'FAILURE_OK {}\n'.format(test.id()) + _Colors.END) self.out.flush() def addUnexpectedSuccess(self, test): """See unittest.TestResult.addUnexpectedSuccess.""" super(TerminalResult, self).addUnexpectedSuccess(test) self.out.write( _Colors.INFO + 'UNEXPECTED_OK {}\n'.format(test.id()) + _Colors.END) self.out.flush() def _traceback_string(type, value, trace): """Generate a descriptive string of a Python exception traceback. Args: type (class): The type of the exception. value (Exception): The value of the exception. trace (traceback): Traceback of the exception. Returns: str: Formatted exception descriptive string. """ buffer = moves.cStringIO() traceback.print_exception(type, value, trace, file=buffer) return buffer.getvalue() def summary(result): """A summary string of a result object. Args: result (AugmentedResult): The result object to get the summary of. Returns: str: The summary string. """ assert isinstance(result, AugmentedResult) untested = list(result.augmented_results( lambda case_result: case_result.kind is CaseResult.Kind.UNTESTED)) running = list(result.augmented_results( lambda case_result: case_result.kind is CaseResult.Kind.RUNNING)) failures = list(result.augmented_results( lambda case_result: case_result.kind is CaseResult.Kind.FAILURE)) errors = list(result.augmented_results( lambda case_result: case_result.kind is CaseResult.Kind.ERROR)) successes = list(result.augmented_results( lambda case_result: case_result.kind is CaseResult.Kind.SUCCESS)) skips = list(result.augmented_results( lambda case_result: case_result.kind is CaseResult.Kind.SKIP)) expected_failures = list(result.augmented_results( lambda case_result: case_result.kind is CaseResult.Kind.EXPECTED_FAILURE)) unexpected_successes = list(result.augmented_results( lambda case_result: case_result.kind is CaseResult.Kind.UNEXPECTED_SUCCESS)) running_names = [case.name for case in running] finished_count = (len(failures) + len(errors) + len(successes) + 
len(expected_failures) + len(unexpected_successes)) statistics = ( '{finished} tests finished:\n' '\t{successful} successful\n' '\t{unsuccessful} unsuccessful\n' '\t{skipped} skipped\n' '\t{expected_fail} expected failures\n' '\t{unexpected_successful} unexpected successes\n' 'Interrupted Tests:\n' '\t{interrupted}\n' .format(finished=finished_count, successful=len(successes), unsuccessful=(len(failures)+len(errors)), skipped=len(skips), expected_fail=len(expected_failures), unexpected_successful=len(unexpected_successes), interrupted=str(running_names))) tracebacks = '\n\n'.join([ (_Colors.FAIL + '{test_name}' + _Colors.END + '\n' + _Colors.BOLD + 'traceback:' + _Colors.END + '\n' + '{traceback}\n' + _Colors.BOLD + 'stdout:' + _Colors.END + '\n' + '{stdout}\n' + _Colors.BOLD + 'stderr:' + _Colors.END + '\n' + '{stderr}\n').format( test_name=result.name, traceback=_traceback_string(*result.traceback), stdout=result.stdout, stderr=result.stderr) for result in itertools.chain(failures, errors) ]) notes = 'Unexpected successes: {}\n'.format([ result.name for result in unexpected_successes]) return statistics + '\nErrors/Failures: \n' + tracebacks + '\n' + notes def jenkins_junit_xml(result): """An XML tree object that when written is recognizable by Jenkins. Args: result (AugmentedResult): The result object to get the junit xml output of. Returns: ElementTree.ElementTree: The XML tree. 
""" assert isinstance(result, AugmentedResult) root = ElementTree.Element('testsuites') suite = ElementTree.SubElement(root, 'testsuite', { 'name': 'Python gRPC tests', }) for case in result.cases.values(): if case.kind is CaseResult.Kind.SUCCESS: ElementTree.SubElement(suite, 'testcase', { 'name': case.name, }) elif case.kind in (CaseResult.Kind.ERROR, CaseResult.Kind.FAILURE): case_xml = ElementTree.SubElement(suite, 'testcase', { 'name': case.name, }) error_xml = ElementTree.SubElement(case_xml, 'error', {}) error_xml.text = ''.format(case.stderr, case.traceback) return ElementTree.ElementTree(element=root)
bsd-3-clause
pudo-attic/voteit-server
voteit/manage.py
2
1824
import json

from flask.ext.script import Manager

from voteit.core import db, issues
from voteit.web import app
from voteit.loader import load_motions
from voteit.loader import load_parties, load_people

manager = Manager(app)

# Collections that must never be dropped by ``reset``: the issue fixtures and
# MongoDB's own bookkeeping collections.
PROTECTED_COLLECTIONS = ('issues', 'system.indexes', 'system.users')


@manager.command
def loadfile(file_name):
    """ Load motions (with their parties and people) from a JSON file. """
    with open(file_name, 'rb') as fh:
        data = json.load(fh)
    # Parties and people are loaded first so motions can reference them.
    load_parties(data.get('parties', {}).values())
    load_people(data.get('people', {}).values())
    load_motions(data)


@manager.command
def loadpeople(file_name):
    """ Load people from a JSON file. """
    with open(file_name, 'rb') as fh:
        data = json.load(fh)
    load_people(data)


@manager.command
def loadparties(file_name):
    """ Load parties from a JSON file. """
    with open(file_name, 'rb') as fh:
        data = json.load(fh)
    load_parties(data)


@manager.command
def reset():
    """ Drop every collection except fixtures and MongoDB system data. """
    for coll in db.collection_names():
        if coll in PROTECTED_COLLECTIONS:
            continue
        # Single-argument print() call works under both Python 2 and 3
        # (the previous ``print coll`` statement was Python-2-only).
        print(coll)
        db.drop_collection(coll)


@manager.command
def deleteissues():
    """ Drop the issues collection. """
    # NOTE(review): ``issues`` is the imported collection object, not a name
    # string; pymongo's drop_collection accepts either form -- confirm this
    # is intentional and consistent with the other delete commands.
    db.drop_collection(issues)


@manager.command
def deletepeople():
    """ Drop the persons collection. """
    db.drop_collection('persons')


@manager.command
def deleteparties():
    """ Drop the parties collection. """
    db.drop_collection('parties')


# todo: guarantee that motions exist...
@manager.command
def addtestissue():
    """ Insert a fixed sample issue for manual testing. """
    db.issues.insert({
        'title': 'Finnish Misogynist Union Stance',
        'phrase': 'making homophobia, xenophobia and supressed anger an art form',
        'motions': [{
            'motion_id': 'motion-62-2012-1',
            'weights': {'yes': 23}
        }, {
            'motion_id': 'motion-62-2012-2',
            'weights': {'yes': -23}
        }]
    })


def run():
    manager.run()


if __name__ == "__main__":
    run()
mit
toshywoshy/ansible
lib/ansible/modules/network/fortios/fortios_router_bfd.py
7
8944
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

__metaclass__ = type

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

DOCUMENTATION = '''
---
module: fortios_router_bfd
short_description: Configure BFD in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify router feature and bfd category.
      Examples include all parameters and values need to be adjusted to datasources before usage.
      Tested with FOS v6.0.5
version_added: "2.8"
author:
    - Miguel Angel Munoz (@mamunozgonzalez)
    - Nicolas Thomas (@thomnico)
notes:
    - Requires fortiosapi library developed by Fortinet
    - Run as a local_action in your playbook
requirements:
    - fortiosapi>=0.9.8
options:
    host:
        description:
            - FortiOS or FortiGate IP address.
        type: str
        required: false
    username:
        description:
            - FortiOS or FortiGate username.
        type: str
        required: false
    password:
        description:
            - FortiOS or FortiGate password.
        type: str
        default: ""
    vdom:
        description:
            - Virtual domain, among those defined previously. A vdom is a
              virtual instance of the FortiGate that can be configured and
              used as a different unit.
        type: str
        default: root
    https:
        description:
            - Indicates if the requests towards FortiGate must use HTTPS protocol.
        type: bool
        default: true
    ssl_verify:
        description:
            - Ensures FortiGate certificate must be verified by a proper CA.
        type: bool
        default: true
        version_added: 2.9
    router_bfd:
        description:
            - Configure BFD.
        default: null
        type: dict
        suboptions:
            neighbor:
                description:
                    - neighbor
                type: list
                suboptions:
                    interface:
                        description:
                            - Interface name. Source system.interface.name.
                        type: str
                    ip:
                        description:
                            - IPv4 address of the BFD neighbor.
                        required: true
                        type: str
'''

EXAMPLES = '''
- hosts: localhost
  vars:
   host: "192.168.122.40"
   username: "admin"
   password: ""
   vdom: "root"
   ssl_verify: "False"
  tasks:
  - name: Configure BFD.
    fortios_router_bfd:
      host: "{{ host }}"
      username: "{{ username }}"
      password: "{{ password }}"
      vdom: "{{ vdom }}"
      https: "False"
      router_bfd:
        neighbor:
         -
            interface: "<your_own_value> (source system.interface.name)"
            ip: "<your_own_value>"
'''

RETURN = '''
build:
  description: Build number of the fortigate image
  returned: always
  type: str
  sample: '1547'
http_method:
  description: Last method used to provision the content into FortiGate
  returned: always
  type: str
  sample: 'PUT'
http_status:
  description: Last result given by FortiGate on last operation applied
  returned: always
  type: str
  sample: "200"
mkey:
  description: Master key (id) used in the last call to FortiGate
  returned: success
  type: str
  sample: "id"
name:
  description: Name of the table used to fulfill the request
  returned: always
  type: str
  sample: "urlfilter"
path:
  description: Path of the table used to fulfill the request
  returned: always
  type: str
  sample: "webfilter"
revision:
  description: Internal revision number
  returned: always
  type: str
  sample: "17.0.2.10658"
serial:
  description: Serial number of the unit
  returned: always
  type: str
  sample: "FGVMEVYYQT3AB5352"
status:
  description: Indication of the operation's result
  returned: always
  type: str
  sample: "success"
vdom:
  description: Virtual domain used
  returned: always
  type: str
  sample: "root"
version:
  description: Version of the FortiGate
  returned: always
  type: str
  sample: "v5.6.3"
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG


def login(data, fos):
    # Open a legacy fortiosapi session with the credentials supplied in the
    # module parameters (only used when not connecting through httpapi).
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password, verify=ssl_verify)


def filter_router_bfd_data(json):
    # Keep only the keys the 'router bfd' endpoint understands; drops
    # None-valued and unknown entries from the user-supplied dict.
    option_list = ['neighbor']
    dictionary = {}

    for attribute in option_list:
        if attribute in json and json[attribute] is not None:
            dictionary[attribute] = json[attribute]

    return dictionary


def underscore_to_hyphen(data):
    # Recursively rewrite dict keys from snake_case (Ansible convention) to
    # hyphenated form (FortiOS API convention); list elements are converted
    # in place, dicts are rebuilt.
    if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data


def router_bfd(data, fos):
    # Push the filtered, hyphenated configuration to the device under the
    # requested virtual domain.
    vdom = data['vdom']
    router_bfd_data = data['router_bfd']
    filtered_data = underscore_to_hyphen(filter_router_bfd_data(router_bfd_data))

    return fos.set('router',
                   'bfd',
                   data=filtered_data,
                   vdom=vdom)


def is_successful_status(status):
    # A DELETE that returns HTTP 404 is also treated as success (the object
    # is already absent).
    return status['status'] == "success" or \
        status['http_method'] == "DELETE" and status['http_status'] == 404


def fortios_router(data, fos):
    # Dispatch to the single endpoint this module manages and translate the
    # raw response into (is_error, changed, result).
    # NOTE(review): if data['router_bfd'] is falsy, `resp` is never bound and
    # the return below raises -- this mirrors the upstream generated code;
    # confirm before changing.
    if data['router_bfd']:
        resp = router_bfd(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp


def main():
    # Argument spec mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "router_bfd": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "neighbor": {"required": False, "type": "list",
                             "options": {
                                 "interface": {"required": False, "type": "str"},
                                 "ip": {"required": True, "type": "str"}
                             }}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        if module._socket_path:
            # httpapi connection: reuse Ansible's persistent connection socket.
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_router(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy mode: direct fortiosapi session; import lazily so the module
        # loads even when fortiosapi is absent and httpapi is used.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_router(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
gpl-3.0
bcharlas/mytrunk
py/ymport.py
2
17854
""" Import geometry from various formats ('import' is python keyword, hence the name 'ymport'). """ from yade.wrapper import * from yade import utils from minieigen import * def textExt(fileName,format='x_y_z_r',shift=Vector3.Zero,scale=1.0,attrs=[],**kw): """Load sphere coordinates from file in specific format, returns a list of corresponding bodies; that may be inserted to the simulation with O.bodies.append(). :param str filename: file name :param str format: the name of output format. Supported `x_y_z_r`(default), `x_y_z_r_matId`, 'x_y_z_r_attrs' :param [float,float,float] shift: [X,Y,Z] parameter moves the specimen. :param float scale: factor scales the given data. :param list attrs: attrs read from file if export.textExt(format='x_y_z_r_attrs') were used ('passed by refernece' style) :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.sphere` :returns: list of spheres. Lines starting with # are skipped """ infile = open(fileName,"r") lines = infile.readlines() infile.close() ret=[] for line in lines: data = line.split() if (data[0] == "#format"): format=data[1] continue elif (data[0][0] == "#"): continue if (format=='x_y_z_r'): pos = Vector3(float(data[0]),float(data[1]),float(data[2])) ret.append(utils.sphere(shift+scale*pos,scale*float(data[3]),**kw)) elif (format=='x_y_z_r_matId'): pos = Vector3(float(data[0]),float(data[1]),float(data[2])) ret.append(utils.sphere(shift+scale*pos,scale*float(data[3]),material=int(data[4]),**kw)) elif (format=='id_x_y_z_r_matId'): pos = Vector3(float(data[1]),float(data[2]),float(data[3])) ret.append(utils.sphere(shift+scale*pos,scale*float(data[4]),material=int(data[5]),**kw)) elif (format=='x_y_z_r_attrs'): pos = Vector3(float(data[0]),float(data[1]),float(data[2])) s = utils.sphere(shift+scale*pos,scale*float(data[3]),**kw) ret.append(s) attrs.append(data[4:]) else: raise RuntimeError("Please, specify a correct format output!"); return ret def 
textClumps(fileName,shift=Vector3.Zero,discretization=0,orientation=Quaternion((0,1,0),0.0),scale=1.0,**kw): """Load clumps-members from file, insert them to the simulation. :param str filename: file name :param str format: the name of output format. Supported `x_y_z_r`(default), `x_y_z_r_clumpId` :param [float,float,float] shift: [X,Y,Z] parameter moves the specimen. :param float scale: factor scales the given data. :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.sphere` :returns: list of spheres. Lines starting with # are skipped """ infile = open(fileName,"r") lines = infile.readlines() infile.close() ret=[] curClump=[] newClumpId = -1 for line in lines: data = line.split() if (data[0][0] == "#"): continue pos = orientation*Vector3(float(data[0]),float(data[1]),float(data[2])) if (newClumpId<0 or newClumpId==int(data[4])): idD = curClump.append(utils.sphere(shift+scale*pos,scale*float(data[3]),**kw)) newClumpId = int(data[4]) else: newClumpId = int(data[4]) ret.append(O.bodies.appendClumped(curClump,discretization=discretization)) curClump=[] idD = curClump.append(utils.sphere(shift+scale*pos,scale*float(data[3]),**kw)) if (len(curClump)<>0): ret.append(O.bodies.appendClumped(curClump,discretization=discretization)) # Set the mask to a clump the same as the first member of it for i in range(len(ret)): O.bodies[ret[i][0]].mask = O.bodies[ret[i][1][0]].mask return ret def text(fileName,shift=Vector3.Zero,scale=1.0,**kw): """Load sphere coordinates from file, returns a list of corresponding bodies; that may be inserted to the simulation with O.bodies.append(). :param string filename: file which has 4 colums [x, y, z, radius]. :param [float,float,float] shift: [X,Y,Z] parameter moves the specimen. :param float scale: factor scales the given data. :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.sphere` :returns: list of spheres. 
Lines starting with # are skipped """ return textExt(fileName=fileName,format='x_y_z_r',shift=shift,scale=scale,**kw) def stl(file, dynamic=None,fixed=True,wire=True,color=None,highlight=False,noBound=False,material=-1): """ Import geometry from stl file, return list of created facets.""" imp = STLImporter() facets=imp.ymport(file) for b in facets: b.shape.color=color if color else utils.randomColor() b.shape.wire=wire b.shape.highlight=highlight pos=b.state.pos utils._commonBodySetup(b,0,Vector3(0,0,0),material=material,pos=pos,noBound=noBound,dynamic=dynamic,fixed=fixed) b.aspherical=False return facets def gts(meshfile,shift=Vector3.Zero,scale=1.0,**kw): """ Read given meshfile in gts format. :Parameters: `meshfile`: string name of the input file. `shift`: [float,float,float] [X,Y,Z] parameter moves the specimen. `scale`: float factor scales the given data. `**kw`: (unused keyword arguments) is passed to :yref:`yade.utils.facet` :Returns: list of facets. """ import gts,yade.pack surf=gts.read(open(meshfile)) surf.scale(scale,scale,scale) surf.translate(shift[0],shift[1],shift[2]) yade.pack.gtsSurface2Facets(surf,**kw) def gmsh(meshfile="file.mesh",shift=Vector3.Zero,scale=1.0,orientation=Quaternion((0,1,0),0.0),**kw): """ Imports geometry from .mesh file and creates facets. :Parameters: `shift`: [float,float,float] [X,Y,Z] parameter moves the specimen. `scale`: float factor scales the given data. `orientation`: quaternion orientation of the imported mesh `**kw`: (unused keyword arguments) is passed to :yref:`yade.utils.facet` :Returns: list of facets forming the specimen. mesh files can easily be created with `GMSH <http://www.geuz.org/gmsh/>`_. 
Example added to :ysrc:`examples/packs/packs.py` Additional examples of mesh-files can be downloaded from http://www-roc.inria.fr/gamma/download/download.php """ infile = open(meshfile,"r") lines = infile.readlines() infile.close() nodelistVector3=[] elementlistVector3=[] # for deformable elements findVerticesString=0 while (lines[findVerticesString].split()[0]<>'Vertices'): #Find the string with the number of Vertices findVerticesString+=1 findVerticesString+=1 numNodes = int(lines[findVerticesString].split()[0]) for i in range(numNodes): nodelistVector3.append(Vector3(0.0,0.0,0.0)) id = 0 for line in lines[findVerticesString+1:numNodes+findVerticesString+1]: data = line.split() nodelistVector3[id] = orientation*Vector3(float(data[0])*scale,float(data[1])*scale,float(data[2])*scale)+shift id += 1 findTriangleString=findVerticesString+numNodes while (lines[findTriangleString].split()[0]<>'Triangles'): #Find the string with the number of Triangles findTriangleString+=1 findTriangleString+=1 numTriangles = int(lines[findTriangleString].split()[0]) triList = [] for i in range(numTriangles): triList.append([0,0,0,0]) tid = 0 for line in lines[findTriangleString+1:findTriangleString+numTriangles+1]: data = line.split() id1 = int(data[0])-1 id2 = int(data[1])-1 id3 = int(data[2])-1 triList[tid][0] = tid triList[tid][1] = id1 triList[tid][2] = id2 triList[tid][3] = id3 tid += 1 ret=[] for i in triList: a=nodelistVector3[i[1]] b=nodelistVector3[i[2]] c=nodelistVector3[i[3]] ret.append(utils.facet((nodelistVector3[i[1]],nodelistVector3[i[2]],nodelistVector3[i[3]]),**kw)) return ret def gengeoFile(fileName="file.geo",shift=Vector3.Zero,scale=1.0,orientation=Quaternion((0,1,0),0.0),**kw): """ Imports geometry from LSMGenGeo .geo file and creates spheres. Since 2012 the package is available in Debian/Ubuntu and known as python-demgengeo http://packages.qa.debian.org/p/python-demgengeo.html :Parameters: `filename`: string file which has 4 colums [x, y, z, radius]. 
`shift`: Vector3 Vector3(X,Y,Z) parameter moves the specimen. `scale`: float factor scales the given data. `orientation`: quaternion orientation of the imported geometry `**kw`: (unused keyword arguments) is passed to :yref:`yade.utils.sphere` :Returns: list of spheres. LSMGenGeo library allows one to create pack of spheres with given [Rmin:Rmax] with null stress inside the specimen. Can be useful for Mining Rock simulation. Example: :ysrc:`examples/packs/packs.py`, usage of LSMGenGeo library in :ysrc:`examples/test/genCylLSM.py`. * https://answers.launchpad.net/esys-particle/+faq/877 * http://www.access.edu.au/lsmgengeo_python_doc/current/pythonapi/html/GenGeo-module.html * https://svn.esscc.uq.edu.au/svn/esys3/lsm/contrib/LSMGenGeo/""" from yade.utils import sphere infile = open(fileName,"r") lines = infile.readlines() infile.close() numSpheres = int(lines[6].split()[0]) ret=[] for line in lines[7:numSpheres+7]: data = line.split() pos = orientation*Vector3(float(data[0]),float(data[1]),float(data[2])) ret.append(utils.sphere(shift+scale*pos,scale*float(data[3]),**kw)) return ret def gengeo(mntable,shift=Vector3.Zero,scale=1.0,**kw): """ Imports geometry from LSMGenGeo library and creates spheres. Since 2012 the package is available in Debian/Ubuntu and known as python-demgengeo http://packages.qa.debian.org/p/python-demgengeo.html :Parameters: `mntable`: mntable object, which creates by LSMGenGeo library, see example `shift`: [float,float,float] [X,Y,Z] parameter moves the specimen. `scale`: float factor scales the given data. `**kw`: (unused keyword arguments) is passed to :yref:`yade.utils.sphere` LSMGenGeo library allows one to create pack of spheres with given [Rmin:Rmax] with null stress inside the specimen. Can be useful for Mining Rock simulation. Example: :ysrc:`examples/packs/packs.py`, usage of LSMGenGeo library in :ysrc:`examples/test/genCylLSM.py`. 
* https://answers.launchpad.net/esys-particle/+faq/877 * http://www.access.edu.au/lsmgengeo_python_doc/current/pythonapi/html/GenGeo-module.html * https://svn.esscc.uq.edu.au/svn/esys3/lsm/contrib/LSMGenGeo/""" try: from GenGeo import MNTable3D,Sphere except ImportError: from gengeo import MNTable3D,Sphere ret=[] sphereList=mntable.getSphereListFromGroup(0) for i in range(0, len(sphereList)): r=sphereList[i].Radius() c=sphereList[i].Centre() ret.append(utils.sphere([shift[0]+scale*float(c.X()),shift[1]+scale*float(c.Y()),shift[2]+scale*float(c.Z())],scale*float(r),**kw)) return ret def unv(fileName,shift=(0,0,0),scale=1.0,returnConnectivityTable=False,**kw): """ Import geometry from unv file, return list of created facets. :param string fileName: name of unv file :param (float,float,float)|Vector3 shift: (X,Y,Z) parameter moves the specimen. :param float scale: factor scales the given data. :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.facet` :param bool returnConnectivityTable: if True, apart from facets returns also nodes (list of (x,y,z) nodes coordinates) and elements (list of (id1,id2,id3) element nodes ids). If False (default), returns only facets unv files are mainly used for FEM analyses (are used by `OOFEM <http://www.oofem.org/>`_ and `Abaqus <http://www.simulia.com/products/abaqus_fea.html>`_), but triangular elements can be imported as facets. These files cen be created e.g. with open-source free software `Salome <http://salome-platform.org>`_. 
Example: :ysrc:`examples/test/unv-read/unvRead.py`.""" class UNVReader: # class used in ymport.unv function # reads and evaluate given unv file and extracts all triangles # can be extended to read tetrahedrons as well def __init__(self,fileName,shift=(0,0,0),scale=1.0,returnConnectivityTable=False,**kw): self.shift = shift self.scale = scale self.unvFile = open(fileName,'r') self.flag = 0 self.line = self.unvFile.readline() self.lineSplit = self.line.split() self.nodes = [] self.elements = [] self.read(**kw) def readLine(self): self.line = self.unvFile.readline() self.lineSplit = self.line.split() def read(self,**kw): while self.line: self.evalLine() self.line = self.unvFile.readline() self.unvFile.close() self.createFacets(**kw) def evalLine(self): self.lineSplit = self.line.split() if len(self.lineSplit) <= 1: # eval special unv format if self.lineSplit[0] == '-1': pass elif self.lineSplit[0] == '2411': self.flag = 1; # nodes elif self.lineSplit[0] == '2412': self.flag = 2; # edges (lines) else: self.flag = 4; # volume elements or other, not interesting for us (at least yet) elif self.flag == 1: self.evalNodes() elif self.flag == 2: self.evalEdge() elif self.flag == 3: self.evalFacet() #elif self.flag == 4: self.evalGroup() def evalNodes(self): self.readLine() self.nodes.append(( self.shift[0]+self.scale*float(self.lineSplit[0]), self.shift[1]+self.scale*float(self.lineSplit[1]), self.shift[2]+self.scale*float(self.lineSplit[2]))) def evalEdge(self): if self.lineSplit[1]=='41': self.flag = 3 self.evalFacet() else: self.readLine() self.readLine() def evalFacet(self): if self.lineSplit[1]=='41': # triangle self.readLine() self.elements.append(( int(self.lineSplit[0])-1, int(self.lineSplit[1])-1, int(self.lineSplit[2])-1)) else: # is not triangle self.readLine() self.flag = 4 # can be added function to handle tetrahedrons def createFacets(self,**kw): self.facets = [utils.facet(tuple(self.nodes[i] for i in e),**kw) for e in self.elements] # unvReader = 
UNVReader(fileName,shift,scale,returnConnectivityTable,**kw) if returnConnectivityTable: return unvReader.facets, unvReader.nodes, unvReader.elements return facets def iges(fileName,shift=(0,0,0),scale=1.0,returnConnectivityTable=False,**kw): """ Import triangular mesh from .igs file, return list of created facets. :param string fileName: name of iges file :param (float,float,float)|Vector3 shift: (X,Y,Z) parameter moves the specimen. :param float scale: factor scales the given data. :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.facet` :param bool returnConnectivityTable: if True, apart from facets returns also nodes (list of (x,y,z) nodes coordinates) and elements (list of (id1,id2,id3) element nodes ids). If False (default), returns only facets """ nodes,elems = [],[] f = open(fileName) for line in f: if line.startswith('134,'): # read nodes coordinates ls = line.split(',') v = Vector3( float(ls[1])*scale + shift[0], float(ls[2])*scale + shift[1], float(ls[3])*scale + shift[2] ) nodes.append(v) if line.startswith('136,'): # read elements ls = line.split(',') i1,i2,i3 = int(ls[3])/2, int(ls[4])/2, int(ls[5])/2 # the numbering of nodes is 1,3,5,7,..., hence this int(ls[*])/2 elems.append( (i1,i2,i3) ) facets = [utils.facet( ( nodes[e[0]], nodes[e[1]], nodes[e[2]] ), **kw) for e in elems] if returnConnectivityTable: return facets, nodes, elems return facets def ele(nodeFileName,eleFileName,shift=(0,0,0),scale=1.0,**kw): """ Import tetrahedral mesh from .ele file, return list of created tetrahedrons. :param string nodeFileName: name of .node file :param string eleFileName: name of .ele file :param (float,float,float)|Vector3 shift: (X,Y,Z) parameter moves the specimen. :param float scale: factor scales the given data. 
:param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.polyhedron` """ f = open(nodeFileName) line = f.readline() while line.startswith('#'): line = f.readline() ls = line.split() nVertices = int(ls[0]) if int(ls[1])!=3: raise RuntimeError, "wrong .node file, number of dimensions should be 3" vertices = [None for i in xrange(nVertices)] shift = Vector3(shift) for i in xrange(nVertices): line = f.readline() while line.startswith('#'): line = f.readline() ls = line.split() if not ls: continue v = shift + scale*Vector3(tuple(float(ls[j]) for j in (1,2,3))) vertices[int(ls[0])-1] = v f.close() # f = open(eleFileName) line = f.readline() while line.startswith('#'): line = f.readline() ls = line.split() if int(ls[1])!=4: raise RuntimeError, "wrong .ele file, unsupported tetrahedra's number of nodes" nTetras = int(ls[0]) tetras = [None for i in xrange(nTetras)] for i in xrange(nTetras): ls = f.readline().split() tetras[int(ls[0])-1] = utils.polyhedron([vertices[int(ls[j])-1] for j in (1,2,3,4)],**kw) f.close() return tetras def textPolyhedra(fileName,material,shift=Vector3.Zero,scale=1.0,orientation=Quaternion((0,1,0),0.0),**kw): from yade import polyhedra_utils """Load polyhedra from a text file. :param str filename: file name :param [float,float,float] shift: [X,Y,Z] parameter moves the specimen. :param float scale: factor scales the given data. :param quaternion orientation: orientation of the imported polyhedra :param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.polyhedra_utils.polyhedra` :returns: list of polyhedras. Lines starting with # are skipped """ infile = open(fileName,"r") lines = infile.readlines() infile.close() ret=[] i=-1 while (i < (len(lines)-1)): i+=1 line = lines[i] data = line.split() if (data[0][0] == "#"): continue if (len(data)!=3): raise RuntimeError("Check polyhedra input file! 
Number of parameters in the first line is not 3!"); else: vertLoad = [] ids = int(data[0]) verts = int(data[1]) surfs = int(data[2]) i+=1 for d in range(verts): dataV = lines[i].split() pos = orientation*Vector3(float(dataV[0])*scale,float(dataV[1])*scale,float(dataV[2])*scale)+shift vertLoad.append(pos) i+=1 polR = polyhedra_utils.polyhedra(material=material,v=vertLoad,**kw) ret.append(polR) i= i + surfs - 1 return ret
gpl-2.0
John-Hart/autorest
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyByte/autorestswaggerbatbyteservice/operations/byte.py
2
8615
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.pipeline import ClientRawResponse

from .. import models


class Byte(object):
    """Byte operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer

        self.config = config

    def _get_bytearray(
            self, url, custom_headers=None, raw=False, **operation_config):
        """Issue a parameterless GET against *url* and deserialize the body.

        The four GET operations of this group (`get_null`, `get_empty`,
        `get_non_ascii`, `get_invalid`) were byte-for-byte identical except
        for the URL, so they share this implementation.

        :param str url: Relative URL of the operation.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: bytearray
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: ErrorException on any non-200 status code
        """
        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('bytearray', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def get_null(
            self, custom_headers=None, raw=False, **operation_config):
        """Get null byte value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: bytearray
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get_bytearray(
            '/byte/null', custom_headers=custom_headers, raw=raw,
            **operation_config)

    def get_empty(
            self, custom_headers=None, raw=False, **operation_config):
        """Get empty byte value ''.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: bytearray
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get_bytearray(
            '/byte/empty', custom_headers=custom_headers, raw=raw,
            **operation_config)

    def get_non_ascii(
            self, custom_headers=None, raw=False, **operation_config):
        """Get non-ascii byte string hex(FF FE FD FC FB FA F9 F8 F7 F6).

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: bytearray
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get_bytearray(
            '/byte/nonAscii', custom_headers=custom_headers, raw=raw,
            **operation_config)

    def put_non_ascii(
            self, byte_body, custom_headers=None, raw=False, **operation_config):
        """Put non-ascii byte string hex(FF FE FD FC FB FA F9 F8 F7 F6).

        :param byte_body: Base64-encoded non-ascii byte string hex(FF FE FD
         FC FB FA F9 F8 F7 F6)
        :type byte_body: bytearray
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/byte/nonAscii'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        body_content = self._serialize.body(byte_body, 'bytearray')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def get_invalid(
            self, custom_headers=None, raw=False, **operation_config):
        """Get invalid byte value ':::SWAGGER::::'.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: bytearray
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get_bytearray(
            '/byte/invalid', custom_headers=custom_headers, raw=raw,
            **operation_config)
mit
stgraber/snapcraft
snapcraft/plugins/make.py
2
3995
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""The make plugin is useful for building make based parts.

Make based projects are projects that have a Makefile that drives the
build.

This plugin always runs 'make' followed by 'make install', except when
the 'artifacts' keyword is used.

This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.

Additionally, this plugin uses the following plugin-specific keywords:

    - artifacts: (list) Link/copy the given files from the make output
      to the snap installation directory. If specified, the
      'make install' step will be skipped.
    - makefile: (string) Use the given file as the makefile.
    - make-parameters: (list of strings) Pass the given parameters to
      the make command.
    - make-install-var: (string; default: DESTDIR) Use this variable to
      redirect the installation into the snap.
"""

import os

import snapcraft
import snapcraft.common


class MakePlugin(snapcraft.BasePlugin):

    @classmethod
    def schema(cls):
        """Return the JSON schema describing this plugin's properties."""
        schema = super().schema()
        schema['properties']['makefile'] = {
            'type': 'string',
        }
        schema['properties']['make-parameters'] = {
            'type': 'array',
            # Fixed: the JSON Schema keyword is 'minItems'; the previous
            # 'minitems' spelling was silently ignored by validators.
            'minItems': 1,
            'uniqueItems': True,
            'items': {
                'type': 'string',
            },
            'default': [],
        }
        schema['properties']['make-install-var'] = {
            'type': 'string',
            'default': 'DESTDIR',
        }
        schema['properties']['artifacts'] = {
            'type': 'array',
            # Fixed: 'minitems' -> 'minItems' (same typo as above).
            'minItems': 1,
            'uniqueItems': True,
            'items': {
                'type': 'string',
            },
            'default': [],
        }

        # Inform Snapcraft of the properties associated with building. If these
        # change in the YAML Snapcraft will consider the build step dirty.
        schema['build-properties'].extend(
            ['makefile', 'make-parameters', 'make-install-var'])

        return schema

    def __init__(self, name, options, project):
        super().__init__(name, options, project)
        # 'make' must be present on the build host for this plugin to work.
        self.build_packages.append('make')

    def build(self):
        """Run make (and 'make install' unless 'artifacts' is given)."""
        super().build()

        command = ['make']

        if self.options.makefile:
            command.extend(['-f', self.options.makefile])

        if self.options.make_parameters:
            command.extend(self.options.make_parameters)

        self.run(command + ['-j{}'.format(self.parallel_build_count)])
        if self.options.artifacts:
            # Artifacts given: copy/link them manually and skip 'make install'.
            for artifact in self.options.artifacts:
                source_path = os.path.join(self.builddir, artifact)
                destination_path = os.path.join(self.installdir, artifact)
                if os.path.isdir(source_path):
                    snapcraft.file_utils.link_or_copy_tree(
                        source_path, destination_path)
                else:
                    snapcraft.file_utils.link_or_copy(
                        source_path, destination_path)
        else:
            # Redirect the installation into the snap via e.g. DESTDIR=<dir>.
            install_param = self.options.make_install_var + '=' + \
                self.installdir
            self.run(command + ['install', install_param])
gpl-3.0
Jgarcia-IAS/localizacion
openerp/addons/stock/report/__init__.py
376
1088
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import product_stock import report_stock # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
CallaJun/hackprince
bp_includes/forms.py
20
5263
"""
Created on June 10, 2012

@author: peta15
"""

from wtforms import fields
from wtforms import Form
from wtforms import validators

from bp_includes.lib import utils
from webapp2_extras.i18n import lazy_gettext as _
from webapp2_extras.i18n import ngettext, gettext

FIELD_MAXLENGTH = 50  # intended to stop maliciously long input


class FormTranslations(object):
    """Adapter exposing webapp2_extras i18n to WTForms' translation hook."""

    def gettext(self, string):
        return gettext(string)

    def ngettext(self, singular, plural, n):
        return ngettext(singular, plural, n)


class BaseForm(Form):
    """Base form bound to the POST data of a webapp2 request handler."""

    def __init__(self, request_handler):
        super(BaseForm, self).__init__(request_handler.request.POST)

    def _get_translations(self):
        return FormTranslations()


# ==== Mixins ====
class PasswordConfirmMixin(BaseForm):
    """Password plus confirmation field that must match it."""
    password = fields.TextField(_('Password'), [
        validators.Required(),
        validators.Length(max=FIELD_MAXLENGTH, message=_(
            "Field cannot be longer than %(max)d characters."))])
    c_password = fields.TextField(_('Confirm Password'), [
        validators.Required(),
        validators.EqualTo('password', _('Passwords must match.')),
        validators.Length(max=FIELD_MAXLENGTH, message=_(
            "Field cannot be longer than %(max)d characters."))])


class UsernameMixin(BaseForm):
    """Username restricted to the VALID_USERNAME_REGEXP character set."""
    username = fields.TextField(_('Username'), [
        validators.Required(),
        validators.Length(max=FIELD_MAXLENGTH, message=_(
            "Field cannot be longer than %(max)d characters.")),
        validators.regexp(utils.VALID_USERNAME_REGEXP, message=_(
            "Username invalid. Use only letters and numbers."))])


class UsernameEmailMixin(BaseForm):
    """Username-or-email field; no character-set restriction (emails allowed)."""
    username = fields.TextField(_('Username'), [
        validators.Required(),
        validators.Length(max=FIELD_MAXLENGTH, message=_(
            "Field cannot be longer than %(max)d characters."))])


class NameMixin(BaseForm):
    """Optional first/last name fields validated against NAME_LASTNAME_REGEXP."""
    name = fields.TextField(_('Name'), [
        validators.Length(max=FIELD_MAXLENGTH, message=_(
            "Field cannot be longer than %(max)d characters.")),
        validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_(
            "Name invalid. Use only letters and numbers."))])
    last_name = fields.TextField(_('Last Name'), [
        validators.Length(max=FIELD_MAXLENGTH, message=_(
            "Field cannot be longer than %(max)d characters.")),
        validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_(
            "Last Name invalid. Use only letters and numbers."))])


class EmailMixin(BaseForm):
    """Required email field with length and format validation."""
    email = fields.TextField(_('Email'), [
        validators.Required(),
        validators.Length(min=8, max=FIELD_MAXLENGTH, message=_(
            "Field must be between %(min)d and %(max)d characters long.")),
        validators.regexp(utils.EMAIL_REGEXP,
                          message=_('Invalid email address.'))])


# ==== Forms ====

class PasswordResetCompleteForm(PasswordConfirmMixin):
    pass


class LoginForm(UsernameEmailMixin):
    password = fields.TextField(_('Password'), [
        validators.Required(),
        validators.Length(max=FIELD_MAXLENGTH, message=_(
            "Field cannot be longer than %(max)d characters."))],
        id='l_password')


class RegisterForm(PasswordConfirmMixin, UsernameMixin, NameMixin, EmailMixin):
    country = fields.SelectField(_('Country'), choices=[])
    tz = fields.SelectField(_('Timezone'), choices=[])


class EditProfileForm(UsernameMixin, NameMixin):
    country = fields.SelectField(_('Country'), choices=[])
    tz = fields.SelectField(_('Timezone'), choices=[])


class EditPasswordForm(PasswordConfirmMixin):
    current_password = fields.TextField(_('Password'), [
        validators.Required(),
        validators.Length(max=FIELD_MAXLENGTH, message=_(
            "Field cannot be longer than %(max)d characters."))])


class EditEmailForm(BaseForm):
    new_email = fields.TextField(_('Email'), [
        validators.Required(),
        validators.Length(min=8, max=FIELD_MAXLENGTH, message=_(
            "Field must be between %(min)d and %(max)d characters long.")),
        validators.regexp(utils.EMAIL_REGEXP,
                          message=_('Invalid email address.'))])
    password = fields.TextField(_('Password'), [
        validators.Required(),
        validators.Length(max=FIELD_MAXLENGTH, message=_(
            "Field cannot be longer than %(max)d characters."))])
lgpl-3.0
elboby/flask-config-override
flask_config_override/__init__.py
1
1884
from flask import request
import logging

from .proxy_config import ProxyConfig
from .cookie_utils import cookie_to_json
from .blueprint import blueprint as config_override_bp

logger = logging.getLogger(__name__)


class ConfigOverride(object):
    """
    Flask extension used to control the config override mechanism.
    """

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Attach the override machinery (proxy config, blueprint, hook) to *app*."""
        self._handle_config(app)

        if not app.config['CONFIG_OVERRIDE_COOKIE_ENABLED']:
            logger.debug("Config override disabled")
            return

        logger.debug("Attaching the proxy config...")
        app.config = ProxyConfig(app.config)

        logger.debug("Attaching the config override blueprint...")
        app.register_blueprint(config_override_bp,
                               url_prefix='/config_override')

        @app.before_request
        def override_config_from_cookie():
            """ Load overriding config from cookie. """
            data = request.cookies.get(
                app.config['CONFIG_OVERRIDE_COOKIE_NAME'])
            logger.debug("Config override by cookie: %s" % data)
            try:
                if data:
                    app.config.set_overriden_configuration(
                        cookie_to_json(data))
            except ValueError:
                # Message fixed: the old backslash line-continuation inside
                # the string literal baked the source indentation into the
                # logged text.
                logger.warning("Config override ABORTED as cookie is "
                               "malformed: %s" % data)

    def _handle_config(self, app):
        """ Setup default values if missing. """
        defaults = {
            'CONFIG_OVERRIDE_COOKIE_NAME': 'config_override_options',
            'CONFIG_OVERRIDE_COOKIE_ENABLED': True,
            'CONFIG_OVERRIDE_EXTENDABLE_VARS': [],
        }
        # Bug fix: the previous implementation did
        #     default.update(app.config); app.config = default
        # which replaced Flask's Config object with a plain dict, silently
        # losing Config's helpers (from_object, from_envvar, root_path, ...).
        # Fill in only the missing keys, in place.
        for key, value in defaults.items():
            app.config.setdefault(key, value)
bsd-3-clause
mapennell/ansible
lib/ansible/utils/color.py
167
3134
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import sys from ansible import constants as C ANSIBLE_COLOR=True if C.ANSIBLE_NOCOLOR: ANSIBLE_COLOR=False elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty(): ANSIBLE_COLOR=False else: try: import curses curses.setupterm() if curses.tigetnum('colors') < 0: ANSIBLE_COLOR=False except ImportError: # curses library was not found pass except curses.error: # curses returns an error (e.g. could not find terminal) ANSIBLE_COLOR=False if C.ANSIBLE_FORCE_COLOR: ANSIBLE_COLOR=True # --- begin "pretty" # # pretty - A miniature library that provides a Python print and stdout # wrapper that makes colored terminal text easier to use (e.g. without # having to mess around with ANSI escape sequences). This code is public # domain - there is no license except that you must leave this header. 
# # Copyright (C) 2008 Brian Nez <thedude at bri1 dot com> # # http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/ codeCodes = { 'black': '0;30', 'bright gray': '0;37', 'blue': '0;34', 'white': '1;37', 'green': '0;32', 'bright blue': '1;34', 'cyan': '0;36', 'bright green': '1;32', 'red': '0;31', 'bright cyan': '1;36', 'purple': '0;35', 'bright red': '1;31', 'yellow': '0;33', 'bright purple': '1;35', 'dark gray': '1;30', 'bright yellow': '1;33', 'normal': '0' } def stringc(text, color): """String in color.""" if ANSIBLE_COLOR: return "\033["+codeCodes[color]+"m"+text+"\033[0m" else: return text # --- end "pretty" def colorize(lead, num, color): """ Print 'lead' = 'num' in 'color' """ if num != 0 and ANSIBLE_COLOR and color is not None: return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color)) else: return "%s=%-4s" % (lead, str(num)) def hostcolor(host, stats, color=True): if ANSIBLE_COLOR and color: if stats['failures'] != 0 or stats['unreachable'] != 0: return "%-37s" % stringc(host, 'red') elif stats['changed'] != 0: return "%-37s" % stringc(host, 'yellow') else: return "%-37s" % stringc(host, 'green') return "%-26s" % host
gpl-3.0
ProjectSWGCore/NGECore2
scripts/mobiles/corellia/selonian_separatist_general.py
2
3086
import sys

from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector

def addTemplate(core):
	"""Register the 'selonian_separatist_general' NPC template with the
	spawn service.

	:param core: game core object; only core.spawnService is used here.
	"""
	mobileTemplate = MobileTemplate()

	# Basic stats and spawn behavior.
	mobileTemplate.setCreatureName('selonian_separatist_general')
	mobileTemplate.setLevel(40)
	mobileTemplate.setDifficulty(Difficulty.NORMAL)
	mobileTemplate.setMinSpawnDistance(4)
	mobileTemplate.setMaxSpawnDistance(8)
	mobileTemplate.setDeathblow(True)
	mobileTemplate.setScale(1)
	mobileTemplate.setSocialGroup("selonian")
	mobileTemplate.setAssistRange(6)
	mobileTemplate.setStalker(True)
	mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)

	# Appearance: one of these .iff models is picked for each spawn.
	templates = Vector()
	templates.add('object/mobile/shared_dressed_selonian_m_01.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_02.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_03.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_04.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_05.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_06.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_07.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_08.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_09.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_10.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_11.iff')
	templates.add('object/mobile/shared_dressed_selonian_m_12.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_01.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_02.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_03.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_04.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_05.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_06.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_07.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_08.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_09.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_10.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_11.iff')
	templates.add('object/mobile/shared_dressed_selonian_f_12.iff')
	mobileTemplate.setTemplates(templates)

	# Weapon: E11 carbine, energy damage.
	weaponTemplates = Vector()
	weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
	weaponTemplates.add(weapontemplate)
	mobileTemplate.setWeaponTemplateVector(weaponTemplates)

	# Attacks: no special attacks, only the default ranged shot.
	attacks = Vector()
	mobileTemplate.setDefaultAttack('rangedShot')
	mobileTemplate.setAttacks(attacks)

	# Loot: always rolls the 'Junk' pool.
	lootPoolNames_1 = ['Junk']
	lootPoolChances_1 = [100]
	lootGroupChance_1 = 100
	mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)

	core.spawnService.addMobileTemplate('selonian_separatist_general', mobileTemplate)
	return
lgpl-3.0
Pointedstick/ReplicatorG
skein_engines/skeinforge-35/skeinforge_application/skeinforge_plugins/analyze_plugins/export_canvas_plugins/scalable_vector_graphics.py
6
6965
"""
This page is in the table of contents.
Scalable vector graphics is an export canvas plugin to export the canvas to a scalable vector graphics (.svg) file.

When the export menu item in the file menu in an analyze viewer tool, like skeinlayer or skeiniso is clicked, the postscript dialog will be displayed.  When the 'Export to Scalable Vector Graphics' button on that dialog is clicked, the canvas will be exported as a scalable vector graphics file.  If the 'Scalable Vector Graphics Program' is set to the default 'webbrowser', the scalable vector graphics file will be sent to the default browser to be opened.  If the 'Scalable Vector Graphics Program' is set to a program name, the scalable vector graphics file will be sent to that program to be opened.  If furthermore the 'File Extension' is set to a file extension, the scalable vector graphics file will be sent to the program, along with the file extension for the converted output.  The default is blank because some systems do not have an image conversion program; if you have or will install an image conversion program, a common 'File Extension' is png.

A good open source conversion program is Image Magick, which is available at:
http://www.imagemagick.org/script/index.php

An export canvas plugin is a script in the export_canvas_plugins folder which has the function getNewRepository, and which has a repository class with the functions setCanvasFileNameSuffix to set variables and execute to save the file.  It is meant to be run from an analyze viewer tool, like skeinlayer or skeiniso.  To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
"""

from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__

from fabmetheus_utilities import archive
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
import cStringIO
import os
import sys


__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GPL 3.0'


def getNewRepository():
	"Get the repository constructor."
	return ScalableVectorGraphicsRepository()

def parseLineReplace( firstWordTable, line, output ):
	"Parse the line and replace it if the first word of the line is in the first word table."
	# Lines whose first word matches a key are substituted wholesale by the
	# table's value; all other lines pass through unchanged.
	firstWord = gcodec.getFirstWordFromLine(line)
	if firstWord in firstWordTable:
		line = firstWordTable[ firstWord ]
	gcodec.addLineAndNewlineIfNecessary( line, output )


class ScalableVectorGraphicsRepository:
	"A class to handle the export settings."
	def __init__(self):
		"Set the default settings, execute title & settings fileName."
		settings.addListsToRepository( 'skeinforge_application.skeinforge_plugins.analyze_plugins.export_canvas_plugins.scalable_vector_graphics.html', None, self )
		self.fileExtension = settings.StringSetting().getFromValue('File Extension:', self, '')
		self.svgViewer = settings.StringSetting().getFromValue('SVG Viewer:', self, 'webbrowser')

	def addCanvasLineToOutput( self, canvasLinesOutput, objectIDNumber ):
		"Add the canvas line to the output."
		# Translate Tkinter canvas coordinates so the bounding box's
		# north-west corner becomes the SVG origin.
		coordinates = self.canvas.coords( objectIDNumber )
		xBegin = coordinates[0] - self.boxW
		xEnd = coordinates[2] - self.boxW
		yBegin = coordinates[1] - self.boxN
		yEnd = coordinates[3] - self.boxN
		west = self.boxW
		color = self.canvas.itemcget( objectIDNumber, 'fill')
		width = self.canvas.itemcget( objectIDNumber, 'width')
		line = '<line x1="%s" y1="%s" x2="%s" y2="%s" stroke="%s" stroke-width="%spx"/>\n' % ( xBegin, yBegin, xEnd, yEnd, color, width )
		canvasLinesOutput.write( line + '\n')

	def execute(self):
		"Export the canvas as an svg file."
		svgFileName = archive.getFilePathWithUnderscoredBasename( self.fileName, self.suffix )
		boundingBox = self.canvas.bbox( settings.Tkinter.ALL ) # tuple (w, n, e, s)
		self.boxW = boundingBox[0]
		self.boxN = boundingBox[1]
		boxWidth = boundingBox[2] - self.boxW
		boxHeight = boundingBox[3] - self.boxN
		print('Exported svg file saved as ' + svgFileName )
		# Fill the canvas_template.svg skeleton: each key is the first word
		# of a template line to be replaced by the corresponding value.
		svgTemplateText = archive.getFileTextInFileDirectory( settings.__file__, os.path.join('templates', 'canvas_template.svg') )
		output = cStringIO.StringIO()
		lines = archive.getTextLines( svgTemplateText )
		firstWordTable = {}
		firstWordTable['height="999px"'] = '	height="%spx"' % int( round( boxHeight ) )
		firstWordTable['<!--replaceLineWith_coloredLines-->'] = self.getCanvasLinesOutput()
		firstWordTable['replaceLineWithTitle'] = archive.getSummarizedFileName( self.fileName )
		firstWordTable['width="999px"'] = '	width="%spx"' % int( round( boxWidth ) )
		for line in lines:
			parseLineReplace( firstWordTable, line, output )
		archive.writeFileText( svgFileName, output.getvalue() )
		fileExtension = self.fileExtension.value
		svgViewer = self.svgViewer.value
		if svgViewer == '':
			return
		if svgViewer == 'webbrowser':
			settings.openWebPage( svgFileName )
			return
		# Shell out to an external viewer/converter.
		# NOTE(review): the command is built by string concatenation and run
		# through os.system; file names are quoted but not fully shell-escaped.
		svgFilePath = '"' + os.path.normpath( svgFileName ) + '"' # " to send in file name with spaces
		shellCommand = svgViewer + ' ' + svgFilePath
		print('')
		if fileExtension == '':
			print('Sending the shell command:')
			print( shellCommand )
			commandResult = os.system( shellCommand )
			if commandResult != 0:
				print('It may be that the system could not find the %s program.' % svgViewer )
				print('If so, try installing the %s program or look for another svg viewer, like Netscape which can be found at:' % svgViewer )
				print('http://www.netscape.org/')
			return
		# The trailing '"' is deliberate: svgFilePath is already quoted, so the
		# converted file name must carry the closing quote too.
		convertedFileName = archive.getFilePathWithUnderscoredBasename( svgFilePath, '.' + fileExtension + '"')
		shellCommand += ' ' + convertedFileName
		print('Sending the shell command:')
		print( shellCommand )
		commandResult = os.system( shellCommand )
		if commandResult != 0:
			print('The %s program could not convert the svg to the %s file format.' % ( svgViewer, fileExtension ) )
			print('Try installing the %s program or look for another one, like Image Magick which can be found at:' % svgViewer )
			print('http://www.imagemagick.org/script/index.php')

	def getCanvasLinesOutput(self):
		"Add the canvas line to the output."
		# Serialize every 'line' item currently on the canvas to SVG <line> markup.
		canvasLinesOutput = cStringIO.StringIO()
		objectIDNumbers = self.canvas.find_all()
		for objectIDNumber in objectIDNumbers:
			if self.canvas.type( objectIDNumber ) == 'line':
				self.addCanvasLineToOutput( canvasLinesOutput, objectIDNumber )
		return canvasLinesOutput.getvalue()

	def setCanvasFileNameSuffix( self, canvas, fileName, suffix ):
		"Set the canvas and initialize the execute title."
		self.canvas = canvas
		self.executeTitle = 'Convert to Scalable Vector Graphics'
		self.fileName = fileName
		self.suffix = suffix + '.svg'


def main():
	"Display the file or directory dialog."
	settings.startMainLoopFromConstructor( getNewRepository() )

if __name__ == "__main__":
	main()
gpl-2.0
SucharithaPrabhakar/leosatellite
utils/tests/test-test.py
77
4843
#! /usr/bin/env python ## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*- # # Copyright (c) 2014 Siddharth Santurkar # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation; # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # NOTE: Run this script with the Python3 interpreter if the python3 compatibility # of the ns-3 unit test runner needs to be tested. # The following options of test.py are being tested for poratability by this script. 
# To see the options supported by this script, run with the -h option on the command line # # -h, --help show this help message and exit # -b BUILDPATH, --buildpath=BUILDPATH # specify the path where ns-3 was built (defaults to the # build directory for the current variant) # -c KIND, --constrain=KIND # constrain the test-runner by kind of test # -d, --duration print the duration of each test suite and example # -e EXAMPLE, --example=EXAMPLE # specify a single example to run (no relative path is # needed) # -u, --update-data If examples use reference data files, get them to re- # generate them # -f FULLNESS, --fullness=FULLNESS # choose the duration of tests to run: QUICK, EXTENSIVE, # or TAKES_FOREVER, where EXTENSIVE includes QUICK and # TAKES_FOREVER includes QUICK and EXTENSIVE (only QUICK # tests are run by default) # -g, --grind run the test suites and examples using valgrind # -k, --kinds print the kinds of tests available # -l, --list print the list of known tests # -m, --multiple report multiple failures from test suites and test # cases # -n, --nowaf do not run waf before starting testing # -p PYEXAMPLE, --pyexample=PYEXAMPLE # specify a single python example to run (with relative # path) # -r, --retain retain all temporary files (which are normally # deleted) # -s TEST-SUITE, --suite=TEST-SUITE # specify a single test suite to run # -t TEXT-FILE, --text=TEXT-FILE # write detailed test results into TEXT-FILE.txt # -v, --verbose print progress and informational messages # -w HTML-FILE, --web=HTML-FILE, --html=HTML-FILE # write detailed test results into HTML-FILE.html # -x XML-FILE, --xml=XML-FILE # write detailed test results into XML-FILE.xml from __future__ import print_function from TestBase import TestBaseClass import sys def main(argv): """ Prepares test cases and executes """ test_cases = [ '', '-h', '--help', '-b build/', '--buildpath=build/', '-c performance', '--constrain=performance', '-d', '--duration', '-e socket-options-ipv6', 
'--example=socket-options-ipv6', '-u', '--update-data', '-f EXTENSIVE --fullness=EXTENSIVE' '-g', '--grind', '-l', '--list', '-m', '--multiple', '-n', '--nowaf', '-p first', '--pyexample=first', '-r', '--retain', '-s ns3-tcp-interoperability', '--suite=ns3-tcp-interoperability', '-t t_opt.txt', '--text=t_opt.txt && rm -rf t_opt.txt', '-v', '--verbose', '-w t_opt.html && rm -rf t_opt.html', '--web=t_opt.html && rm -rf t_opt.html', '--html=t_opt.html && rm -rf t_opt.html', '-x t_opt.xml && rm -rf t_opt.xml', '--xml=t_opt.xml && rm -rf t_opt.xml', ] configure_string = sys.executable + ' waf configure --enable-tests --enable-examples' clean_string = sys.executable + ' waf clean' cmd_execute_list = [ '%s && %s test.py %s && %s' % (configure_string, sys.executable, option, clean_string) for option in test_cases] runner = TestBaseClass(argv[1:], "Test suite for the ns-3 unit test runner" , 'test-py') return runner.runtests(cmd_execute_list) if __name__ == '__main__': sys.exit(main(sys.argv))
gpl-2.0
abhinavdangeti/ep-engine
management/mc_bin_server.py
11
13932
#!/usr/bin/env python """ A memcached test server. Copyright (c) 2007 Dustin Sallings <dustin@spy.net> """ import asyncore import random import string import socket import struct import time import hmac import heapq import memcacheConstants from memcacheConstants import MIN_RECV_PACKET, REQ_PKT_FMT, RES_PKT_FMT from memcacheConstants import INCRDECR_RES_FMT from memcacheConstants import REQ_MAGIC_BYTE, RES_MAGIC_BYTE, EXTRA_HDR_FMTS VERSION="1.0" class BaseBackend(object): """Higher-level backend (processes commands and stuff).""" # Command IDs to method names. This is used to build a dispatch dict on # the fly. CMDS={ memcacheConstants.CMD_GET: 'handle_get', memcacheConstants.CMD_GETQ: 'handle_getq', memcacheConstants.CMD_SET: 'handle_set', memcacheConstants.CMD_ADD: 'handle_add', memcacheConstants.CMD_REPLACE: 'handle_replace', memcacheConstants.CMD_DELETE: 'handle_delete', memcacheConstants.CMD_INCR: 'handle_incr', memcacheConstants.CMD_DECR: 'handle_decr', memcacheConstants.CMD_QUIT: 'handle_quit', memcacheConstants.CMD_FLUSH: 'handle_flush', memcacheConstants.CMD_NOOP: 'handle_noop', memcacheConstants.CMD_VERSION: 'handle_version', memcacheConstants.CMD_APPEND: 'handle_append', memcacheConstants.CMD_PREPEND: 'handle_prepend', memcacheConstants.CMD_SASL_LIST_MECHS: 'handle_sasl_mechs', memcacheConstants.CMD_SASL_AUTH: 'handle_sasl_auth', memcacheConstants.CMD_SASL_STEP: 'handle_sasl_step', } def __init__(self): self.handlers={} self.sched=[] for id, method in self.CMDS.iteritems(): self.handlers[id]=getattr(self, method, self.handle_unknown) def _splitKeys(self, fmt, keylen, data): """Split the given data into the headers as specified in the given format, the key, and the data. 
Return (hdrTuple, key, data)""" hdrSize=struct.calcsize(fmt) assert hdrSize <= len(data), "Data too short for " + fmt + ': ' + `data` hdr=struct.unpack(fmt, data[:hdrSize]) assert len(data) >= hdrSize + keylen key=data[hdrSize:keylen+hdrSize] assert len(key) == keylen, "len(%s) == %d, expected %d" \ % (key, len(key), keylen) val=data[keylen+hdrSize:] return hdr, key, val def _error(self, which, msg): return which, 0, msg def processCommand(self, cmd, keylen, vb, cas, data): """Entry point for command processing. Lower level protocol implementations deliver values here.""" now=time.time() while self.sched and self.sched[0][0] <= now: print "Running delayed job." heapq.heappop(self.sched)[1]() hdrs, key, val=self._splitKeys(EXTRA_HDR_FMTS.get(cmd, ''), keylen, data) return self.handlers.get(cmd, self.handle_unknown)(cmd, hdrs, key, cas, val) def handle_noop(self, cmd, hdrs, key, cas, data): """Handle a noop""" print "Noop" return 0, 0, '' def handle_unknown(self, cmd, hdrs, key, cas, data): """invoked for any unknown command.""" return self._error(memcacheConstants.ERR_UNKNOWN_CMD, "The command %d is unknown" % cmd) class DictBackend(BaseBackend): """Sample backend implementation with a non-expiring dict.""" def __init__(self): super(DictBackend, self).__init__() self.storage={} self.held_keys={} self.challenge = ''.join(random.sample(string.ascii_letters + string.digits, 32)) def __lookup(self, key): rv=self.storage.get(key, None) if rv: now=time.time() if now >= rv[1]: print key, "expired" del self.storage[key] rv=None else: print "Miss looking up", key return rv def handle_get(self, cmd, hdrs, key, cas, data): val=self.__lookup(key) if val: rv = 0, id(val), struct.pack( memcacheConstants.GET_RES_FMT, val[0]) + str(val[2]) else: rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found') return rv def handle_set(self, cmd, hdrs, key, cas, data): print "Handling a set with", hdrs val=self.__lookup(key) exp, flags=hdrs def f(val): return 
self.__handle_unconditional_set(cmd, hdrs, key, data) return self._withCAS(key, cas, f) def handle_getq(self, cmd, hdrs, key, cas, data): rv=self.handle_get(cmd, hdrs, key, cas, data) if rv[0] == memcacheConstants.ERR_NOT_FOUND: print "Swallowing miss" rv = None return rv def __handle_unconditional_set(self, cmd, hdrs, key, data): exp=hdrs[1] # If it's going to expire soon, tell it to wait a while. if exp == 0: exp=float(2 ** 31) self.storage[key]=(hdrs[0], time.time() + exp, data) print "Stored", self.storage[key], "in", key if key in self.held_keys: del self.held_keys[key] return 0, id(self.storage[key]), '' def __mutation(self, cmd, hdrs, key, data, multiplier): amount, initial, expiration=hdrs rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found') val=self.storage.get(key, None) print "Mutating %s, hdrs=%s, val=%s %s" % (key, `hdrs`, `val`, multiplier) if val: val = (val[0], val[1], max(0, long(val[2]) + (multiplier * amount))) self.storage[key]=val rv=0, id(val), str(val[2]) else: if expiration != memcacheConstants.INCRDECR_SPECIAL: self.storage[key]=(0, time.time() + expiration, initial) rv=0, id(self.storage[key]), str(initial) if rv[0] == 0: rv = rv[0], rv[1], struct.pack( memcacheConstants.INCRDECR_RES_FMT, long(rv[2])) print "Returning", rv return rv def handle_incr(self, cmd, hdrs, key, cas, data): return self.__mutation(cmd, hdrs, key, data, 1) def handle_decr(self, cmd, hdrs, key, cas, data): return self.__mutation(cmd, hdrs, key, data, -1) def __has_hold(self, key): rv=False now=time.time() print "Looking for hold of", key, "in", self.held_keys, "as of", now if key in self.held_keys: if time.time() > self.held_keys[key]: del self.held_keys[key] else: rv=True return rv def handle_add(self, cmd, hdrs, key, cas, data): rv=self._error(memcacheConstants.ERR_EXISTS, 'Data exists for key') if key not in self.storage and not self.__has_hold(key): rv=self.__handle_unconditional_set(cmd, hdrs, key, data) return rv def handle_replace(self, cmd, hdrs, key, 
cas, data): rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found') if key in self.storage and not self.__has_hold(key): rv=self.__handle_unconditional_set(cmd, hdrs, key, data) return rv def handle_flush(self, cmd, hdrs, key, cas, data): timebomb_delay=hdrs[0] def f(): self.storage.clear() self.held_keys.clear() print "Flushed" if timebomb_delay: heapq.heappush(self.sched, (time.time() + timebomb_delay, f)) else: f() return 0, 0, '' def handle_delete(self, cmd, hdrs, key, cas, data): def f(val): rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found') if val: del self.storage[key] rv = 0, 0, '' print "Deleted", key, hdrs[0] if hdrs[0] > 0: self.held_keys[key] = time.time() + hdrs[0] return rv return self._withCAS(key, cas, f) def handle_version(self, cmd, hdrs, key, cas, data): return 0, 0, "Python test memcached server %s" % VERSION def _withCAS(self, key, cas, f): val=self.storage.get(key, None) if cas == 0 or (val and cas == id(val)): rv=f(val) elif val: rv = self._error(memcacheConstants.ERR_EXISTS, 'Exists') else: rv = self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found') return rv def handle_prepend(self, cmd, hdrs, key, cas, data): def f(val): self.storage[key]=(val[0], val[1], data + val[2]) return 0, id(self.storage[key]), '' return self._withCAS(key, cas, f) def handle_append(self, cmd, hdrs, key, cas, data): def f(val): self.storage[key]=(val[0], val[1], val[2] + data) return 0, id(self.storage[key]), '' return self._withCAS(key, cas, f) def handle_sasl_mechs(self, cmd, hdrs, key, cas, data): return 0, 0, 'PLAIN CRAM-MD5' def handle_sasl_step(self, cmd, hdrs, key, cas, data): assert key == 'CRAM-MD5' u, resp = data.split(' ', 1) expected = hmac.HMAC('testpass', self.challenge).hexdigest() if u == 'testuser' and resp == expected: print "Successful CRAM-MD5 auth." return 0, 0, 'OK' else: print "Errored a CRAM-MD5 auth." 
return self._error(memcacheConstants.ERR_AUTH, 'Auth error.') def _handle_sasl_auth_plain(self, data): foruser, user, passwd = data.split("\0") if user == 'testuser' and passwd == 'testpass': print "Successful plain auth" return 0, 0, "OK" else: print "Bad username/password: %s/%s" % (user, passwd) return self._error(memcacheConstants.ERR_AUTH, 'Auth error.') def _handle_sasl_auth_cram_md5(self, data): assert data == '' print "Issuing %s as a CRAM-MD5 challenge." % self.challenge return memcacheConstants.ERR_AUTH_CONTINUE, 0, self.challenge def handle_sasl_auth(self, cmd, hdrs, key, cas, data): mech = key if mech == 'PLAIN': return self._handle_sasl_auth_plain(data) elif mech == 'CRAM-MD5': return self._handle_sasl_auth_cram_md5(data) else: print "Unhandled auth type: %s" % mech return self._error(memcacheConstants.ERR_AUTH, 'Auth error.') class MemcachedBinaryChannel(asyncore.dispatcher): """A channel implementing the binary protocol for memcached.""" # Receive buffer size BUFFER_SIZE = 4096 def __init__(self, channel, backend, wbuf=""): asyncore.dispatcher.__init__(self, channel) self.log_info("New bin connection from %s" % str(self.addr)) self.backend=backend self.wbuf=wbuf self.rbuf="" def __hasEnoughBytes(self): rv=False if len(self.rbuf) >= MIN_RECV_PACKET: magic, cmd, keylen, extralen, datatype, vb, remaining, opaque, cas=\ struct.unpack(REQ_PKT_FMT, self.rbuf[:MIN_RECV_PACKET]) rv = len(self.rbuf) - MIN_RECV_PACKET >= remaining return rv def processCommand(self, cmd, keylen, vb, cas, data): return self.backend.processCommand(cmd, keylen, vb, cas, data) def handle_read(self): self.rbuf += self.recv(self.BUFFER_SIZE) while self.__hasEnoughBytes(): magic, cmd, keylen, extralen, datatype, vb, remaining, opaque, cas=\ struct.unpack(REQ_PKT_FMT, self.rbuf[:MIN_RECV_PACKET]) assert magic == REQ_MAGIC_BYTE assert keylen <= remaining, "Keylen is too big: %d > %d" \ % (keylen, remaining) assert extralen == memcacheConstants.EXTRA_HDR_SIZES.get(cmd, 0), \ "Extralen is 
too large for cmd 0x%x: %d" % (cmd, extralen) # Grab the data section of this request data=self.rbuf[MIN_RECV_PACKET:MIN_RECV_PACKET+remaining] assert len(data) == remaining # Remove this request from the read buffer self.rbuf=self.rbuf[MIN_RECV_PACKET+remaining:] # Process the command cmdVal = self.processCommand(cmd, keylen, vb, extralen, cas, data) # Queue the response to the client if applicable. if cmdVal: try: status, cas, response = cmdVal except ValueError: print "Got", cmdVal raise dtype=0 extralen=memcacheConstants.EXTRA_HDR_SIZES.get(cmd, 0) self.wbuf += struct.pack(RES_PKT_FMT, RES_MAGIC_BYTE, cmd, keylen, extralen, dtype, status, len(response), opaque, cas) + response def writable(self): return self.wbuf def handle_write(self): sent = self.send(self.wbuf) self.wbuf = self.wbuf[sent:] def handle_close(self): self.log_info("Disconnected from %s" % str(self.addr)) self.close() class MemcachedServer(asyncore.dispatcher): """A memcached server.""" def __init__(self, backend, handler, port=11211): asyncore.dispatcher.__init__(self) self.handler=handler self.backend=backend self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.set_reuse_addr() self.bind(("", port)) self.listen(5) self.log_info("Listening on %d" % port) def handle_accept(self): channel, addr = self.accept() self.handler(channel, self.backend) if __name__ == '__main__': port = 11211 import sys if sys.argv > 1: port = int(sys.argv[1]) server = MemcachedServer(DictBackend(), MemcachedBinaryChannel, port=port) asyncore.loop()
apache-2.0
lukauskas/means
src/means/tests/test_eq_mixed_moments.py
2
1938
import unittest

import sympy

from means.approximation.mea.eq_mixed_moments import DBetaOverDtCalculator
from means.core import Moment
from means.util.sympyhelpers import to_sympy_matrix, assert_sympy_expressions_equal


class TestEqMixedMoments(unittest.TestCase):

    def test_for_p53(self):
        """
        Given the propensities, the stoichiometry matrix, the counter (a
        list of Moments), the species list, a k_vector and an ek_counter
        (a list of Moments), the computed d-beta/dt vector should match
        the expected expressions exactly.
        """
        # Stoichiometry of the p53 model: three species, six reactions.
        stoichiometry_matrix = sympy.Matrix([
            [1, -1, -1, 0, 0, 0],
            [0, 0, 0, 1, -1, 0],
            [0, 0, 0, 0, 1, -1],
        ])

        reaction_propensities = to_sympy_matrix([
            [" c_0"],
            [" c_1*y_0"],
            ["c_2*y_0*y_2/(c_6 + y_0)"],
            [" c_3*y_0"],
            [" c_4*y_1"],
            [" c_5*y_2"],
        ])

        # Zeroth, first and second order moments tracked by the expansion.
        moment_counter = [
            Moment([0, 0, 0], 1),
            Moment([0, 0, 2], sympy.Symbol("yx1")),
            Moment([0, 1, 1], sympy.Symbol("yx2")),
            Moment([0, 2, 0], sympy.Symbol("yx3")),
            Moment([1, 0, 1], sympy.Symbol("yx4")),
            Moment([1, 1, 0], sympy.Symbol("yx5")),
            Moment([2, 0, 0], sympy.Symbol("yx6")),
        ]

        species_symbols = sympy.Matrix(["y_0", "y_1", "y_2"])

        calculator = DBetaOverDtCalculator(reaction_propensities,
                                           moment_counter,
                                           stoichiometry_matrix,
                                           species_symbols)

        # First-order moment of the first species.
        actual = calculator.get([1, 0, 0],
                                [Moment([1, 0, 0], sympy.Symbol("y_0"))]).T

        expected = to_sympy_matrix([
            "c_0 - c_1*y_0 - c_2*y_0*y_2/(c_6 + y_0)",
            " 0",
            " 0",
            " 0",
            " c_2*y_0/(c_6 + y_0)**2 - c_2/(c_6 + y_0)",
            " 0",
            " -c_2*y_0*y_2/(c_6 + y_0)**3 + c_2*y_2/(c_6 + y_0)**2",
        ])

        assert_sympy_expressions_equal(actual, expected)
mit
mnick/10c
tenc/__init__.py
1
1626
from _tenc import TZArchive
from _tenc import MAP_ORDER as MAP

# Registries populated by the register_parser / register_serializer
# decorators below: name -> (class, description).
available_parsers = dict()
available_serializers = dict()


# -- Convenience Functions --

def entities_index(archive_path, prefix=None, fprune=None):
    """Return the entity index of the archive at *archive_path*, optionally
    pruned to the positions listed in *fprune*."""
    elements = __extract_index(archive_path, TZArchive.ENTITIES_FOUT, prefix)
    return __prune_elements(elements, fprune)


def predicates_index(archive_path, prefix=None, fprune=None):
    """Return the predicate index of the archive at *archive_path*,
    optionally pruned to the positions listed in *fprune*."""
    elements = __extract_index(archive_path, TZArchive.PREDICATES_FOUT, prefix)
    return __prune_elements(elements, fprune)


def entity_attributes_index(archive_path, prefix=None):
    """Return the entity-attribute index of the archive."""
    return __extract_index(archive_path, TZArchive.ENTITIES_FOUT + '_attr',
                           prefix)


def predicate_attributes_index(archive_path, prefix=None):
    """Return the predicate-attribute index of the archive."""
    return __extract_index(archive_path, TZArchive.PREDICATES_FOUT + '_attr',
                           prefix)


def __extract_index(farc, fin, prefix=None):
    """Read the tensor index member *fin* from the bz2 tar archive *farc*."""
    import tarfile
    from _tenc import read_tensor_index, fjoin
    with tarfile.open(farc, 'r:bz2') as arc:
        m = arc.extractfile(fjoin(fin, TZArchive.MAP_SUFFIX, prefix))
        idx = read_tensor_index(m)
    return idx


def __prune_elements(elements, fprune):
    """Keep only the elements whose positions are listed in the index file
    *fprune*; return *elements* unchanged when *fprune* is None."""
    from _tenc import read_tensor_index
    if fprune is not None:
        with open(fprune, 'rb') as fin:
            idx = read_tensor_index(fin)
        elements = [elements[int(i)] for i in idx]
    return elements


def register_parser(name, description):
    """Class decorator that records the decorated class in
    available_parsers under *name*.

    BUG FIX: the inner wrapper now returns the class.  Previously it
    implicitly returned None, so any class decorated with
    @register_parser(...) was replaced by None at its definition site.
    """
    def _reg(cls):
        available_parsers[name] = (cls, description)
        return cls
    return _reg


def register_serializer(name, description):
    """Class decorator that records the decorated class in
    available_serializers under *name*.

    BUG FIX: same as register_parser -- the wrapper now returns the class
    instead of None.
    """
    def _reg(cls):
        available_serializers[name] = (cls, description)
        return cls
    return _reg
gpl-3.0
nischal2002/m-quiz-2016
classFour.py
1
2882
import random # This allows you to generate random numbers
import os # This module is used to check if we are running on pi
import quizEngine

# Drive the real LEDs/buzzer only on a Raspberry Pi; everywhere else the
# no-op implementations keep the quiz runnable.
if (os.uname().nodename == 'raspberrypi'):
    from ledControl import greenLedOn
    from ledControl import redLedOn
    from ledControl import redLedBuzzerOn
else:
    from ledControlNoop import greenLedOn
    from ledControlNoop import redLedOn
    from ledControlNoop import redLedBuzzerOn

TIMEOUT = 10 # this is the amount of time you will wait for an answer in Seconds. 10 means 10 seconds
QUESTION_COUNT = 10 # number of questions asked per quiz run
ANSWER_GOAL = 8 # correct answers needed to win
LED_ON_TIME = 2 # seconds the per-question LED/buzzer stays on
BETTER_LUCK_TIME = 3 # seconds the final win/lose signal stays on


def classFourQuiz():  # Class 4
    '''Run the quiz for CLASS 4: ask QUESTION_COUNT questions, light the
    green LED on a correct answer (red LED + buzzer otherwise), and declare
    a win when at least ANSWER_GOAL answers are correct.'''
    questionCount = 0
    correctAnswers = 0
    while(questionCount < QUESTION_COUNT):
        questionTxt, answer = getQuestionClassFour()
        if(quizEngine.runQuestion(questionTxt, answer, TIMEOUT)):
            greenLedOn(LED_ON_TIME)
            correctAnswers = correctAnswers + 1
        else:
            redLedBuzzerOn(LED_ON_TIME)
        questionCount = questionCount + 1
    print("You got " + str(correctAnswers) + " correct")
    if(correctAnswers >= ANSWER_GOAL):
        print("congratulations!,you win!!!")
        greenLedOn(BETTER_LUCK_TIME)
    else:
        print("sorry,better luck next time:)")
        redLedBuzzerOn(BETTER_LUCK_TIME)


def getQuestionClassFour():  # class 4
    '''Build a random class-4 arithmetic question.

    Returns a (question_text, expected_answer) tuple.  The question is an
    addition or a subtraction (chosen at random) of a three-digit number
    and a number below 100, so subtraction results are always positive.
    '''
    if (random.randrange(0, 2) == 1):
        a = getRandomTripleDigitInt()
        b = getRandomDoubleSingleDigitInt()
        qText = "You have " + str(TIMEOUT) + " seconds to answer this question\n"
        qText += str(a) + " + " + str(b) + " is : "
        return (qText, (a + b))
    else:
        a = getRandomTripleDigitInt()
        b = getRandomDoubleSingleDigitInt()
        qText = "You have " + str(TIMEOUT) + " seconds to answer this question\n"
        qText += str(a) + " - " + str(b) + " is : "
        return (qText, (a - b))


def getRandomDoubleSingleDigitInt():  # class 4
    '''Return a random integer in [0, 99] (at most two digits).'''
    return random.randrange(0, 100)


def getRandomTripleDigitInt():  # class 4
    '''Return a random three-digit integer in [100, 499].'''
    return random.randrange(100, 500)


def getRandomDoubleSingleDigitIntBelow(x):  # class 4
    '''Return a random integer in [0, x]; returns 0 when x is 0.'''
    if x != 0:
        return random.randrange(0, x + 1)
    else:
        return 0
mit
stuart-knock/tvb-framework
tvb/core/removers_factory.py
2
2172
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
#   CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
#   Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
#   Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
#       The Virtual Brain: a simulator of primate brain network dynamics.
#   Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#

"""
Created on Nov 1, 2011

.. moduleauthor:: Bogdan Neacsa <bogdan.neacsa@codemart.ro>
"""

####### Default factory is empty
FACTORY_DICTIONARY = {}


def get_remover(datatype_name):
    """
    Find the remover registered for a given DataType.

    :param datatype_name: class-name; to search for a remover class for this.
        Accepts either a (possibly dotted) class-name string or the class
        object itself.
    :returns: the remover registered under the simple class name, falling
        back on the one registered under 'default'.
    """
    # Reduce the argument to a simple (unqualified) class name.
    if isinstance(datatype_name, str) or isinstance(datatype_name, unicode):
        simple_name = datatype_name.split('.')[-1]
    else:
        simple_name = datatype_name.__name__
    # Use the dedicated remover when one is registered, else the default.
    lookup_key = simple_name if simple_name in FACTORY_DICTIONARY else 'default'
    return FACTORY_DICTIONARY[lookup_key]


def update_dictionary(new_dict):
    """ Update removers dictionary. """
    FACTORY_DICTIONARY.update(new_dict)
gpl-2.0
Jgarcia-IAS/localizacion
openerp/addons/crm_claim/report/__init__.py
446
1080
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import crm_claim_report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
TeamHG-Memex/frontera
frontera/tests/test_revisiting_backend.py
1
1723
# -*- coding: utf-8 -*-
from frontera.tests.backends import BackendSequenceTest, TEST_SITES
from frontera.utils.tester import FrontierTester

from datetime import timedelta
import pytest
from time import sleep


class RevisitingFrontierTester(FrontierTester):
    """FrontierTester variant that keeps crawling for a bounded number of
    iterations so re-scheduled (revisited) URLs have a chance to appear."""

    def run(self, add_all_pages=False):
        """Drive the frontier until it finishes or 5 iterations have run.

        Sleeps briefly whenever the downloader is idle so the revisit
        interval can elapse between iterations.
        """
        if not self.frontier.auto_start:
            self.frontier.start()
        if not add_all_pages:
            self._add_seeds()
        else:
            self._add_all()
        while not self.frontier.finished:
            result = self._run_iteration()
            self.sequence.append(result)
            requests, iteration, dl_info = result
            if self.downloader_simulator.idle():
                sleep(0.5)
            if iteration == 5:
                break
        self.frontier.stop()


class RevisitingBackendTest(BackendSequenceTest):
    """Checks that the SQLAlchemy revisiting backend re-schedules URLs."""

    def get_settings(self):
        # A short revisit interval plus an in-memory DB keeps the test fast
        # and self-contained.
        settings = super(RevisitingBackendTest, self).get_settings()
        settings.set("SQLALCHEMYBACKEND_REVISIT_INTERVAL", timedelta(seconds=2))
        settings.SQLALCHEMYBACKEND_ENGINE = 'sqlite:///:memory:'
        return settings

    @pytest.mark.parametrize(
        ('site_list', 'max_next_requests'),
        [
            ('SITE_01', 5),
            ('SITE_02', 10),
        ]
    )
    def test_sequence(self, site_list, max_next_requests):
        sequence = self.get_url_sequence(
            site_list=TEST_SITES[site_list],
            max_next_requests=max_next_requests,
            frontier_tester=RevisitingFrontierTester
        )
        # Pass as soon as any URL appears a second time (i.e. was revisited).
        seen = set()
        for url in sequence:
            if url in seen:
                return
            seen.add(url)
        # BUG FIX: corrected the typo "revisted" in the failure message.
        assert False, "None of the URLs were revisited"
bsd-3-clause
lnielsen/invenio
invenio/legacy/bibsword/client_templates.py
37
41746
# -*- coding: utf-8 -*- ## This file is part of Invenio. ## Copyright (C) 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ''' BibSWORD Client Templates ''' from invenio.config import CFG_SITE_URL, CFG_SITE_NAME, CFG_SITE_RECORD class BibSwordTemplate: ''' This class contains attributes and methods that allows to display all information used by the BibSword web user interface. Theses informations are form, validation or error messages ''' def __init__(self): ''' No init necessary for this class ''' #--------------------------------------------------------------------------- # BibSword WebSubmit Interface #--------------------------------------------------------------------------- def tmpl_display_submit_ack(self, remote_id, link): ''' This method generate the html code that displays the acknoledgement message after the submission of a record. @param remote_id: id of the record given by arXiv @param link: links to modify or consult submission @return: string containing the html code ''' html = '' html += '''<h1>Success !</h1>''' html += '''<p>The record has been successfully pushed to arXiv ! 
<br />''' \ '''You will get an email once it will be accepted by ''' \ '''arXiv moderator.</p>''' html += '''<p>The arXiv id of the submission is: <b>%s</b></p>''' % \ remote_id html += '''<p><a href="www.arxiv.org/user">Manage your submission</a></p>''' return html #--------------------------------------------------------------------------- # BibSword Administrator Interface #--------------------------------------------------------------------------- def tmpl_display_admin_page(self, submissions, first_row, last_row, total_rows, is_prev, is_last, offset, error_messages=None): ''' format the html code that display the submission table @param submissions: list of all submissions and their status @return: html code to be displayed ''' if error_messages == None: error_messages = [] body = ''' <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword"> %(error_message)s <input type="hidden" name="status" value="display_submission"/> <input type="hidden" name="first_row" value="%(first_row)s"/> <input type="hidden" name="last_row" value="%(last_row)s"/> <input type="hidden" name="total_rows" value="%(total_rows)s" /> <input type="submit" name="submit" value="New submission"/><br/> <br /> <input type="submit" name="submit" value="Refresh all"/><br/> <br /> Display <select name="offset"> <option value="5" %(selected_1)s>5</option> <option value="10" %(selected_2)s>10</option> <option value="25" %(selected_3)s>25</option> <option value="50" %(selected_4)s>50</option> <option value=%(total_rows)s %(selected_5)s>all</option> </select> rows per page <input type="submit" name="submit" value="Select" /><br /> <br /> <input type="submit" name="submit" value="First" %(is_prev)s/> <input type="submit" name="submit" value="Prev" %(is_prev)s/> Pages %(first_row)s - %(last_row)s / %(total_rows)s <input type="submit" name="submit" value="Next" %(is_last)s/> <input type="submit" name="submit" value="Last" %(is_last)s/><br/> <table border="1" 
valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="7" bgcolor="#e6e6fa"> <h2>Submission state</h2> </td> </tr> <tr> <td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td> <td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td> <td align="center" bgcolor="#e6e6fa"><b>Record number</b></td> <td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td> <td align="center" bgcolor="#e6e6fa"><b>Status</b></td> <td align="center" bgcolor="#e6e6fa"><b>Dates</b></td> <td align="center" bgcolor="#e6e6fa"><b>Links</b></td> </tr> %(submissions)s </table> </form>''' % { 'error_message': \ self.display_error_message_row(error_messages), 'table_width' : '100%', 'first_row' : first_row, 'last_row' : last_row, 'total_rows' : total_rows, 'is_prev' : is_prev, 'is_last' : is_last, 'selected_1' : offset[0], 'selected_2' : offset[1], 'selected_3' : offset[2], 'selected_4' : offset[3], 'selected_5' : offset[4], 'submissions' : self.fill_submission_table(submissions) } return body def tmpl_display_remote_server_info(self, server_info): ''' Display a table containing all server informations @param server_info: tuple containing all server infos @return: html code for the table containing infos ''' body = '''<table width="%(table_width)s">\n''' \ ''' <tr>\n''' \ ''' <td bgcolor="#e6e6fa">ID</td>\n''' \ ''' <td>%(server_id)s</td>\n''' \ ''' </tr>\n ''' \ ''' <tr>\n''' \ ''' <td bgcolor="#e6e6fa">Name</td>\n''' \ ''' <td>%(server_name)s</td>\n''' \ ''' </tr>\n ''' \ ''' <tr>\n''' \ ''' <td bgcolor="#e6e6fa">Host</td>\n''' \ ''' <td>%(server_host)s</td>\n''' \ ''' </tr>\n ''' \ ''' <tr>\n''' \ ''' <td bgcolor="#e6e6fa">Username</td>\n''' \ ''' <td>%(username)s</td>\n''' \ ''' </tr>\n ''' \ ''' <tr>\n''' \ ''' <td bgcolor="#e6e6fa">Password</td>\n''' \ ''' <td>%(password)s</td>\n''' \ ''' </tr>\n ''' \ ''' <tr>\n''' \ ''' <td bgcolor="#e6e6fa">Email</td>\n''' \ ''' <td>%(email)s</td>\n''' \ ''' </tr>\n ''' \ ''' <tr>\n''' \ ''' <td bgcolor="#e6e6fa">Realm</td>\n''' \ 
''' <td>%(realm)s</td>\n''' \ ''' </tr>\n ''' \ ''' <tr>\n''' \ ''' <td bgcolor="#e6e6fa">Record URL</td>\n''' \ ''' <td>%(url_base_record)s</td>\n''' \ ''' </tr>\n ''' \ ''' <tr>\n''' \ ''' <td bgcolor="#e6e6fa">URL Servicedocument</td>\n'''\ ''' <td>%(url_servicedocument)s</td>\n''' \ ''' </tr>\n ''' \ '''</table>''' % { 'table_width' : '50%', 'server_id' : server_info['server_id'], 'server_name' : server_info['server_name'], 'server_host' : server_info['server_host'], 'username' : server_info['username'], 'password' : server_info['password'], 'email' : server_info['email'], 'realm' : server_info['realm'], 'url_base_record' : server_info['url_base_record'], 'url_servicedocument': server_info['url_servicedocument'] } return body def tmpl_display_remote_servers(self, remote_servers, id_record, error_messages): ''' format the html code that display a dropdown list containing the servers @param self: reference to the current instance of the class @param remote_servers: list of tuple containing server's infos @return: string containing html code ''' body = ''' <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword"> <input type="hidden" name="status" value="select_server"/> %(error_message)s <input type="submit" name="submit" value="Cancel" /> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="2" bgcolor="#e6e6fa"> <h2>Forward a record</h2> </td> </tr> <tr> <td align="right" width="%(row_width)s"> <p>Enter the number of the report to submit: </p> </td> <td align="left" width="%(row_width)s"> <input type="text" name="id_record" size="20" value="%(id_record)s"/> </td> </tr> <tr> <td align="right" width="%(row_width)s"> <p>Select a remote server: </p> </td> <td align="left" width="%(row_width)s"> <select name="id_remote_server" size="1"> <option value="0">-- select a remote server --</option> %(remote_server)s </select> </td> </tr> <tr> <td colspan="2" align="center"> <input type="submit" 
value="Select" name="submit"/> </td> </tr> </table> </form>''' % { 'error_message': \ self.display_error_message_row(error_messages), 'table_width' : '100%', 'row_width' : '50%', 'id_record' : id_record, 'remote_server': \ self.fill_dropdown_remote_servers(remote_servers) } return body def tmpl_display_collections(self, selected_server, server_infos, collections, id_record, recid, error_messages): ''' format the html code that display the selected server, the informations about the selected server and a dropdown list containing the server's collections @param self: reference to the current instance of the class @param selected_server: tuple containing selected server name and id @param server_infos: tuple containing infos about selected server @param collections: list contianing server's collections @return: string containing html code ''' body = ''' <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword"> <input type="hidden" name="status" value="select_collection"/> <input type="hidden" name="id_remote_server" value="%(id_server)s"/> <input type="hidden" name="id_record" value="%(id_record)s"/> <input type="hidden" name="recid" value="%(recid)s"/> %(error_message)s <input type="submit" name="submit" value="Cancel" /> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="2" bgcolor="#e6e6fa"> <h2>Remote server</h2></td> </tr> <tr> <td align="center" rowspan="2" valign="center"> <h2>%(server_name)s</h2> </td> <td align="left"> SWORD version: %(server_version)s </td> </tr> <tr> <td align="left"> Max upload size [Kb]: %(server_maxUpload)s </td> </tr> <tr> <td align="left" colspan="2"> <input type="submit" value="Modify server" name="submit"/> </td> </tr> </table> <p> </p> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="2" bgcolor="#e6e6fa"><h2>Collection</h2> </td> </tr> <tr> <td align="right" width="%(row_width)s">Select a collection: </td> <td align="left" 
width="%(row_width)s"> <select name="id_collection" size="1"> <option value="0">-- select a collection --</option> %(collection)s </select> </td> </tr> <tr> <td align="center" colspan="2"> <input type="submit" value="Select" name="submit"/> </td> </tr> </table> </form>''' % { 'table_width' : '100%', 'row_width' : '50%', 'error_message' : \ self.display_error_message_row(error_messages), 'id_server' : selected_server['id'], 'server_name' : selected_server['name'], 'server_version' : server_infos['version'], 'server_maxUpload': server_infos['maxUploadSize'], 'collection' : \ self.fill_dropdown_collections(collections), 'id_record' : id_record, 'recid' : recid } return body def tmpl_display_categories(self, selected_server, server_infos, selected_collection, collection_infos, primary_categories, secondary_categories, id_record, recid, error_messages): ''' format the html code that display the selected server, the informations about the selected server, the selected collections, the informations about the collection and a dropdown list containing the server's primary and secondary categories @param self: reference to the current instance of the class @param selected_server: tuple containing selected server name and id @param server_infos: tuple containing infos about selected server @param selected_collection: selected collection @param collection_infos: tuple containing infos about selected col @param primary_categories: list of mandated categories for the col @return: string containing html code ''' body = ''' <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword"> <input type="hidden" name="status" value="select_primary_category"/> <input type="hidden" name="id_remote_server" value="%(id_server)s"/> <input type="hidden" name="id_collection" value="%(id_collection)s"/> <input type="hidden" name="id_record" value="%(id_record)s"/> <input type="hidden" name="recid" value="%(recid)s"/> %(error_message)s <input type="submit" 
name="submit" value="Cancel" /> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="2" bgcolor="#e6e6fa"> <h2>Remote server</h2> </td> </tr> <tr> <td align="center" rowspan="2" valign="center"> <h2>%(server_name)s</h2> </td> <td align="left"> SWORD version: %(server_version)s </td> </tr> <tr> <td align="left"> Max upload size [Kb]: %(server_maxUpload)s </td> </tr> <tr> <td align="left" colspan="2"> <input type="submit" value="Modify server" name="submit"/> </td> </tr> </table> <p> </p> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="2" bgcolor="#e6e6fa"> <h2>Collection</h2> </td> </tr> <tr> <td align="center" rowspan="2" valign="center"> <h2>%(collection_name)s</h2> </td> <td align="left"> URL: %(collection_url)s </td> </tr> <tr> <td align="left"> Accepted media types: <ul>%(collection_accept)s</ul> </td> </tr> <tr> <td align="left" colspan=2> <input type="submit" value="Modify collection" name="submit"/> </td> </tr> </table> <p> </p> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="2" bgcolor="#e6e6fa"> <h2>Mandatory category</h2> </td> </tr> <tr> <td align="right" width="%(row_width)s"> <p>Select a mandated category: </p> </td> <td align="left" width="%(row_width)s"> <select name="id_primary" size="1"> <option value="0">-- select a category --</option> %(primary_categories)s </select> </td> </tr> </table> <p></p> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="2" bgcolor="#e6e6fa"> <h2>Optional categories</h2> </td> </tr> <td align="right" width="%(row_width)s"> <p>Select optional categories: </p> </td> <td align="left" width="%(row_width)s"> <select name="id_categories" size="10" multiple> %(secondary_categories)s </select> </td> </tr> </table> <p> </p> <center> <input type="submit" value="Select" name="submit"/> </center> </form>''' % { 'table_width' : '100%', 'row_width' : '50%', 'error_message' : 
self.display_error_message_row( error_messages), # hidden input 'id_server' : selected_server['id'], 'id_collection' : selected_collection['id'], 'id_record' : id_record, 'recid' : recid, # variables values 'server_name' : selected_server['name'], 'server_version' : server_infos['version'], 'server_maxUpload' : server_infos['maxUploadSize'], 'collection_name' : selected_collection['label'], 'collection_accept': ''.join([ '''<li>%(name)s </li>''' % { 'name': accept } for accept in collection_infos['accept'] ]), 'collection_url' : selected_collection['url'], 'primary_categories' : self.fill_dropdown_primary( primary_categories), 'secondary_categories': self.fill_dropdown_secondary( secondary_categories) } return body def tmpl_display_metadata(self, user, server, collection, primary, categories, medias, metadata, id_record, recid, error_messages): ''' format a string containing every informations before a submission ''' body = ''' <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword"> <input type="hidden" name="status" value="check_submission"/> <input type="hidden" name="id_remote_server" value="%(id_server)s"/> <input type="hidden" name="id_collection" value="%(id_collection)s"/> <input type="hidden" name="id_primary" value="%(id_primary)s"/> <input type="hidden" name="id_categories" value="%(id_categories)s"/> <input type="hidden" name="id_record" value="%(id_record)s"/> <input type="hidden" name="recid" value="%(recid)s"/> %(error_message)s <input type="submit" name="submit" value="Cancel" /> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="2" bgcolor="#e6e6fa"> <h2>Destination</h2> </td> </tr> <tr> <td align="center" rowspan="3" valign="center"> <h2>%(server_name)s</h2> </td> <td align="left"> Collection: %(collection_name)s ( %(collection_url)s ) </td> </tr> <tr> <td align="left"> Primary category: %(primary_name)s ( %(primary_url)s ) </td> </tr> %(categories)s <tr> <td align="left" 
colspan="2"> <input type="submit" value="Modify destination" name="submit"/> </td> </tr> </table> <p> </p> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="4" bgcolor="#e6e6fa"> <h2>Submitter</h2> </td> </tr> <tr> <td width="%(row_width)s">Name:</td> <td><input type="text" name="author_name" size="100" value="%(user_name)s"/></td> </tr> <tr> <td>Email:</td> <td><input type="text" name="author_email" size="100" value="%(user_email)s"/></td> </tr> </table> <p></p> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="4" bgcolor="#e6e6fa"><h2>Media</h2></td> </tr> <tr><td colspan="4">%(medias)s%(media_help)s</td></tr> </table> <p></p> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="3" bgcolor="#e6e6fa"><h2>Metadata</h2> <font color="red"><b>Warning:</b> modification(s) will not be saved on the %(CFG_SITE_NAME)s</font> </td> </tr> <tr> <td align="left" width="%(row_width)s"><p>Report Number<span style="color:#f00">*</span>:</p></td> <td><input type="text" name="id" size="100" value="%(id)s"/></td> </tr> <tr> <td align="left" width="%(row_width)s"><p>Title<span style="color:#f00">*</span>:</p></td> <td><input type="text" name="title" size="100" value="%(title)s"/> </td> </tr> <tr> <td align="left" width="%(row_width)s"><p>Summary<span style="color:#f00">*</span>:</p></td> <td> <textarea name="summary" rows="4" cols="100">%(summary)s </textarea> </td> </tr> %(contributors)s %(journal_refs)s %(report_nos)s </table> <p><font color="red">The fields having a * are mandatory</font></p> <center> <input type="submit" value="Submit" name="submit"/> </center> <form>''' % { 'table_width' : '100%', 'row_width' : '25%', 'error_message' : \ self.display_error_message_row(error_messages), 'CFG_SITE_NAME': CFG_SITE_NAME, # hidden input 'id_server' : server['id'], 'id_collection' : collection['id'], 'id_primary' : primary['id'], 'id_categories' : 
self.get_list_id_categories(categories), 'id_record' : id_record, 'recid' : recid, # variables values 'server_name' : server['name'], 'collection_name' : collection['label'], 'collection_url' : collection['url'], 'primary_name' : primary['label'], 'primary_url' : primary['url'], 'categories' : self.fill_optional_category_list(categories), #user 'user_name' : user['nickname'], 'user_email' : user['email'], # media 'medias' : self.fill_media_list(medias, server['id']), 'media_help' : self.fill_arxiv_help_message(), # metadata 'id' : metadata['id'], 'title' : metadata['title'], 'summary' : metadata['summary'], 'contributors' : self.fill_contributors_list( metadata['contributors']), 'journal_refs' : self.fill_journal_refs_list( metadata['journal_refs']), 'report_nos' : self.fill_report_nos_list( metadata['report_nos']) } return body def tmpl_display_list_submission(self, submissions): ''' Display the data of submitted recods ''' body = ''' <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword"> <table border="1" valign="top" width="%(table_width)s"> <tr> <td align="left" colspan="7" bgcolor="#e6e6fa"> <h2>Document successfully submitted !</h2> </td> </tr> <tr> <td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td> <td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td> <td align="center" bgcolor="#e6e6fa"><b>Record id</b></td> <td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td> <td align="center" bgcolor="#e6e6fa"><b>Status</b></td> <td align="center" bgcolor="#e6e6fa"><b>Dates</b></td> <td align="center" bgcolor="#e6e6fa"><b>Links</b></td> </tr> %(submissions)s </table> <a href=%(CFG_SITE_URL)s/bibsword>Return</a> </form>''' % { 'table_width' : '100%', 'submissions' : self.fill_submission_table(submissions), 'CFG_SITE_URL' : CFG_SITE_URL } return body #*************************************************************************** # Private functions 
#*************************************************************************** def display_error_message_row(self, error_messages): ''' return a list of error_message in form of a bullet list @param error_messages: list of error_messages to display @return: html code that display list of errors ''' # if no errors, return nothing if len(error_messages) == 0: return '' if len(error_messages) == 1: # display a generic header message body = ''' <tr> <td align="left" colspan=2> <font color='red'> <p> The following error was found: </p> <ul> ''' else: # display a generic header message body = ''' <tr> <td align="left" colspan=2> <font color='red'> <p> Following errors were found: </p> <ul> ''' # insert each error lines for error_message in error_messages: body = body + ''' <li>%(error)s</li>''' % { 'error': error_message } body = body + ''' </ul> </font> </td> </tr>''' return body def fill_submission_table(self, submissions): ''' This method return the body of the submission state table. each submissions given in parameters has one row @param submissions: submission status list @return: html table body ''' return ''.join([ ''' <tr> <td>%(id_server)s: <a href="%(server_infos)s"> %(server_name)s</a></td> <td>%(user_name)s <br/> %(user_email)s</td <td>%(id_bibrec)s: <a href="%(cfg_site_url)s/%(CFG_SITE_RECORD)s/%(id_bibrec)s" target="_blank">%(no_bibrec)s</a></td> <td><a href="%(url_base_remote)s/%(id_remote)s" target="_blank"> %(id_remote)s</a></td> <td>%(status)s</td> <td><b>submission: </b> %(submission_date)s <br/> <b>publication: </b> %(publication_date)s <br/> <b>removal: </b> %(removal_date)s </td> <td><b>media: </b> <a href="%(media_link)s" target="_blank"> %(media_link)s</a> <br/> <b>metadata: </b> <a href="%(metadata_link)s" target="_blank"> %(metadata_link)s</a> <br /> <b>status: </b> <a href="%(status_link)s" target="_blank"> %(status_link)s</a></td> </tr>''' % { 'id_server' : str(submission['id_server']), 'server_infos' : "%s/bibsword/remoteserverinfos?id=%s" % \ 
(CFG_SITE_URL, submission['id_server']), 'server_name' : str(submission['server_name']), 'user_name' : str(submission['user_name']), 'user_email' : str(submission['user_email']), 'id_bibrec' : str(submission['id_record']), 'no_bibrec' : str(submission['report_no']), 'id_remote' : str(submission['id_remote']), 'status' : str(submission['status']), 'submission_date' : str(submission['submission_date']), 'publication_date' : str(submission['publication_date']), 'removal_date' : str(submission['removal_date']), 'media_link' : str(submission['link_medias']), 'metadata_link' : str(submission['link_metadata']), 'status_link' : str(submission['link_status']), 'url_base_remote' : str(submission['url_base_remote']), 'cfg_site_url' : CFG_SITE_URL, 'CFG_SITE_RECORD' : CFG_SITE_RECORD } for submission in submissions]) def fill_dropdown_remote_servers(self, remote_servers): ''' This method fill a dropdown list of remote servers. @return: html code to display ''' return ''.join([ '''<option value="%(id)s">%(name)s - %(host)s</option>''' % { 'id': str(remote_server['id']), 'name': remote_server['name'], 'host': remote_server['host'] } for remote_server in remote_servers]) def fill_dropdown_collections(self, collections): ''' This method fill a dropdown list of collection. 
@param collections: list of all collections with name - url @return: html code to display ''' return ''.join([ '''<option value="%(id)s">%(name)s</option>''' % { 'id': str(collection['id']), 'name': collection['label'] } for collection in collections]) def fill_dropdown_primary(self, primary_categories): ''' This method fill the primary dropdown list with the data given in parameter @param primary_categories: list of 'url' 'name' tuples @return: html code generated to display the list ''' return ''.join([ '''<option value="%(id)s">%(name)s</option>''' % { 'id': primary_categorie['id'], 'name': primary_categorie['label'] } for primary_categorie in primary_categories]) def fill_dropdown_secondary(self, categories): ''' This method fill a category list. This list is allows the multi-selection or items. To proced to select more than one categorie through a browser ctrl + clic @param categories: list of all categories in the format name - url @return: the html code that display each dropdown list ''' if len(categories) == '': return '' return ''.join([ '''<option value="%(id)s">%(name)s</option>''' % { 'id': category['id'], 'name': category['label'] } for category in categories]) def fill_optional_category_list(self, categories): ''' This method fill a table row that contains name and url of the selected optional categories @param self: reference to the current instance of the class @param categories: list of tuples containing selected categories @return: html code generated to display the list ''' if len(categories) == 0: return '' else: body = '<tr><td>' body = body + ''.join([ '''<p>Category: %(category_name)s ( %(category_url)s )</p>'''%{ 'category_name' : category['label'], 'category_url' : category['url'] } for category in categories ]) body = body + '</td></tr>' return body def fill_media_list(self, medias, id_server, from_websubmit=False): ''' Concatenate the string that contains all informations about the medias ''' text = '' if id_server == 1: media_type = 
self.format_media_list_by_type(medias) text = '''<h2>Please select files you would like to push to arXiv:</h2>''' for mtype in media_type: text += '''<h3><b>%s: </b></h3>''' % mtype['media_type'] text += '''<blockquote>''' for media in mtype['media_list']: text += '''<input type='checkbox' name="media" value="%s" %s>%s</input><br />''' % (media['path'], media['selected'], media['name']) text += "</blockquote>" text += '''<h3>Upload</h3>''' text += '''<blockquote>''' text += '''<p>In addition, you can submit a new file (that will be added to the record as well):</p>''' if from_websubmit == False: text += '''<input type="file" name="new_media" size="60"/>''' return text def fill_arxiv_help_message(self): text = '''</blockquote><h3>Help</h3>''' text += '''<blockquote><p>For more help on which formats are supported by arXiv, please see:'''\ '''<ul>'''\ '''<li><a href="http://arxiv.org/help/submit" target="_blank">'''\ '''arXiv submission process</a></li>'''\ '''<li><a href="http://arxiv.org/help/submit_tex" target="_blank">'''\ '''arXiv TeX submission</a></li>'''\ '''<li><a href="http://arxiv.org/help/submit_docx" target="_blank">'''\ '''arXiv Docx submission</a></li>'''\ '''<li><a href="http://arxiv.org/help/submit_pdf" target="_blank">'''\ '''arXiv PDF submission</a></li>'''\ '''</ul></blockquote>''' return text def fill_contributors_list(self, contributors): ''' This method display each contributors in the format of an editable input text. This allows the user to modifie it. 
@param contributors: The list of all contributors of the document @return: the html code that display each dropdown list ''' output = '' is_author = True for author in contributors: nb_rows = 2 author_name = \ '''<LABEL for="name">Name: </LABEL><input type = "text" ''' \ '''name = "contributor_name" size = "100" value = "%s" ''' \ '''id="name"/>''' % author['name'] author_email = \ '''<LABEL for = "email">Email: </LABEL>''' \ '''<input type = "text" name = "contributor_email" ''' \ '''size = "100" value = "%s" id = "email"/>''' % author['email'] author_affiliations = [] for affiliation in author['affiliation']: affiliation_row = \ '''<LABEL for = "affiliation">Affiliation: </LABEL> ''' \ '''<input type="text" name = "contributor_affiliation" ''' \ '''size = "100" value = "%s" id = "affiliation"/>''' % \ affiliation author_affiliations.append(affiliation_row) nb_rows = nb_rows + 1 affiliation_row = \ '''<LABEL for = "affiliation">Affiliation: </LABEL>''' \ '''<input type = "text" name = "contributor_affiliation" ''' \ '''size = "100" id = "affiliation"/>''' author_affiliations.append(affiliation_row) nb_rows = nb_rows + 1 if is_author: output += '''<tr><td rowspan = "%s">Author: </td>''' % nb_rows is_author = False else: output += '''<tr><td rowspan = "%s">Contributor: </td>''' % \ nb_rows output += '''<td>%s</td></tr>''' % author_name if author_email != '': output += '''<tr><td>%s</td></tr>''' % author_email for affiliation in author_affiliations: output += '''<tr><td>%s</td></tr>''' % affiliation output += \ '''<input type = "hidden" name = "contributor_affiliation" ''' \ '''value = "next"/>''' return output def fill_journal_refs_list(self, journal_refs): ''' This method display each journal references in the format of an editable input text. This allows the user to modifie it. 
@param journal_refs: The list of all journal references of the document @return: the html code that display each dropdown list ''' html = '' if len(journal_refs) > 0: html += ''' <tr> <td align="left"><p>Journal references: </p></td><td> ''' html = html + ''.join([ ''' <p><input type="text" name="journal_refs" size="100" ''' \ '''value="%(journal_ref)s"/></p> ''' % { 'journal_ref': journal_ref } for journal_ref in journal_refs ]) html = html + ''' </td> </tr> ''' return html def fill_report_nos_list(self, report_nos): ''' Concatate a string containing the report number html table rows ''' html = '' if len(report_nos) > 0: html = ''' <tr> <td align="left"><p>Report numbers: </p></td><td> ''' html = html + ''.join([ ''' <p><input type="text" name="report_nos" size="100" ''' \ '''value="%(report_no)s"/></p>''' % { 'report_no': report_no } for report_no in report_nos ]) html = html + ''' </td> </tr> ''' return html def get_list_id_categories(self, categories): ''' gives the id of the categores tuple ''' id_categories = [] for category in categories: id_categories.append(category['id']) return id_categories def format_media_list_by_type(self, medias): ''' This function format the media by type (Main, Uploaded, ...) ''' #format media list by type of document media_type = [] for media in medias: # if it is the first media of this type, create a new type is_type_in_media_type = False for type in media_type: if media['collection'] == type['media_type']: is_type_in_media_type = True if is_type_in_media_type == False: type = {} type['media_type'] = media['collection'] type['media_list'] = [] media_type.append(type) # insert the media in the good media_type element for type in media_type: if type['media_type'] == media['collection']: type['media_list'].append(media) return media_type
gpl-2.0
cernops/nova
nova/tests/functional/api_sample_tests/test_admin_actions.py
26
2398
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from nova.tests.functional.api_sample_tests import test_servers

CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.legacy_v2.extensions')


class AdminActionsSamplesJsonTest(test_servers.ServersSampleBase):
    extension_name = "os-admin-actions"

    def _get_flags(self):
        """Enable the legacy admin_actions extension on top of the base flags."""
        flags = super(AdminActionsSamplesJsonTest, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.admin_actions.Admin_actions')
        return flags

    def setUp(self):
        """Create the server that every admin-action test below acts on."""
        super(AdminActionsSamplesJsonTest, self).setUp()
        self.uuid = self._post_server()

    def _post_action(self, template):
        """POST the given api-sample template to the test server's action URL."""
        return self._do_post('servers/%s/action' % self.uuid, template, {})

    def test_post_reset_network(self):
        # Resetting the server's network is accepted asynchronously (202).
        resp = self._post_action('admin-actions-reset-network')
        self.assertEqual(202, resp.status_code)

    def test_post_inject_network_info(self):
        # Injecting network info is accepted asynchronously (202).
        resp = self._post_action('admin-actions-inject-network-info')
        self.assertEqual(202, resp.status_code)

    def test_post_reset_state(self):
        # Resetting the server's state is accepted asynchronously (202).
        resp = self._post_action('admin-actions-reset-server-state')
        self.assertEqual(202, resp.status_code)
apache-2.0
pfctdayelise/pytest
src/_pytest/mark/__init__.py
2
5320
""" generic mechanism for marking and selecting python functions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from .legacy import matchkeyword from .legacy import matchmark from .structures import EMPTY_PARAMETERSET_OPTION from .structures import get_empty_parameterset_mark from .structures import Mark from .structures import MARK_GEN from .structures import MarkDecorator from .structures import MarkGenerator from .structures import MarkInfo from .structures import ParameterSet from .structures import transfer_markers from _pytest.config import UsageError __all__ = [ "Mark", "MarkInfo", "MarkDecorator", "MarkGenerator", "transfer_markers", "get_empty_parameterset_mark", ] def param(*values, **kw): """Specify a parameter in `pytest.mark.parametrize`_ calls or :ref:`parametrized fixtures <fixture-parametrize-marks>`. .. code-block:: python @pytest.mark.parametrize("test_input,expected", [ ("3+5", 8), pytest.param("6*9", 42, marks=pytest.mark.xfail), ]) def test_eval(test_input, expected): assert eval(test_input) == expected :param values: variable args of the values of the parameter set, in order. :keyword marks: a single mark or a list of marks to be applied to this parameter set. :keyword str id: the id to attribute to this parameter set. """ return ParameterSet.param(*values, **kw) def pytest_addoption(parser): group = parser.getgroup("general") group._addoption( "-k", action="store", dest="keyword", default="", metavar="EXPRESSION", help="only run tests which match the given substring expression. " "An expression is a python evaluatable expression " "where all names are substring-matched against test names " "and their parent classes. Example: -k 'test_method or test_" "other' matches all test functions and classes whose name " "contains 'test_method' or 'test_other', while -k 'not test_method' " "matches those that don't contain 'test_method' in their names. 
" "Additionally keywords are matched to classes and functions " "containing extra names in their 'extra_keyword_matches' set, " "as well as functions which have names assigned directly to them.", ) group._addoption( "-m", action="store", dest="markexpr", default="", metavar="MARKEXPR", help="only run tests matching given mark expression. " "example: -m 'mark1 and not mark2'.", ) group.addoption( "--markers", action="store_true", help="show markers (builtin, plugin and per-project ones).", ) parser.addini("markers", "markers for test functions", "linelist") parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets") def pytest_cmdline_main(config): import _pytest.config if config.option.markers: config._do_configure() tw = _pytest.config.create_terminal_writer(config) for line in config.getini("markers"): parts = line.split(":", 1) name = parts[0] rest = parts[1] if len(parts) == 2 else "" tw.write("@pytest.mark.%s:" % name, bold=True) tw.line(rest) tw.line() config._ensure_unconfigure() return 0 pytest_cmdline_main.tryfirst = True def deselect_by_keyword(items, config): keywordexpr = config.option.keyword.lstrip() if keywordexpr.startswith("-"): keywordexpr = "not " + keywordexpr[1:] selectuntil = False if keywordexpr[-1:] == ":": selectuntil = True keywordexpr = keywordexpr[:-1] remaining = [] deselected = [] for colitem in items: if keywordexpr and not matchkeyword(colitem, keywordexpr): deselected.append(colitem) else: if selectuntil: keywordexpr = None remaining.append(colitem) if deselected: config.hook.pytest_deselected(items=deselected) items[:] = remaining def deselect_by_mark(items, config): matchexpr = config.option.markexpr if not matchexpr: return remaining = [] deselected = [] for item in items: if matchmark(item, matchexpr): remaining.append(item) else: deselected.append(item) if deselected: config.hook.pytest_deselected(items=deselected) items[:] = remaining def pytest_collection_modifyitems(items, config): 
deselect_by_keyword(items, config) deselect_by_mark(items, config) def pytest_configure(config): config._old_mark_config = MARK_GEN._config if config.option.strict: MARK_GEN._config = config empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): raise UsageError( "{!s} must be one of skip, xfail or fail_at_collect" " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset) ) def pytest_unconfigure(config): MARK_GEN._config = getattr(config, "_old_mark_config", None)
mit
js0701/chromium-crosswalk
third_party/WebKit/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
28
8092
# Copyright (C) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import cStringIO as StringIO  # Python 2 module; this file predates Python 3.
import diff_parser
import re
import unittest

from webkitpy.common.checkout.diff_test_data import DIFF_TEST_DATA


class DiffParserTest(unittest.TestCase):
    """Unit tests for diff_parser: parsing, converter selection and
    git-to-svn diff conversion."""

    maxDiff = None

    def test_diff_parser(self, parser = None):
        """Parse DIFF_TEST_DATA and spot-check the (old_line, new_line, text)
        triples recorded for each file.  A line number of 0 marks a line that
        does not exist on that side (i.e. an add or a delete)."""
        if not parser:
            parser = diff_parser.DiffParser(DIFF_TEST_DATA.splitlines())
        self.assertEqual(3, len(parser.files))

        self.assertTrue('WebCore/style/StyleFlexibleBoxData.h' in parser.files)
        diff = parser.files['WebCore/style/StyleFlexibleBoxData.h']
        self.assertEqual(7, len(diff.lines))
        # The first two unchanged lines.
        self.assertEqual((47, 47), diff.lines[0][0:2])
        self.assertEqual('', diff.lines[0][2])
        self.assertEqual((48, 48), diff.lines[1][0:2])
        self.assertEqual(' unsigned align : 3; // EBoxAlignment', diff.lines[1][2])
        # The deleted line
        self.assertEqual((50, 0), diff.lines[3][0:2])
        self.assertEqual(' unsigned orient: 1; // EBoxOrient', diff.lines[3][2])

        # The first file looks OK. Let's check the next, more complicated file.
        self.assertTrue('WebCore/style/StyleRareInheritedData.cpp' in parser.files)
        diff = parser.files['WebCore/style/StyleRareInheritedData.cpp']
        # There are 3 chunks.
        self.assertEqual(7 + 7 + 9, len(diff.lines))
        # Around an added line.
        self.assertEqual((60, 61), diff.lines[9][0:2])
        self.assertEqual((0, 62), diff.lines[10][0:2])
        self.assertEqual((61, 63), diff.lines[11][0:2])
        # Look through the last chunk, which contains both add's and delete's.
        self.assertEqual((81, 83), diff.lines[14][0:2])
        self.assertEqual((82, 84), diff.lines[15][0:2])
        self.assertEqual((83, 85), diff.lines[16][0:2])
        self.assertEqual((84, 0), diff.lines[17][0:2])
        self.assertEqual((0, 86), diff.lines[18][0:2])
        self.assertEqual((0, 87), diff.lines[19][0:2])
        self.assertEqual((85, 88), diff.lines[20][0:2])
        self.assertEqual((86, 89), diff.lines[21][0:2])
        self.assertEqual((87, 90), diff.lines[22][0:2])

        # Check if a newly added file is correctly handled.
        diff = parser.files['LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum']
        self.assertEqual(1, len(diff.lines))
        self.assertEqual((0, 1), diff.lines[0][0:2])

    def test_diff_converter(self):
        """get_diff_converter() must pick the right converter from the first
        recognizable header line, skipping leading comments / revision lines."""
        comment_lines = [
            "Hey guys,\n",
            "\n",
            "See my awesome patch below!\n",
            "\n",
            " - Cool Hacker\n",
            "\n",
        ]

        revision_lines = [
            "Subversion Revision 289799\n",
        ]

        svn_diff_lines = [
            "Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
            "===================================================================\n",
            "--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
            "+++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
            "@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
        ]
        self.assertEqual(diff_parser.get_diff_converter(svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
        self.assertEqual(diff_parser.get_diff_converter(comment_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
        self.assertEqual(diff_parser.get_diff_converter(revision_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)

        git_diff_lines = [
            "diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
            "index 3c5b45b..0197ead 100644\n",
            "--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
            "+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
            "@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
        ]
        self.assertEqual(diff_parser.get_diff_converter(git_diff_lines), diff_parser.git_diff_to_svn_diff)
        self.assertEqual(diff_parser.get_diff_converter(comment_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)
        self.assertEqual(diff_parser.get_diff_converter(revision_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)

    def test_git_mnemonicprefix(self):
        """Re-run the parser test with every mnemonic prefix pair git can
        emit in place of the usual a/ and b/ path prefixes."""
        p = re.compile(r' ([a|b])/')
        prefixes = [
            { 'a' : 'i', 'b' : 'w' },  # git-diff (compares the (i)ndex and the (w)ork tree)
            { 'a' : 'c', 'b' : 'w' },  # git-diff HEAD (compares a (c)ommit and the (w)ork tree)
            { 'a' : 'c', 'b' : 'i' },  # git diff --cached (compares a (c)ommit and the (i)ndex)
            { 'a' : 'o', 'b' : 'w' },  # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity)
            { 'a' : '1', 'b' : '2' },  # git diff --no-index a b (compares two non-git things (1) and (2))
        ]
        for prefix in prefixes:
            patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], DIFF_TEST_DATA)
            self.test_diff_parser(diff_parser.DiffParser(patch.splitlines()))

    def test_git_diff_to_svn_diff(self):
        """Both long-hash and abbreviated-hash git headers must convert to
        the same svn-style header."""
        output = """\
Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py
===================================================================
--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
 A
 B
 C
+D
 E
 F
"""

        inputfmt = StringIO.StringIO("""\
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
index 2ed552c4555db72df16b212547f2c125ae301a04..72870482000c0dba64ce4300ed782c03ee79b74f 100644
--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
 A
 B
 C
+D
 E
 F
""")
        shortfmt = StringIO.StringIO("""\
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
index b48b162..f300960 100644
--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
 A
 B
 C
+D
 E
 F
""")

        self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in shortfmt.readlines()))
        self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in inputfmt.readlines()))
bsd-3-clause
benjaoming/lcrs
lcrs/master/plugins/__init__.py
2
1972
#
# LCRS Copyright (C) 2009-2012
# - Benjamin Bach
#
# LCRS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LCRS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LCRS.  If not, see <http://www.gnu.org/licenses/>.

import gtk

from lcrs.master.ui.decorators import idle_add_decorator


class CallbackFailed(Exception):
    """Raised by a plugin when one of its callbacks could not complete."""
    pass


class BasePlugin():
    """Base class for LCRS UI plugins.

    Subclasses override the class attributes below and the activate()
    and deactivate() hooks.
    """

    plugin_id = "unique_id"
    name = "My Plugin"
    description = "This is a plugin"
    # NOTE: class attribute, shared by every subclass that does not override
    # it.  Subclasses should assign their own dict rather than mutate this
    # one in place.
    config = {}

    def __init__(self, mainwindow_instance, config_master):
        """Store references to the main window and the master configuration.

        :param mainwindow_instance: an instance of the main window
        :param config_master: an instance of config_master
        """
        self.mainwindow_instance = mainwindow_instance
        self.config_master = config_master

    def get_config(self, key):
        """Return this plugin class' configuration value for *key*."""
        return self.config_master.ui_plugins[self.__class__][key]

    def activate(self):
        """Hook called when the plugin is enabled.  Default: no-op."""
        pass

    def deactivate(self):
        """Hook called when the plugin is disabled.  Default: no-op."""
        pass

    @idle_add_decorator
    def show_error_msg(self, msg, parent=None):
        """Utility function to display a simple error message.

        Runs on the GTK main loop (idle_add_decorator).  Falls back to the
        main window when *parent* is not given.

        Fix: the docstring used to sit *after* the ``if not parent`` block,
        where it was a dead string expression rather than a docstring; it is
        now at the top of the function.  *parent* also gained a default so
        callers no longer have to pass an explicit falsy value.
        """
        if not parent:
            parent = self.mainwindow_instance.win
        dialog = gtk.MessageDialog(parent=parent,
                                   type=gtk.MESSAGE_ERROR,
                                   buttons=gtk.BUTTONS_CLOSE,
                                   message_format=msg)
        dialog.set_modal(True)
        dialog.set_keep_above(True)

        def on_close(dialog, *args):
            dialog.destroy()

        dialog.connect("response", on_close)
        dialog.show()
gpl-3.0
rshum19/crazyflie-clients-python-matlab
lib/cfclient/utils/config.py
5
3404
#!/usr/bin/env python # -*- coding: utf-8 -*- # # || ____ _ __ # +------+ / __ )(_) /_______________ _____ ___ # | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \ # +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/ # || || /_____/_/\__/\___/_/ \__,_/ /___/\___/ # # Copyright (C) 2011-2013 Bitcraze AB # # Crazyflie Nano Quadcopter Client # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. 
""" Gives access for reading and writing application configuration parameters """ import sys import json import logging from .singleton import Singleton __author__ = 'Bitcraze AB' __all__ = ['Config'] logger = logging.getLogger(__name__) class Config(metaclass=Singleton): """ Singleton class for accessing application configuration """ def __init__(self): """ Initializes the singleton and reads the config files """ self._dist_config = sys.path[0] + "/cfclient/configs/config.json" self._config = sys.path[1] + "/config.json" [self._readonly, self._data] = self._read_distfile() user_config = self._read_config() if (user_config): self._data.update(user_config) def _read_distfile(self): """ Read the distribution config file containing the defaults """ f = open(self._dist_config, 'r') data = json.load(f) f.close() logger.info("Dist config read from %s" % self._dist_config) return [data["read-only"], data["writable"]] def set(self, key, value): """ Set the value of a config parameter """ try: self._data[key] = value except KeyError: raise KeyError("Could not set the parameter [%s]" % key) def get(self, key): """ Get the value of a config parameter """ value = None if (key in self._data): value = self._data[key] elif (key in self._readonly): value = self._readonly[key] else: raise KeyError("Could not get the parameter [%s]" % key) if (isinstance(value, str)): value = str(value) return value def save_file(self): """ Save the user config to file """ json_data = open(self._config, 'w') json_data.write(json.dumps(self._data, indent=2)) json_data.close() logger.info("Config file saved to [%s]" % self._config) def _read_config(self): """ Read the user config from file """ try: json_data = open(self._config) data = json.load(json_data) json_data.close() logger.info("Config file read from [%s]" % self._config) except Exception: return None return data
gpl-2.0
minhphung171093/GreenERP
openerp/service/report.py
56
5118
# -*- coding: utf-8 -*-
# NOTE: Python 2 module (uses `except Exception, e` / `raise Exception, v`
# and the `unicode` builtin).

import base64
import logging
import sys
import threading

import openerp
import openerp.report
from openerp import tools
from openerp.exceptions import UserError
import security

_logger = logging.getLogger(__name__)

# TODO: set a maximum report number per user to avoid DOS attacks
#
# Report state:
#     False -> True

# In-flight and finished report jobs, keyed by an integer handle.  Each entry
# holds 'uid', 'result', 'format', 'state' (False while running) and
# 'exception'.  Shared between the RPC thread and the worker threads.
self_reports = {}
self_id = 0
# Guards the self_id counter against concurrent increments.
self_id_protect = threading.Semaphore()


def dispatch(method, params):
    """RPC entry point: authenticate and route *method* to exp_<method>.

    The first three params are (db, uid, password); the remainder is
    forwarded to the exp_* handler.
    """
    (db, uid, passwd ) = params[0:3]
    threading.current_thread().uid = uid
    params = params[3:]
    if method not in ['report', 'report_get', 'render_report']:
        raise KeyError("Method not supported %s" % method)
    security.check(db,uid,passwd)
    openerp.modules.registry.RegistryManager.check_registry_signaling(db)
    fn = globals()['exp_' + method]
    res = fn(db, uid, *params)
    openerp.modules.registry.RegistryManager.signal_caches_change(db)
    return res


def exp_render_report(db, uid, object, ids, datas=None, context=None):
    """Render a report synchronously and return its result immediately.

    Allocates a job handle like exp_report() but runs in the calling thread
    and resolves the result via _check_report() before returning.
    """
    if not datas:
        datas={}
    if not context:
        context={}

    # Allocate a unique job id under the semaphore.
    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    cr = openerp.registry(db).cursor()
    try:
        result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
        if not result:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
        self_reports[id]['result'] = result
        self_reports[id]['format'] = format
        self_reports[id]['state'] = True
    except Exception, exception:
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
        # Mark the job finished even on failure so _check_report can report it.
        self_reports[id]['state'] = True
    cr.commit()
    cr.close()
    return _check_report(id)


def exp_report(db, uid, object, ids, datas=None, context=None):
    """Start an asynchronous report job and return its integer handle.

    The actual rendering happens in a daemon-less worker thread; clients
    poll the result with exp_report_get(handle).
    """
    if not datas:
        datas={}
    if not context:
        context={}

    # Allocate a unique job id under the semaphore.
    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    def go(id, uid, ids, datas, context):
        # Worker: render the report and publish result/exception into
        # self_reports[id]; 'state' flips to True when done either way.
        with openerp.api.Environment.manage():
            cr = openerp.registry(db).cursor()
            try:
                result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
                if not result:
                    tb = sys.exc_info()
                    self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
                self_reports[id]['result'] = result
                self_reports[id]['format'] = format
                self_reports[id]['state'] = True
            except Exception, exception:
                _logger.exception('Exception: %s\n', exception)
                if hasattr(exception, 'name') and hasattr(exception, 'value'):
                    self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
                else:
                    tb = sys.exc_info()
                    self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
                self_reports[id]['state'] = True
            cr.commit()
            cr.close()
        return True

    threading.Thread(target=go, args=(id, uid, ids, datas, context)).start()
    return id


def _check_report(report_id):
    """Resolve a finished job: re-raise its exception as a UserError, or
    return {'state', 'result' (base64, optionally zlib-compressed), 'format'}.

    Consumes (deletes) the job entry once the result is delivered.
    """
    result = self_reports[report_id]
    exc = result['exception']
    if exc:
        raise UserError('%s: %s' % (exc.message, exc.traceback))
    res = {'state': result['state']}
    if res['state']:
        if tools.config['reportgz']:
            import zlib
            res2 = zlib.compress(result['result'])
            res['code'] = 'zlib'
        else:
            #CHECKME: why is this needed???
            if isinstance(result['result'], unicode):
                res2 = result['result'].encode('latin1', 'replace')
            else:
                res2 = result['result']
        if res2:
            res['result'] = base64.encodestring(res2)
        res['format'] = result['format']
        del self_reports[report_id]
    return res


def exp_report_get(db, uid, report_id):
    """Poll an asynchronous report job; only the requesting uid may fetch it."""
    if report_id in self_reports:
        if self_reports[report_id]['uid'] == uid:
            return _check_report(report_id)
        else:
            raise Exception, 'AccessDenied'
    else:
        raise Exception, 'ReportNotFound'
gpl-3.0
weihautin/anki
anki/utils.py
20
11729
# -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
# NOTE: Python 2 module (uses `unicode`, `unichr`, htmlentitydefs).

from __future__ import division
import re
import os
import random
import time
import math
import htmlentitydefs
import subprocess
import tempfile
import shutil
import string
import sys
import locale
from hashlib import sha1
import platform
import traceback
from anki.lang import _, ngettext

# locale.format_string() appeared in Python 2.5; provide a naive fallback
# for older interpreters (sys.version_info[1] is the minor version).
if sys.version_info[1] < 5:
    def format_string(a, b):
        return a % b
    locale.format_string = format_string

try:
    import simplejson as json
    # make sure simplejson's loads() always returns unicode
    # we don't try to support .load()
    origLoads = json.loads
    def loads(s, *args, **kwargs):
        if not isinstance(s, unicode):
            s = unicode(s, "utf8")
        return origLoads(s, *args, **kwargs)
    json.loads = loads
except ImportError:
    import json

# Time handling
##############################################################################

def intTime(scale=1):
    "The time in integer seconds. Pass scale=1000 to get milliseconds."
    return int(time.time()*scale)

# Maps a period name to a lazily-translated, pluralized format string.
timeTable = {
    "years": lambda n: ngettext("%s year", "%s years", n),
    "months": lambda n: ngettext("%s month", "%s months", n),
    "days": lambda n: ngettext("%s day", "%s days", n),
    "hours": lambda n: ngettext("%s hour", "%s hours", n),
    "minutes": lambda n: ngettext("%s minute", "%s minutes", n),
    "seconds": lambda n: ngettext("%s second", "%s seconds", n),
    }

# Same, but with an <!--after--> marker so translators can distinguish
# "in 2 days" style usage.
afterTimeTable = {
    "years": lambda n: ngettext("%s year<!--after-->", "%s years<!--after-->", n),
    "months": lambda n: ngettext("%s month<!--after-->", "%s months<!--after-->", n),
    "days": lambda n: ngettext("%s day<!--after-->", "%s days<!--after-->", n),
    "hours": lambda n: ngettext("%s hour<!--after-->", "%s hours<!--after-->", n),
    "minutes": lambda n: ngettext("%s minute<!--after-->", "%s minutes<!--after-->", n),
    "seconds": lambda n: ngettext("%s second<!--after-->", "%s seconds<!--after-->", n),
    }

def shortTimeFmt(type):
    # Abbreviated unit suffix for the given period name.
    return {
        "years": _("%sy"),
        "months": _("%smo"),
        "days": _("%sd"),
        "hours": _("%sh"),
        "minutes": _("%sm"),
        "seconds": _("%ss"),
        }[type]

def fmtTimeSpan(time, pad=0, point=0, short=False, after=False, unit=99):
    "Return a string representing a time span (eg '2 days')."
    (type, point) = optimalPeriod(time, point, unit)
    time = convertSecondsTo(time, type)
    if not point:
        time = int(round(time))
    if short:
        fmt = shortTimeFmt(type)
    else:
        if after:
            fmt = afterTimeTable[type](_pluralCount(time, point))
        else:
            fmt = timeTable[type](_pluralCount(time, point))
    # Build a printf width/precision spec ("%<pad>.<point>f") and splice it
    # into the translated format.
    timestr = "%(a)d.%(b)df" % {'a': pad, 'b': point}
    return locale.format_string("%" + (fmt % timestr), time)

def optimalPeriod(time, point, unit):
    # Choose the largest period that keeps the value >= 1, capped by *unit*
    # (99 = no cap).  Adjusts the decimal-point count for very small/large
    # periods.
    if abs(time) < 60 or unit < 1:
        type = "seconds"
        point -= 1
    elif abs(time) < 3600 or unit < 2:
        type = "minutes"
    elif abs(time) < 60 * 60 * 24 or unit < 3:
        type = "hours"
    elif abs(time) < 60 * 60 * 24 * 30 or unit < 4:
        type = "days"
    elif abs(time) < 60 * 60 * 24 * 365 or unit < 5:
        type = "months"
        point += 1
    else:
        type = "years"
        point += 1
    return (type, max(point, 0))

def convertSecondsTo(seconds, type):
    if type == "seconds":
        return seconds
    elif type == "minutes":
        return seconds / 60
    elif type == "hours":
        return seconds / 3600
    elif type == "days":
        return seconds / 86400
    elif type == "months":
        return seconds / 2592000
    elif type == "years":
        return seconds / 31536000
    assert False

def _pluralCount(time, point):
    # When showing decimals, always use the plural form.
    if point:
        return 2
    return math.floor(time)

# Locale
##############################################################################

def fmtPercentage(float_value, point=1):
    "Return float with percentage sign"
    fmt = '%' + "0.%(b)df" % {'b': point}
    return locale.format_string(fmt, float_value) + "%"

def fmtFloat(float_value, point=1):
    "Return a string with decimal separator according to current locale"
    fmt = '%' + "0.%(b)df" % {'b': point}
    return locale.format_string(fmt, float_value)

# HTML
##############################################################################

reStyle = re.compile("(?s)<style.*?>.*?</style>")
reScript = re.compile("(?s)<script.*?>.*?</script>")
reTag = re.compile("<.*?>")
reEnts = re.compile("&#?\w+;")
reMedia = re.compile("<img[^>]+src=[\"']?([^\"'>]+)[\"']?[^>]*>")

def stripHTML(s):
    # Drop style/script bodies, then all tags, then decode entities.
    s = reStyle.sub("", s)
    s = reScript.sub("", s)
    s = reTag.sub("", s)
    s = entsToTxt(s)
    return s

def stripHTMLMedia(s):
    "Strip HTML but keep media filenames"
    s = reMedia.sub(" \\1 ", s)
    return stripHTML(s)

def minimizeHTML(s):
    "Correct Qt's verbose bold/underline/etc."
    s = re.sub('<span style="font-weight:600;">(.*?)</span>', '<b>\\1</b>',
               s)
    s = re.sub('<span style="font-style:italic;">(.*?)</span>', '<i>\\1</i>',
               s)
    s = re.sub('<span style="text-decoration: underline;">(.*?)</span>',
               '<u>\\1</u>', s)
    return s

def entsToTxt(html):
    # entitydefs defines nbsp as \xa0 instead of a standard space, so we
    # replace it first
    html = html.replace("&nbsp;", " ")
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text # leave as is
    return reEnts.sub(fixup, html)

# IDs
##############################################################################

def hexifyID(id):
    return "%x" % int(id)

def dehexifyID(id):
    return int(id, 16)

def ids2str(ids):
    """Given a list of integers, return a string '(int1,int2,...)'."""
    return "(%s)" % ",".join(str(i) for i in ids)

def timestampID(db, table):
    "Return a non-conflicting timestamp for table."
    # be careful not to create multiple objects without flushing them, or they
    # may share an ID.
    t = intTime(1000)
    while db.scalar("select id from %s where id = ?" % table, t):
        t += 1
    return t

def maxID(db):
    "Return the first safe ID to use."
    now = intTime(1000)
    for tbl in "cards", "notes":
        now = max(now, db.scalar(
            "select max(id) from %s" % tbl))
    return now + 1

# used in ankiweb
def base62(num, extra=""):
    # NOTE: returns "" for num == 0 (callers only pass non-zero randoms).
    s = string; table = s.ascii_letters + s.digits + extra
    buf = ""
    while num:
        num, i = divmod(num, len(table))
        buf = table[i] + buf
    return buf

_base91_extra_chars = "!#$%&()*+,-./:;<=>?@[]^_`{|}~"
def base91(num):
    # all printable characters minus quotes, backslash and separators
    return base62(num, _base91_extra_chars)

def guid64():
    "Return a base91-encoded 64bit random number."
    return base91(random.randint(0, 2**64-1))

# increment a guid by one, for note type conflicts
def incGuid(guid):
    return _incGuid(guid[::-1])[::-1]

def _incGuid(guid):
    # Recursive add-with-carry over the base91 alphabet, least significant
    # character first (caller reverses the string).
    s = string; table = s.ascii_letters + s.digits + _base91_extra_chars
    idx = table.index(guid[0])
    if idx + 1 == len(table):
        # overflow
        guid = table[0] + _incGuid(guid[1:])
    else:
        guid = table[idx+1] + guid[1:]
    return guid

# Fields
##############################################################################

def joinFields(list):
    # Fields are stored as a single string separated by US (0x1f).
    return "\x1f".join(list)

def splitFields(string):
    return string.split("\x1f")

# Checksums
##############################################################################

def checksum(data):
    if isinstance(data, unicode):
        data = data.encode("utf-8")
    return sha1(data).hexdigest()

def fieldChecksum(data):
    # 32 bit unsigned number from first 8 digits of sha1 hash
    return int(checksum(stripHTMLMedia(data).encode("utf-8"))[:8], 16)

# Temp files
##############################################################################

_tmpdir = None

def tmpdir():
    "A reusable temp folder which we clean out on each program invocation."
    global _tmpdir
    if not _tmpdir:
        def cleanup():
            shutil.rmtree(_tmpdir)
        import atexit
        atexit.register(cleanup)
        _tmpdir = unicode(os.path.join(tempfile.gettempdir(), "anki_temp"), \
            sys.getfilesystemencoding())
    if not os.path.exists(_tmpdir):
        os.mkdir(_tmpdir)
    return _tmpdir

def tmpfile(prefix="", suffix=""):
    # mkstemp opens the file; close the fd and hand back just the path.
    (fd, name) = tempfile.mkstemp(dir=tmpdir(), prefix=prefix, suffix=suffix)
    os.close(fd)
    return name

def namedtmp(name, rm=True):
    "Return tmpdir+name. Deletes any existing file."
    path = os.path.join(tmpdir(), name)
    if rm:
        try:
            os.unlink(path)
        except (OSError, IOError):
            pass
    return path

# Cmd invocation
##############################################################################

def call(argv, wait=True, **kwargs):
    "Execute a command. If WAIT, return exit code."
    # ensure we don't open a separate window for forking process on windows
    if isWin:
        si = subprocess.STARTUPINFO()
        try:
            si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        except:
            si.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
    else:
        si = None
    # run
    try:
        o = subprocess.Popen(argv, startupinfo=si, **kwargs)
    except OSError:
        # command not found
        return -1
    # wait for command to finish
    if wait:
        while 1:
            try:
                ret = o.wait()
            except OSError:
                # interrupted system call
                continue
            break
    else:
        ret = 0
    return ret

# OS helpers
##############################################################################

isMac = sys.platform.startswith("darwin")
isWin = sys.platform.startswith("win32")

invalidFilenameChars = ":*?\"<>|"

def invalidFilename(str, dirsep=True):
    # Returns the first offending character, or None (implicitly) when the
    # name is acceptable.
    for c in invalidFilenameChars:
        if c in str:
            return c
    if (dirsep or isWin) and "/" in str:
        return "/"
    elif (dirsep or not isWin) and "\\" in str:
        return "\\"
    elif str.strip().startswith("."):
        return "."
def platDesc():
    """Return a short OS descriptor, e.g. "mac:10.9" / "win:7" / "lin:Ubuntu:14.04"."""
    # The platform queries can fail with an interrupted system call, so retry
    # a bounded number of times rather than failing on the first attempt.
    for _attempt in range(100):
        try:
            system = platform.system()
            if isMac:
                return "mac:%s" % (platform.mac_ver()[0])
            if isWin:
                return "win:%s" % (platform.win32_ver()[0])
            if system == "Linux":
                dist = platform.dist()
                return "lin:%s:%s" % (dist[0], dist[1])
            return system
        except:
            continue
    return "unknown"

# Debugging
##############################################################################

class TimedLog(object):
    """Writes messages to stderr stamped with the ms elapsed since the
    previous log() call."""

    def __init__(self):
        self._last = time.time()

    def log(self, s):
        # Function name of the caller, from the second-to-last stack frame.
        caller = traceback.extract_stack(limit=2)[0][2]
        sys.stderr.write(
            "%5dms: %s(): %s\n" % ((time.time() - self._last) * 1000, caller, s))
        self._last = time.time()
agpl-3.0
afloren/nipype
nipype/interfaces/fsl/preprocess.py
2
75800
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL <http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This was written to work with FSL version 4.1.4. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ import os import os.path as op import warnings import glob import numpy as np from nipype.interfaces.fsl.base import FSLCommand, FSLCommandInputSpec from nipype.interfaces.base import (TraitedSpec, File, InputMultiPath, OutputMultiPath, Undefined, traits, isdefined, OutputMultiPath) from nipype.utils.filemanip import split_filename from nibabel import load warn = warnings.warn warnings.filterwarnings('always', category=UserWarning) class BETInputSpec(FSLCommandInputSpec): # We use position args here as list indices - so a negative number # will put something on the end in_file = File(exists=True, desc='input file to skull strip', argstr='%s', position=0, mandatory=True) out_file = File(desc='name of output skull stripped image', argstr='%s', position=1, genfile=True, hash_files=False) outline = traits.Bool(desc='create surface outline image', argstr='-o') mask = traits.Bool(desc='create binary mask image', argstr='-m') skull = traits.Bool(desc='create skull image', argstr='-s') no_output = traits.Bool(argstr='-n', desc="Don't generate segmented output") frac = traits.Float(desc='fractional intensity threshold', argstr='-f %.2f') vertical_gradient = traits.Float(argstr='-g %.2f', desc='vertical gradient in fractional intensity ' 'threshold (-1, 1)') radius = traits.Int(argstr='-r %d', units='mm', desc="head radius") center = traits.List(traits.Int, desc='center of gravity in voxels', argstr='-c %s', minlen=0, maxlen=3, units='voxels') threshold = 
traits.Bool(argstr='-t', desc="apply thresholding to segmented brain image and mask") mesh = traits.Bool(argstr='-e', desc="generate a vtk mesh brain surface") # the remaining 'options' are more like modes (mutually exclusive) that # FSL actually implements in a shell script wrapper around the bet binary. # for some combinations of them in specific order a call would not fail, # but in general using more than one of the following is clearly not # supported _xor_inputs = ('functional', 'reduce_bias', 'robust', 'padding', 'remove_eyes', 'surfaces', 't2_guided') robust = traits.Bool(desc='robust brain centre estimation ' '(iterates BET several times)', argstr='-R', xor=_xor_inputs) padding = traits.Bool(desc='improve BET if FOV is very small in Z ' '(by temporarily padding end slices)', argstr='-Z', xor=_xor_inputs) remove_eyes = traits.Bool(desc='eye & optic nerve cleanup (can be ' 'useful in SIENA)', argstr='-S', xor=_xor_inputs) surfaces = traits.Bool(desc='run bet2 and then betsurf to get additional ' 'skull and scalp surfaces (includes ' 'registrations)', argstr='-A', xor=_xor_inputs) t2_guided = File(desc='as with creating surfaces, when also feeding in ' 'non-brain-extracted T2 (includes registrations)', argstr='-A2 %s', xor=_xor_inputs) functional = traits.Bool(argstr='-F', xor=_xor_inputs, desc="apply to 4D fMRI data") reduce_bias = traits.Bool(argstr='-B', xor=_xor_inputs, desc="bias field and neck cleanup") class BETOutputSpec(TraitedSpec): out_file = File( desc="path/name of skullstripped file (if generated)") mask_file = File( desc="path/name of binary brain mask (if generated)") outline_file = File( desc="path/name of outline file (if generated)") meshfile = File( desc="path/name of vtk mesh file (if generated)") inskull_mask_file = File( desc="path/name of inskull mask (if generated)") inskull_mesh_file = File( desc="path/name of inskull mesh outline (if generated)") outskull_mask_file = File( desc="path/name of outskull mask (if generated)") 
outskull_mesh_file = File( desc="path/name of outskull mesh outline (if generated)") outskin_mask_file = File( desc="path/name of outskin mask (if generated)") outskin_mesh_file = File( desc="path/name of outskin mesh outline (if generated)") skull_mask_file = File( desc="path/name of skull mask (if generated)") class BET(FSLCommand): """Use FSL BET command for skull stripping. For complete details, see the `BET Documentation. <http://www.fmrib.ox.ac.uk/fsl/bet2/index.html>`_ Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> btr = fsl.BET() >>> btr.inputs.in_file = example_data('structural.nii') >>> btr.inputs.frac = 0.7 >>> res = btr.run() # doctest: +SKIP """ _cmd = 'bet' input_spec = BETInputSpec output_spec = BETOutputSpec def _run_interface(self, runtime): # The returncode is meaningless in BET. So check the output # in stderr and if it's set, then update the returncode # accordingly. runtime = super(BET, self)._run_interface(runtime) if runtime.stderr: self.raise_exception(runtime) return runtime def _gen_outfilename(self): out_file = self.inputs.out_file if not isdefined(out_file) and isdefined(self.inputs.in_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_brain') return os.path.abspath(out_file) def _list_outputs(self): outputs = self.output_spec().get() outputs['out_file'] = self._gen_outfilename() if ((isdefined(self.inputs.mesh) and self.inputs.mesh) or (isdefined(self.inputs.surfaces) and self.inputs.surfaces)): outputs['meshfile'] = self._gen_fname(outputs['out_file'], suffix='_mesh.vtk', change_ext=False) if (isdefined(self.inputs.mask) and self.inputs.mask) or \ (isdefined(self.inputs.reduce_bias) and self.inputs.reduce_bias): outputs['mask_file'] = self._gen_fname(outputs['out_file'], suffix='_mask') if isdefined(self.inputs.outline) and self.inputs.outline: outputs['outline_file'] = self._gen_fname(outputs['out_file'], suffix='_overlay') if isdefined(self.inputs.surfaces) and 
self.inputs.surfaces: outputs['inskull_mask_file'] = self._gen_fname(outputs['out_file'], suffix='_inskull_mask') outputs['inskull_mesh_file'] = self._gen_fname(outputs['out_file'], suffix='_inskull_mesh') outputs[ 'outskull_mask_file'] = self._gen_fname(outputs['out_file'], suffix='_outskull_mask') outputs[ 'outskull_mesh_file'] = self._gen_fname(outputs['out_file'], suffix='_outskull_mesh') outputs['outskin_mask_file'] = self._gen_fname(outputs['out_file'], suffix='_outskin_mask') outputs['outskin_mesh_file'] = self._gen_fname(outputs['out_file'], suffix='_outskin_mesh') outputs['skull_mask_file'] = self._gen_fname(outputs['out_file'], suffix='_skull_mask') if isdefined(self.inputs.no_output) and self.inputs.no_output: outputs['out_file'] = Undefined return outputs def _gen_filename(self, name): if name == 'out_file': return self._gen_outfilename() return None class FASTInputSpec(FSLCommandInputSpec): """ Defines inputs (trait classes) for FAST """ in_files = InputMultiPath(File(exists=True), copyfile=False, desc='image, or multi-channel set of images, ' 'to be segmented', argstr='%s', position=-1, mandatory=True) out_basename = File(desc='base name of output files', argstr='-o %s') # uses in_file name as basename if none given number_classes = traits.Range(low=1, high=10, argstr='-n %d', desc='number of tissue-type classes') output_biasfield = traits.Bool(desc='output estimated bias field', argstr='-b') output_biascorrected = traits.Bool(desc='output restored image ' '(bias-corrected image)', argstr='-B') img_type = traits.Enum((1, 2, 3), desc='int specifying type of image: ' '(1 = T1, 2 = T2, 3 = PD)', argstr='-t %d') bias_iters = traits.Range(low=1, high=10, argstr='-I %d', desc='number of main-loop iterations during ' 'bias-field removal') bias_lowpass = traits.Range(low=4, high=40, desc='bias field smoothing extent (FWHM) ' 'in mm', argstr='-l %d', units='mm') init_seg_smooth = traits.Range(low=0.0001, high=0.1, desc='initial segmentation spatial ' 
'smoothness (during bias field ' 'estimation)', argstr='-f %.3f') segments = traits.Bool(desc='outputs a separate binary image for each ' 'tissue type', argstr='-g') init_transform = File(exists=True, desc='<standard2input.mat> initialise' ' using priors', argstr='-a %s') other_priors = InputMultiPath( File(exist=True), desc='alternative prior images', argstr='-A %s', minlen=3, maxlen=3) no_pve = traits.Bool(desc='turn off PVE (partial volume estimation)', argstr='--nopve') no_bias = traits.Bool(desc='do not remove bias field', argstr='-N') use_priors = traits.Bool(desc='use priors throughout', argstr='-P') # must also set -a!, # mutually inclusive?? # No, conditional # mandatory... need to # figure out how to # handle with traits. segment_iters = traits.Range(low=1, high=50, desc='number of segmentation-initialisation' ' iterations', argstr='-W %d') mixel_smooth = traits.Range(low=0.0, high=1.0, desc='spatial smoothness for mixeltype', argstr='-R %.2f') iters_afterbias = traits.Range(low=1, high=20, desc='number of main-loop iterations ' 'after bias-field removal', argstr='-O %d') hyper = traits.Range(low=0.0, high=1.0, desc='segmentation spatial smoothness', argstr='-H %.2f') verbose = traits.Bool(desc='switch on diagnostic messages', argstr='-v') manual_seg = File(exists=True, desc='Filename containing intensities', argstr='-s %s') probability_maps = traits.Bool(desc='outputs individual probability maps', argstr='-p') class FASTOutputSpec(TraitedSpec): """Specify possible outputs from FAST""" tissue_class_map = File(exists=True, desc='path/name of binary segmented volume file' ' one val for each class _seg') tissue_class_files = OutputMultiPath(File(desc='path/name of binary segmented volumes ' 'one file for each class _seg_x')) restored_image = OutputMultiPath(File(desc='restored images (one for each input image) ' 'named according to the input images _restore')) mixeltype = File(desc="path/name of mixeltype volume file _mixeltype") partial_volume_map = 
File(desc="path/name of partial volume file _pveseg") partial_volume_files = OutputMultiPath(File(desc='path/name of partial volumes files ' 'one for each class, _pve_x')) bias_field = OutputMultiPath(File(desc='Estimated bias field _bias')) probability_maps = OutputMultiPath(File(desc='filenames, one for each class, for each ' 'input, prob_x')) class FAST(FSLCommand): """ Use FSL FAST for segmenting and bias correction. For complete details, see the `FAST Documentation. <http://www.fmrib.ox.ac.uk/fsl/fast4/index.html>`_ Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data Assign options through the ``inputs`` attribute: >>> fastr = fsl.FAST() >>> fastr.inputs.in_files = example_data('structural.nii') >>> out = fastr.run() #doctest: +SKIP """ _cmd = 'fast' input_spec = FASTInputSpec output_spec = FASTOutputSpec def _format_arg(self, name, spec, value): # first do what should be done in general formated = super(FAST, self)._format_arg(name, spec, value) if name == 'in_files': # FAST needs the -S parameter value to correspond to the number # of input images, otherwise it will ignore all but the first formated = "-S %d %s" % (len(value), formated) return formated def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.number_classes): nclasses = 3 else: nclasses = self.inputs.number_classes # when using multichannel, results basename is based on last # input filename if isdefined(self.inputs.out_basename): basefile = self.inputs.out_basename else: basefile = self.inputs.in_files[-1] outputs['tissue_class_map'] = self._gen_fname(basefile, suffix='_seg') if self.inputs.segments: outputs['tissue_class_files'] = [] for i in range(nclasses): outputs['tissue_class_files'].append( self._gen_fname(basefile, suffix='_seg_%d' % i)) if isdefined(self.inputs.output_biascorrected): outputs['restored_image'] = [] if len(self.inputs.in_files) > 1: # for multi-image segmentation there is one corrected 
image # per input for val, f in enumerate(self.inputs.in_files): # image numbering is 1-based outputs['restored_image'].append( self._gen_fname(basefile, suffix='_restore_%d' % (val + 1))) else: # single image segmentation has unnumbered output image outputs['restored_image'].append( self._gen_fname(basefile, suffix='_restore')) outputs['mixeltype'] = self._gen_fname(basefile, suffix='_mixeltype') if not self.inputs.no_pve: outputs['partial_volume_map'] = self._gen_fname( basefile, suffix='_pveseg') outputs['partial_volume_files'] = [] for i in range(nclasses): outputs[ 'partial_volume_files'].append(self._gen_fname(basefile, suffix='_pve_%d' % i)) if self.inputs.output_biasfield: outputs['bias_field'] = [] if len(self.inputs.in_files) > 1: # for multi-image segmentation there is one bias field image # per input for val, f in enumerate(self.inputs.in_files): # image numbering is 1-based outputs['bias_field'].append( self._gen_fname(basefile, suffix='_bias_%d' % (val + 1))) else: # single image segmentation has unnumbered output image outputs['bias_field'].append( self._gen_fname(basefile, suffix='_bias')) if self.inputs.probability_maps: outputs['probability_maps'] = [] for i in range(nclasses): outputs['probability_maps'].append( self._gen_fname(basefile, suffix='_prob_%d' % i)) return outputs class FLIRTInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='-in %s', mandatory=True, position=0, desc='input file') reference = File(exists=True, argstr='-ref %s', mandatory=True, position=1, desc='reference file') out_file = File(argstr='-out %s', desc='registered output file', name_source=['in_file'], name_template='%s_flirt', position=2, hash_files=False) out_matrix_file = File(argstr='-omat %s', name_source=['in_file'], keep_extension=True, name_template='%s_flirt.mat', desc='output affine matrix in 4x4 asciii format', position=3, hash_files=False) out_log = File(name_source=['in_file'], keep_extension=True, requires=['save_log'], 
name_template='%s_flirt.log', desc='output log') in_matrix_file = File(argstr='-init %s', desc='input 4x4 affine matrix') apply_xfm = traits.Bool(argstr='-applyxfm', requires=['in_matrix_file'], desc='apply transformation supplied by in_matrix_file') apply_isoxfm = traits.Float(argstr='-applyisoxfm %f', xor=['apply_xfm'], desc='as applyxfm but forces isotropic resampling') datatype = traits.Enum('char', 'short', 'int', 'float', 'double', argstr='-datatype %s', desc='force output data type') cost = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi', 'leastsq', 'labeldiff', 'bbr', argstr='-cost %s', desc='cost function') # XXX What is the difference between 'cost' and 'searchcost'? Are # these both necessary or do they map to the same variable. cost_func = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi', 'leastsq', 'labeldiff', 'bbr', argstr='-searchcost %s', desc='cost function') uses_qform = traits.Bool(argstr='-usesqform', desc='initialize using sform or qform') display_init = traits.Bool(argstr='-displayinit', desc='display initial matrix') angle_rep = traits.Enum('quaternion', 'euler', argstr='-anglerep %s', desc='representation of rotation angles') interp = traits.Enum('trilinear', 'nearestneighbour', 'sinc', 'spline', argstr='-interp %s', desc='final interpolation method used in reslicing') sinc_width = traits.Int(argstr='-sincwidth %d', units='voxels', desc='full-width in voxels') sinc_window = traits.Enum('rectangular', 'hanning', 'blackman', argstr='-sincwindow %s', desc='sinc window') # XXX better doc bins = traits.Int(argstr='-bins %d', desc='number of histogram bins') dof = traits.Int(argstr='-dof %d', desc='number of transform degrees of freedom') no_resample = traits.Bool(argstr='-noresample', desc='do not change input sampling') force_scaling = traits.Bool(argstr='-forcescaling', desc='force rescaling even for low-res images') min_sampling = traits.Float(argstr='-minsampling %f', units='mm', desc='set minimum voxel dimension for 
sampling') padding_size = traits.Int(argstr='-paddingsize %d', units='voxels', desc='for applyxfm: interpolates outside image ' 'by size') searchr_x = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees', argstr='-searchrx %s', desc='search angles along x-axis, in degrees') searchr_y = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees', argstr='-searchry %s', desc='search angles along y-axis, in degrees') searchr_z = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees', argstr='-searchrz %s', desc='search angles along z-axis, in degrees') no_search = traits.Bool(argstr='-nosearch', desc='set all angular searches to ranges 0 to 0') coarse_search = traits.Int(argstr='-coarsesearch %d', units='degrees', desc='coarse search delta angle') fine_search = traits.Int(argstr='-finesearch %d', units='degrees', desc='fine search delta angle') schedule = File(exists=True, argstr='-schedule %s', desc='replaces default schedule') ref_weight = File(exists=True, argstr='-refweight %s', desc='File for reference weighting volume') in_weight = File(exists=True, argstr='-inweight %s', desc='File for input weighting volume') no_clamp = traits.Bool(argstr='-noclamp', desc='do not use intensity clamping') no_resample_blur = traits.Bool(argstr='-noresampblur', desc='do not use blurring on downsampling') rigid2D = traits.Bool(argstr='-2D', desc='use 2D rigid body mode - ignores dof') save_log = traits.Bool(desc='save to log file') verbose = traits.Int(argstr='-verbose %d', desc='verbose mode, 0 is least') bgvalue = traits.Float(0, argstr='-setbackground %f', desc=('use specified background value for points ' 'outside FOV')) # BBR options wm_seg = File( argstr='-wmseg %s', min_ver='5.0.0', desc='white matter segmentation volume needed by BBR cost function') wmcoords = File( argstr='-wmcoords %s', min_ver='5.0.0', desc='white matter boundary coordinates for BBR cost function') wmnorms = File( argstr='-wmnorms %s', min_ver='5.0.0', desc='white matter boundary normals for 
BBR cost function') fieldmap = File( argstr='-fieldmap %s', min_ver='5.0.0', desc='fieldmap image in rads/s - must be already registered to the reference image') fieldmapmask = File( argstr='-fieldmapmask %s', min_ver='5.0.0', desc='mask for fieldmap image') pedir = traits.Int( argstr='-pedir %d', min_ver='5.0.0', desc='phase encode direction of EPI - 1/2/3=x/y/z & -1/-2/-3=-x/-y/-z') echospacing = traits.Float( argstr='-echospacing %f', min_ver='5.0.0', desc='value of EPI echo spacing - units of seconds') bbrtype = traits.Enum( 'signed', 'global_abs', 'local_abs', argstr='-bbrtype %s', min_ver='5.0.0', desc='type of bbr cost function: signed [default], global_abs, local_abs') bbrslope = traits.Float( argstr='-bbrslope %f', min_ver='5.0.0', desc='value of bbr slope') class FLIRTOutputSpec(TraitedSpec): out_file = File(exists=True, desc='path/name of registered file (if generated)') out_matrix_file = File(exists=True, desc='path/name of calculated affine transform ' '(if generated)') out_log = File(desc='path/name of output log (if generated)') class FLIRT(FSLCommand): """Use FSL FLIRT for coregistration. For complete details, see the `FLIRT Documentation. 
<http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_ To print out the command line help, use: fsl.FLIRT().inputs_help() Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> flt = fsl.FLIRT(bins=640, cost_func='mutualinfo') >>> flt.inputs.in_file = 'structural.nii' >>> flt.inputs.reference = 'mni.nii' >>> flt.inputs.output_type = "NIFTI_GZ" >>> flt.cmdline #doctest: +ELLIPSIS 'flirt -in structural.nii -ref mni.nii -out structural_flirt.nii.gz -omat structural_flirt.mat -bins 640 -searchcost mutualinfo' >>> res = flt.run() #doctest: +SKIP """ _cmd = 'flirt' input_spec = FLIRTInputSpec output_spec = FLIRTOutputSpec def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = super(FLIRT, self).aggregate_outputs( runtime=runtime, needed_outputs=needed_outputs) if isdefined(self.inputs.save_log) and self.inputs.save_log: with open(outputs.out_log, "a") as text_file: text_file.write(runtime.stdout + '\n') return outputs def _parse_inputs(self, skip=None): skip = [] if isdefined(self.inputs.save_log) and self.inputs.save_log: if not isdefined(self.inputs.verbose) or self.inputs.verbose == 0: self.inputs.verbose = 1 skip.append('save_log') return super(FLIRT, self)._parse_inputs(skip=skip) class ApplyXfmInputSpec(FLIRTInputSpec): apply_xfm = traits.Bool( True, argstr='-applyxfm', requires=['in_matrix_file'], desc='apply transformation supplied by in_matrix_file', usedefault=True) class ApplyXfm(FLIRT): """Currently just a light wrapper around FLIRT, with no modifications ApplyXfm is used to apply an existing tranform to an image Examples -------- >>> import nipype.interfaces.fsl as fsl >>> from nipype.testing import example_data >>> applyxfm = fsl.ApplyXfm() >>> applyxfm.inputs.in_file = example_data('structural.nii') >>> applyxfm.inputs.in_matrix_file = example_data('trans.mat') >>> applyxfm.inputs.out_file = 'newfile.nii' >>> applyxfm.inputs.reference = example_data('mni.nii') >>> applyxfm.inputs.apply_xfm = 
True >>> result = applyxfm.run() # doctest: +SKIP """ input_spec = ApplyXfmInputSpec class MCFLIRTInputSpec(FSLCommandInputSpec): in_file = File(exists=True, position=0, argstr="-in %s", mandatory=True, desc="timeseries to motion-correct") out_file = File(argstr='-out %s', genfile=True, desc="file to write", hash_files=False) cost = traits.Enum( 'mutualinfo', 'woods', 'corratio', 'normcorr', 'normmi', 'leastsquares', argstr='-cost %s', desc="cost function to optimize") bins = traits.Int(argstr='-bins %d', desc="number of histogram bins") dof = traits.Int( argstr='-dof %d', desc="degrees of freedom for the transformation") ref_vol = traits.Int(argstr='-refvol %d', desc="volume to align frames to") scaling = traits.Float( argstr='-scaling %.2f', desc="scaling factor to use") smooth = traits.Float( argstr='-smooth %.2f', desc="smoothing factor for the cost function") rotation = traits.Int( argstr='-rotation %d', desc="scaling factor for rotation tolerances") stages = traits.Int(argstr='-stages %d', desc="stages (if 4, perform final search with sinc interpolation") init = File(exists=True, argstr='-init %s', desc="inital transformation matrix") interpolation = traits.Enum("spline", "nn", "sinc", argstr="-%s_final", desc="interpolation method for transformation") use_gradient = traits.Bool( argstr='-gdt', desc="run search on gradient images") use_contour = traits.Bool( argstr='-edge', desc="run search on contour images") mean_vol = traits.Bool(argstr='-meanvol', desc="register to mean volume") stats_imgs = traits.Bool( argstr='-stats', desc="produce variance and std. dev. 
images") save_mats = traits.Bool( argstr='-mats', desc="save transformation matrices") save_plots = traits.Bool( argstr='-plots', desc="save transformation parameters") save_rms = traits.Bool( argstr='-rmsabs -rmsrel', desc="save rms displacement parameters") ref_file = File(exists=True, argstr='-reffile %s', desc="target image for motion correction") class MCFLIRTOutputSpec(TraitedSpec): out_file = File(exists=True, desc="motion-corrected timeseries") variance_img = File(exists=True, desc="variance image") std_img = File(exists=True, desc="standard deviation image") mean_img = File(exists=True, desc="mean timeseries image") par_file = File(exists=True, desc="text-file with motion parameters") mat_file = OutputMultiPath(File( exists=True), desc="transformation matrices") rms_files = OutputMultiPath(File(exists=True), desc="absolute and relative displacement parameters") class MCFLIRT(FSLCommand): """Use FSL MCFLIRT to do within-modality motion correction. For complete details, see the `MCFLIRT Documentation. 
<http://www.fmrib.ox.ac.uk/fsl/mcflirt/index.html>`_ Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> mcflt = fsl.MCFLIRT(in_file=example_data('functional.nii'), cost='mutualinfo') >>> res = mcflt.run() # doctest: +SKIP """ _cmd = 'mcflirt' input_spec = MCFLIRTInputSpec output_spec = MCFLIRTOutputSpec def _format_arg(self, name, spec, value): if name == "interpolation": if value == "trilinear": return "" else: return spec.argstr % value return super(MCFLIRT, self)._format_arg(name, spec, value) def _list_outputs(self): cwd = os.getcwd() outputs = self._outputs().get() outputs['out_file'] = self._gen_outfilename() if isdefined(self.inputs.stats_imgs) and self.inputs.stats_imgs: outputs['variance_img'] = self._gen_fname(outputs['out_file'] + '_variance.ext', cwd=cwd) outputs['std_img'] = self._gen_fname(outputs['out_file'] + '_sigma.ext', cwd=cwd) # The mean image created if -stats option is specified ('meanvol') # is missing the top and bottom slices. Therefore we only expose the # mean image created by -meanvol option ('mean_reg') which isn't # corrupted. # Note that the same problem holds for the std and variance image. if isdefined(self.inputs.mean_vol) and self.inputs.mean_vol: outputs['mean_img'] = self._gen_fname(outputs['out_file'] + '_mean_reg.ext', cwd=cwd) if isdefined(self.inputs.save_mats) and self.inputs.save_mats: _, filename = os.path.split(outputs['out_file']) matpathname = os.path.join(cwd, filename + '.mat') _, _, _, timepoints = load(self.inputs.in_file).get_shape() outputs['mat_file'] = [] for t in range(timepoints): outputs['mat_file'].append(os.path.join(matpathname, 'MAT_%04d' % t)) if isdefined(self.inputs.save_plots) and self.inputs.save_plots: # Note - if e.g. out_file has .nii.gz, you get .nii.gz.par, # which is what mcflirt does! 
outputs['par_file'] = outputs['out_file'] + '.par' if isdefined(self.inputs.save_rms) and self.inputs.save_rms: outfile = outputs['out_file'] outputs['rms_files'] = [outfile + '_abs.rms', outfile + '_rel.rms'] return outputs def _gen_filename(self, name): if name == 'out_file': return self._gen_outfilename() return None def _gen_outfilename(self): out_file = self.inputs.out_file if isdefined(out_file): out_file = os.path.realpath(out_file) if not isdefined(out_file) and isdefined(self.inputs.in_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_mcf') return os.path.abspath(out_file) class FNIRTInputSpec(FSLCommandInputSpec): ref_file = File(exists=True, argstr='--ref=%s', mandatory=True, desc='name of reference image') in_file = File(exists=True, argstr='--in=%s', mandatory=True, desc='name of input image') affine_file = File(exists=True, argstr='--aff=%s', desc='name of file containing affine transform') inwarp_file = File(exists=True, argstr='--inwarp=%s', desc='name of file containing initial non-linear warps') in_intensitymap_file = File(exists=True, argstr='--intin=%s', desc='name of file/files containing initial intensity maping' 'usually generated by previos fnirt run') fieldcoeff_file = traits.Either(traits.Bool, File, argstr='--cout=%s', desc='name of output file with field coefficients or true') warped_file = File(argstr='--iout=%s', desc='name of output image', genfile=True, hash_files=False) field_file = traits.Either(traits.Bool, File, argstr='--fout=%s', desc='name of output file with field or true', hash_files=False) jacobian_file = traits.Either(traits.Bool, File, argstr='--jout=%s', desc='name of file for writing out the Jacobian' 'of the field (for diagnostic or VBM purposes)', hash_files=False) modulatedref_file = traits.Either(traits.Bool, File, argstr='--refout=%s', desc='name of file for writing out intensity modulated' '--ref (for diagnostic purposes)', hash_files=False) out_intensitymap_file = traits.Either(traits.Bool, File, 
argstr='--intout=%s', desc='name of files for writing information pertaining ' 'to intensity mapping', hash_files=False) log_file = File(argstr='--logout=%s', desc='Name of log-file', genfile=True, hash_files=False) config_file = traits.Either( traits.Enum("T1_2_MNI152_2mm", "FA_2_FMRIB58_1mm"), File(exists=True), argstr='--config=%s', desc='Name of config file specifying command line arguments') refmask_file = File(exists=True, argstr='--refmask=%s', desc='name of file with mask in reference space') inmask_file = File(exists=True, argstr='--inmask=%s', desc='name of file with mask in input image space') skip_refmask = traits.Bool( argstr='--applyrefmask=0', xor=['apply_refmask'], desc='Skip specified refmask if set, default false') skip_inmask = traits.Bool(argstr='--applyinmask=0', xor=['apply_inmask'], desc='skip specified inmask if set, default false') apply_refmask = traits.List( traits.Enum(0, 1), argstr='--applyrefmask=%s', xor=['skip_refmask'], desc='list of iterations to use reference mask on (1 to use, 0 to skip)', sep=",") apply_inmask = traits.List( traits.Enum(0, 1), argstr='--applyinmask=%s', xor=['skip_inmask'], desc='list of iterations to use input mask on (1 to use, 0 to skip)', sep=",") skip_implicit_ref_masking = traits.Bool(argstr='--imprefm=0', desc='skip implicit masking based on value' 'in --ref image. Default = 0') skip_implicit_in_masking = traits.Bool(argstr='--impinm=0', desc='skip implicit masking based on value' 'in --in image. Default = 0') refmask_val = traits.Float(argstr='--imprefval=%f', desc='Value to mask out in --ref image. Default =0.0') inmask_val = traits.Float(argstr='--impinval=%f', desc='Value to mask out in --in image. 
Default =0.0') max_nonlin_iter = traits.List(traits.Int, argstr='--miter=%s', desc='Max # of non-linear iterations list, default [5, 5, 5, 5]', sep=",") subsampling_scheme = traits.List(traits.Int, argstr='--subsamp=%s', desc='sub-sampling scheme, list, default [4, 2, 1, 1]', sep=",") warp_resolution = traits.Tuple(traits.Int, traits.Int, traits.Int, argstr='--warpres=%d,%d,%d', desc='(approximate) resolution (in mm) of warp basis ' 'in x-, y- and z-direction, default 10, 10, 10') spline_order = traits.Int(argstr='--splineorder=%d', desc='Order of spline, 2->Qadratic spline, 3->Cubic spline. Default=3') in_fwhm = traits.List(traits.Int, argstr='--infwhm=%s', desc='FWHM (in mm) of gaussian smoothing kernel for input volume, default [6, 4, 2, 2]', sep=",") ref_fwhm = traits.List(traits.Int, argstr='--reffwhm=%s', desc='FWHM (in mm) of gaussian smoothing kernel for ref volume, default [4, 2, 0, 0]', sep=",") regularization_model = traits.Enum('membrane_energy', 'bending_energy', argstr='--regmod=%s', desc='Model for regularisation of warp-field [membrane_energy bending_energy], default bending_energy') regularization_lambda = traits.List(traits.Float, argstr='--lambda=%s', desc='Weight of regularisation, default depending on --ssqlambda and --regmod ' 'switches. See user documetation.', sep=",") skip_lambda_ssq = traits.Bool(argstr='--ssqlambda=0', desc='If true, lambda is not weighted by current ssq, default false') jacobian_range = traits.Tuple(traits.Float, traits.Float, argstr='--jacrange=%f,%f', desc='Allowed range of Jacobian determinants, default 0.01, 100.0') derive_from_ref = traits.Bool(argstr='--refderiv', desc='If true, ref image is used to calculate derivatives. 
Default false') intensity_mapping_model = traits.Enum('none', 'global_linear', 'global_non_linear' 'local_linear', 'global_non_linear_with_bias', 'local_non_linear', argstr='--intmod=%s', desc='Model for intensity-mapping') intensity_mapping_order = traits.Int(argstr='--intorder=%d', desc='Order of poynomial for mapping intensities, default 5') biasfield_resolution = traits.Tuple(traits.Int, traits.Int, traits.Int, argstr='--biasres=%d,%d,%d', desc='Resolution (in mm) of bias-field modelling ' 'local intensities, default 50, 50, 50') bias_regularization_lambda = traits.Float(argstr='--biaslambda=%f', desc='Weight of regularisation for bias-field, default 10000') skip_intensity_mapping = traits.Bool( argstr='--estint=0', xor=['apply_intensity_mapping'], desc='Skip estimate intensity-mapping default false') apply_intensity_mapping = traits.List( traits.Enum(0, 1), argstr='--estint=%s', xor=['skip_intensity_mapping'], desc='List of subsampling levels to apply intensity mapping for (0 to skip, 1 to apply)', sep=",") hessian_precision = traits.Enum('double', 'float', argstr='--numprec=%s', desc='Precision for representing Hessian, double or float. Default double') class FNIRTOutputSpec(TraitedSpec): fieldcoeff_file = File(exists=True, desc='file with field coefficients') warped_file = File(exists=True, desc='warped image') field_file = File(desc='file with warp field') jacobian_file = File(desc='file containing Jacobian of the field') modulatedref_file = File(desc='file containing intensity modulated --ref') out_intensitymap_file = File( desc='file containing info pertaining to intensity mapping') log_file = File(desc='Name of log-file') class FNIRT(FSLCommand): """Use FSL FNIRT for non-linear registration. 
Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> fnt = fsl.FNIRT(affine_file=example_data('trans.mat')) >>> res = fnt.run(ref_file=example_data('mni.nii', in_file=example_data('structural.nii')) #doctest: +SKIP T1 -> Mni153 >>> from nipype.interfaces import fsl >>> fnirt_mprage = fsl.FNIRT() >>> fnirt_mprage.inputs.in_fwhm = [8, 4, 2, 2] >>> fnirt_mprage.inputs.subsampling_scheme = [4, 2, 1, 1] Specify the resolution of the warps >>> fnirt_mprage.inputs.warp_resolution = (6, 6, 6) >>> res = fnirt_mprage.run(in_file='structural.nii', ref_file='mni.nii', warped_file='warped.nii', fieldcoeff_file='fieldcoeff.nii')#doctest: +SKIP We can check the command line and confirm that it's what we expect. >>> fnirt_mprage.cmdline #doctest: +SKIP 'fnirt --cout=fieldcoeff.nii --in=structural.nii --infwhm=8,4,2,2 --ref=mni.nii --subsamp=4,2,1,1 --warpres=6,6,6 --iout=warped.nii' """ _cmd = 'fnirt' input_spec = FNIRTInputSpec output_spec = FNIRTOutputSpec filemap = {'warped_file': 'warped', 'field_file': 'field', 'jacobian_file': 'field_jacobian', 'modulatedref_file': 'modulated', 'out_intensitymap_file': 'intmap', 'log_file': 'log.txt', 'fieldcoeff_file': 'fieldwarp'} def _list_outputs(self): outputs = self.output_spec().get() for key, suffix in self.filemap.items(): inval = getattr(self.inputs, key) change_ext = True if key in ['warped_file', 'log_file']: if suffix.endswith('.txt'): change_ext = False if isdefined(inval): outputs[key] = inval else: outputs[key] = self._gen_fname(self.inputs.in_file, suffix='_' + suffix, change_ext=change_ext) elif isdefined(inval): if isinstance(inval, bool): if inval: outputs[key] = self._gen_fname(self.inputs.in_file, suffix='_' + suffix, change_ext=change_ext) else: outputs[key] = os.path.abspath(inval) return outputs def _format_arg(self, name, spec, value): if name in self.filemap.keys(): return spec.argstr % self._list_outputs()[name] return super(FNIRT, self)._format_arg(name, spec, 
value) def _gen_filename(self, name): if name in ['warped_file', 'log_file']: return self._list_outputs()[name] return None def write_config(self, configfile): """Writes out currently set options to specified config file XX TODO : need to figure out how the config file is written Parameters ---------- configfile : /path/to/configfile """ try: fid = open(configfile, 'w+') except IOError: print ('unable to create config_file %s' % (configfile)) for item in self.inputs.get().items(): fid.write('%s\n' % (item)) fid.close() class ApplyWarpInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='--in=%s', mandatory=True, position=0, desc='image to be warped') out_file = File(argstr='--out=%s', genfile=True, position=2, desc='output filename', hash_files=False) ref_file = File(exists=True, argstr='--ref=%s', mandatory=True, position=1, desc='reference image') field_file = File(exists=True, argstr='--warp=%s', desc='file containing warp field') abswarp = traits.Bool(argstr='--abs', xor=['relwarp'], desc="treat warp field as absolute: x' = w(x)") relwarp = traits.Bool(argstr='--rel', xor=['abswarp'], position=-1, desc="treat warp field as relative: x' = x + w(x)") datatype = traits.Enum('char', 'short', 'int', 'float', 'double', argstr='--datatype=%s', desc='Force output data type [char short int float double].') supersample = traits.Bool(argstr='--super', desc='intermediary supersampling of output, default is off') superlevel = traits.Either(traits.Enum('a'), traits.Int, argstr='--superlevel=%s', desc="level of intermediary supersampling, a for 'automatic' or integer level. 
Default = 2")
    premat = File(exists=True, argstr='--premat=%s',
                  desc='filename for pre-transform (affine matrix)')
    postmat = File(exists=True, argstr='--postmat=%s',
                   desc='filename for post-transform (affine matrix)')
    mask_file = File(exists=True, argstr='--mask=%s',
                     desc='filename for mask image (in reference space)')
    interp = traits.Enum('nn', 'trilinear', 'sinc', 'spline',
                         argstr='--interp=%s', position=-2,
                         desc='interpolation method')


class ApplyWarpOutputSpec(TraitedSpec):
    out_file = File(exists=True, desc='Warped output file')


class ApplyWarp(FSLCommand):
    """Use FSL's applywarp to apply the results of a FNIRT registration

    Examples
    --------
    >>> from nipype.interfaces import fsl
    >>> from nipype.testing import example_data
    >>> aw = fsl.ApplyWarp()
    >>> aw.inputs.in_file = example_data('structural.nii')
    >>> aw.inputs.ref_file = example_data('mni.nii')
    >>> aw.inputs.field_file = 'my_coefficients_filed.nii' #doctest: +SKIP
    >>> res = aw.run() #doctest: +SKIP

    """

    _cmd = 'applywarp'
    input_spec = ApplyWarpInputSpec
    output_spec = ApplyWarpOutputSpec

    def _format_arg(self, name, spec, value):
        # 'superlevel' can hold either an int or the literal 'a'
        # ("automatic"), so coerce to str before %-formatting.
        if name == 'superlevel':
            return spec.argstr % str(value)
        return super(ApplyWarp, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        outputs = self._outputs().get()
        # Default output name is <in_file>_warp when out_file is unset.
        if not isdefined(self.inputs.out_file):
            outputs['out_file'] = self._gen_fname(self.inputs.in_file,
                                                  suffix='_warp')
        else:
            outputs['out_file'] = os.path.abspath(self.inputs.out_file)
        return outputs

    def _gen_filename(self, name):
        if name == 'out_file':
            return self._list_outputs()[name]
        return None


class SliceTimerInputSpec(FSLCommandInputSpec):
    in_file = File(exists=True, argstr='--in=%s',
                   mandatory=True, position=0,
                   desc='filename of input timeseries')
    out_file = File(argstr='--out=%s', genfile=True,
                    desc='filename of output timeseries', hash_files=False)
    index_dir = traits.Bool(argstr='--down',
                            desc='slice indexing from top to bottom')
    time_repetition = traits.Float(argstr='--repeat=%f',
                                   desc='Specify TR of data - 
default is 3s') slice_direction = traits.Enum(1, 2, 3, argstr='--direction=%d', desc='direction of slice acquisition (x=1, y=2, z=3) - default is z') interleaved = traits.Bool(argstr='--odd', desc='use interleaved acquisition') custom_timings = File(exists=True, argstr='--tcustom=%s', desc='slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift)') global_shift = traits.Float(argstr='--tglobal', desc='shift in fraction of TR, range 0:1 (default is 0.5 = no shift)') custom_order = File(exists=True, argstr='--ocustom=%s', desc='filename of single-column custom interleave order file (first slice is referred to as 1 not 0)') class SliceTimerOutputSpec(TraitedSpec): slice_time_corrected_file = File( exists=True, desc='slice time corrected file') class SliceTimer(FSLCommand): """ use FSL slicetimer to perform slice timing correction. Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> st = fsl.SliceTimer() >>> st.inputs.in_file = example_data('functional.nii') >>> st.inputs.interleaved = True >>> result = st.run() #doctest: +SKIP """ _cmd = 'slicetimer' input_spec = SliceTimerInputSpec output_spec = SliceTimerOutputSpec def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_st') outputs['slice_time_corrected_file'] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): if name == 'out_file': return self._list_outputs()['slice_time_corrected_file'] return None class SUSANInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='%s', mandatory=True, position=1, desc='filename of input timeseries') brightness_threshold = traits.Float(argstr='%.10f', position=2, mandatory=True, desc='brightness threshold and should be greater than ' 'noise level and less than contrast of edges to ' 'be preserved.') fwhm = traits.Float(argstr='%.10f', position=3, mandatory=True, 
desc='fwhm of smoothing, in mm, gets converted using sqrt(8*log(2))') dimension = traits.Enum(3, 2, argstr='%d', position=4, usedefault=True, desc='within-plane (2) or fully 3D (3)') use_median = traits.Enum(1, 0, argstr='%d', position=5, usedefault=True, desc='whether to use a local median filter in the cases where single-point noise is detected') usans = traits.List( traits.Tuple(File(exists=True), traits.Float), maxlen=2, argstr='', position=6, default=[], usedefault=True, desc='determines whether the smoothing area (USAN) is to be ' 'found from secondary images (0, 1 or 2). A negative ' 'value for any brightness threshold will auto-set the ' 'threshold at 10% of the robust range') out_file = File(argstr='%s', position=-1, genfile=True, desc='output file name', hash_files=False) class SUSANOutputSpec(TraitedSpec): smoothed_file = File(exists=True, desc='smoothed output file') class SUSAN(FSLCommand): """ use FSL SUSAN to perform smoothing Examples -------- >>> from nipype.interfaces import fsl >>> from nipype.testing import example_data >>> print anatfile #doctest: +SKIP anatomical.nii #doctest: +SKIP >>> sus = fsl.SUSAN() >>> sus.inputs.in_file = example_data('structural.nii') >>> sus.inputs.brightness_threshold = 2000.0 >>> sus.inputs.fwhm = 8.0 >>> result = sus.run() #doctest: +SKIP """ _cmd = 'susan' input_spec = SUSANInputSpec output_spec = SUSANOutputSpec def _format_arg(self, name, spec, value): if name == 'fwhm': return spec.argstr % (float(value) / np.sqrt(8 * np.log(2))) if name == 'usans': if not value: return '0' arglist = [str(len(value))] for filename, thresh in value: arglist.extend([filename, '%.10f' % thresh]) return ' '.join(arglist) return super(SUSAN, self)._format_arg(name, spec, value) def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.out_file if not isdefined(out_file): out_file = self._gen_fname(self.inputs.in_file, suffix='_smooth') outputs['smoothed_file'] = os.path.abspath(out_file) return outputs def 
_gen_filename(self, name): if name == 'out_file': return self._list_outputs()['smoothed_file'] return None class FUGUEInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='--in=%s', desc='filename of input volume') shift_in_file = File(exists=True, argstr='--loadshift=%s', desc='filename for reading pixel shift volume') phasemap_in_file = File(exists=True, argstr='--phasemap=%s', desc='filename for input phase image') fmap_in_file = File(exists=True, argstr='--loadfmap=%s', desc='filename for loading fieldmap (rad/s)') unwarped_file = File(argstr='--unwarp=%s', desc='apply unwarping and save as filename', xor=['warped_file'], requires=['in_file']) warped_file = File(argstr='--warp=%s', desc='apply forward warping and save as filename', xor=['unwarped_file'], requires=['in_file']) forward_warping = traits.Bool(False, usedefault=True, desc='apply forward warping instead of unwarping') dwell_to_asym_ratio = traits.Float(argstr='--dwelltoasym=%.10f', desc='set the dwell to asym time ratio') dwell_time = traits.Float(argstr='--dwell=%.10f', desc=('set the EPI dwell time per phase-encode line - same as echo ' 'spacing - (sec)')) asym_se_time = traits.Float(argstr='--asym=%.10f', desc='set the fieldmap asymmetric spin echo time (sec)') median_2dfilter = traits.Bool(argstr='--median', desc='apply 2D median filtering') despike_2dfilter = traits.Bool(argstr='--despike', desc='apply a 2D de-spiking filter') no_gap_fill = traits.Bool(argstr='--nofill', desc='do not apply gap-filling measure to the fieldmap') no_extend = traits.Bool(argstr='--noextend', desc='do not apply rigid-body extrapolation to the fieldmap') smooth2d = traits.Float(argstr='--smooth2=%.2f', desc='apply 2D Gaussian smoothing of sigma N (in mm)') smooth3d = traits.Float(argstr='--smooth3=%.2f', desc='apply 3D Gaussian smoothing of sigma N (in mm)') poly_order = traits.Int(argstr='--poly=%d', desc='apply polynomial fitting of order N') fourier_order = traits.Int(argstr='--fourier=%d', 
desc='apply Fourier (sinusoidal) fitting of order N') pava = traits.Bool(argstr='--pava', desc='apply monotonic enforcement via PAVA') despike_threshold = traits.Float(argstr='--despikethreshold=%s', desc='specify the threshold for de-spiking (default=3.0)') unwarp_direction = traits.Enum('x', 'y', 'z', 'x-', 'y-', 'z-', argstr='--unwarpdir=%s', desc='specifies direction of warping (default y)') phase_conjugate = traits.Bool(argstr='--phaseconj', desc='apply phase conjugate method of unwarping') icorr = traits.Bool(argstr='--icorr', requires=['shift_in_file'], desc='apply intensity correction to unwarping (pixel shift method only)') icorr_only = traits.Bool(argstr='--icorronly', requires=['unwarped_file'], desc='apply intensity correction only') mask_file = File(exists=True, argstr='--mask=%s', desc='filename for loading valid mask') nokspace = traits.Bool(False, argstr='--nokspace', desc='do not use k-space forward warping') # Special outputs: shift (voxel shift map, vsm) save_shift = traits.Bool(False, xor=['save_unmasked_shift'], desc='write pixel shift volume') shift_out_file = File(argstr='--saveshift=%s', desc='filename for saving pixel shift volume') save_unmasked_shift = traits.Bool(argstr='--unmaskshift', xor=['save_shift'], desc='saves the unmasked shiftmap when using --saveshift') # Special outputs: fieldmap (fmap) save_fmap = traits.Bool(False, xor=['save_unmasked_fmap'], desc='write field map volume') fmap_out_file = File(argstr='--savefmap=%s', desc='filename for saving fieldmap (rad/s)') save_unmasked_fmap = traits.Bool(False, argstr='--unmaskfmap', xor=['save_fmap'], desc='saves the unmasked fieldmap when using --savefmap') class FUGUEOutputSpec(TraitedSpec): unwarped_file = File(desc='unwarped file') warped_file = File(desc='forward warped file') shift_out_file = File(desc='voxel shift map file') fmap_out_file = File(desc='fieldmap file') class FUGUE(FSLCommand): """ `FUGUE <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FUGUE>`_ is, most generally, a set 
of tools for EPI distortion correction. Distortions may be corrected for 1. improving registration with non-distorted images (e.g. structurals), or 2. dealing with motion-dependent changes. FUGUE is designed to deal only with the first case - improving registration. Examples -------- Unwarping an input image (shift map is known) >>> from nipype.interfaces.fsl.preprocess import FUGUE >>> fugue = FUGUE() >>> fugue.inputs.in_file = 'epi.nii' >>> fugue.inputs.mask_file = 'epi_mask.nii' >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well >>> fugue.inputs.unwarp_direction = 'y' >>> fugue.inputs.output_type = "NIFTI_GZ" >>> fugue.cmdline #doctest: +ELLIPSIS 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --unwarp=epi_unwarped.nii.gz' >>> fugue.run() #doctest: +SKIP Warping an input image (shift map is known) >>> from nipype.interfaces.fsl.preprocess import FUGUE >>> fugue = FUGUE() >>> fugue.inputs.in_file = 'epi.nii' >>> fugue.inputs.forward_warping = True >>> fugue.inputs.mask_file = 'epi_mask.nii' >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well >>> fugue.inputs.unwarp_direction = 'y' >>> fugue.inputs.output_type = "NIFTI_GZ" >>> fugue.cmdline #doctest: +ELLIPSIS 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --warp=epi_warped.nii.gz' >>> fugue.run() #doctest: +SKIP Computing the vsm (unwrapped phase map is known) >>> from nipype.interfaces.fsl.preprocess import FUGUE >>> fugue = FUGUE() >>> fugue.inputs.phasemap_in_file = 'epi_phasediff.nii' >>> fugue.inputs.mask_file = 'epi_mask.nii' >>> fugue.inputs.dwell_to_asym_ratio = (0.77e-3 * 3) / 2.46e-3 >>> fugue.inputs.unwarp_direction = 'y' >>> fugue.inputs.save_shift = True >>> fugue.inputs.output_type = "NIFTI_GZ" >>> fugue.cmdline #doctest: +ELLIPSIS 'fugue --dwelltoasym=0.9390243902 --mask=epi_mask.nii --phasemap=epi_phasediff.nii --saveshift=epi_phasediff_vsm.nii.gz --unwarpdir=y' >>> fugue.run() 
#doctest: +SKIP """ _cmd = 'fugue' input_spec = FUGUEInputSpec output_spec = FUGUEOutputSpec def _parse_inputs(self, skip=None): if skip is None: skip = [] input_phase = isdefined(self.inputs.phasemap_in_file) input_vsm = isdefined(self.inputs.shift_in_file) input_fmap = isdefined(self.inputs.fmap_in_file) if not input_phase and not input_vsm and not input_fmap: raise RuntimeError('Either phasemap_in_file, shift_in_file or fmap_in_file must be set.') if not isdefined(self.inputs.in_file): skip += ['unwarped_file', 'warped_file'] else: if self.inputs.forward_warping: skip += ['unwarped_file'] trait_spec = self.inputs.trait('warped_file') trait_spec.name_template = "%s_warped" trait_spec.name_source = 'in_file' trait_spec.output_name = 'warped_file' else: skip += ['warped_file'] trait_spec = self.inputs.trait('unwarped_file') trait_spec.name_template = "%s_unwarped" trait_spec.name_source = 'in_file' trait_spec.output_name = 'unwarped_file' # Handle shift output if not isdefined(self.inputs.shift_out_file): vsm_save_masked = (isdefined(self.inputs.save_shift) and self.inputs.save_shift) vsm_save_unmasked = (isdefined(self.inputs.save_unmasked_shift) and self.inputs.save_unmasked_shift) if (vsm_save_masked or vsm_save_unmasked): trait_spec = self.inputs.trait('shift_out_file') trait_spec.output_name = 'shift_out_file' if input_fmap: trait_spec.name_source = 'fmap_in_file' elif input_phase: trait_spec.name_source = 'phasemap_in_file' elif input_vsm: trait_spec.name_source = 'shift_in_file' else: raise RuntimeError(('Either phasemap_in_file, shift_in_file or ' 'fmap_in_file must be set.')) if vsm_save_unmasked: trait_spec.name_template = '%s_vsm_unmasked' else: trait_spec.name_template = '%s_vsm' else: skip += ['save_shift', 'save_unmasked_shift', 'shift_out_file'] # Handle fieldmap output if not isdefined(self.inputs.fmap_out_file): fmap_save_masked = (isdefined(self.inputs.save_fmap) and self.inputs.save_fmap) fmap_save_unmasked = 
(isdefined(self.inputs.save_unmasked_fmap) and self.inputs.save_unmasked_fmap) if (fmap_save_masked or fmap_save_unmasked): trait_spec = self.inputs.trait('fmap_out_file') trait_spec.output_name = 'fmap_out_file' if input_vsm: trait_spec.name_source = 'shift_in_file' elif input_phase: trait_spec.name_source = 'phasemap_in_file' elif input_fmap: trait_spec.name_source = 'fmap_in_file' else: raise RuntimeError(('Either phasemap_in_file, shift_in_file or ' 'fmap_in_file must be set.')) if fmap_save_unmasked: trait_spec.name_template = '%s_fieldmap_unmasked' else: trait_spec.name_template = '%s_fieldmap' else: skip += ['save_fmap', 'save_unmasked_fmap', 'fmap_out_file'] return super(FUGUE, self)._parse_inputs(skip=skip) class PRELUDEInputSpec(FSLCommandInputSpec): complex_phase_file = File(exists=True, argstr='--complex=%s', mandatory=True, xor=[ 'magnitude_file', 'phase_file'], desc='complex phase input volume') magnitude_file = File(exists=True, argstr='--abs=%s', mandatory=True, xor=['complex_phase_file'], desc='file containing magnitude image') phase_file = File(exists=True, argstr='--phase=%s', mandatory=True, xor=['complex_phase_file'], desc='raw phase file') unwrapped_phase_file = File(genfile=True, argstr='--unwrap=%s', desc='file containing unwrapepd phase', hash_files=False) num_partitions = traits.Int(argstr='--numphasesplit=%d', desc='number of phase partitions to use') labelprocess2d = traits.Bool(argstr='--labelslices', desc='does label processing in 2D (slice at a time)') process2d = traits.Bool(argstr='--slices', xor=['labelprocess2d'], desc='does all processing in 2D (slice at a time)') process3d = traits.Bool(argstr='--force3D', xor=['labelprocess2d', 'process2d'], desc='forces all processing to be full 3D') threshold = traits.Float(argstr='--thresh=%.10f', desc='intensity threshold for masking') mask_file = File(exists=True, argstr='--mask=%s', desc='filename of mask input volume') start = traits.Int(argstr='--start=%d', desc='first image number to 
process (default 0)') end = traits.Int(argstr='--end=%d', desc='final image number to process (default Inf)') savemask_file = File(argstr='--savemask=%s', desc='saving the mask volume', hash_files=False) rawphase_file = File(argstr='--rawphase=%s', desc='saving the raw phase output', hash_files=False) label_file = File(argstr='--labels=%s', desc='saving the area labels output', hash_files=False) removeramps = traits.Bool(argstr='--removeramps', desc='remove phase ramps during unwrapping') class PRELUDEOutputSpec(TraitedSpec): unwrapped_phase_file = File(exists=True, desc='unwrapped phase file') class PRELUDE(FSLCommand): """Use FSL prelude to do phase unwrapping Examples -------- Please insert examples for use of this command """ input_spec = PRELUDEInputSpec output_spec = PRELUDEOutputSpec _cmd = 'prelude' def __init__(self, **kwargs): super(PRELUDE, self).__init__(**kwargs) warn('This has not been fully tested. Please report any failures.') def _list_outputs(self): outputs = self._outputs().get() out_file = self.inputs.unwrapped_phase_file if not isdefined(out_file): if isdefined(self.inputs.phase_file): out_file = self._gen_fname(self.inputs.phase_file, suffix='_unwrapped') elif isdefined(self.inputs.complex_phase_file): out_file = self._gen_fname(self.inputs.complex_phase_file, suffix='_phase_unwrapped') outputs['unwrapped_phase_file'] = os.path.abspath(out_file) return outputs def _gen_filename(self, name): if name == 'unwrapped_phase_file': return self._list_outputs()['unwrapped_phase_file'] return None class FIRSTInputSpec(FSLCommandInputSpec): in_file = File( exists=True, mandatory=True, position=-2, copyfile=False, argstr='-i %s', desc='input data file') out_file = File( 'segmented', usedefault=True, mandatory=True, position=-1, argstr='-o %s', desc='output data file', hash_files=False) verbose = traits.Bool(argstr='-v', position=1, desc="Use verbose logging.") brain_extracted = traits.Bool( argstr='-b', position=2, desc="Input structural image is already 
brain-extracted") no_cleanup = traits.Bool( argstr='-d', position=3, desc="Input structural image is already brain-extracted") method = traits.Enum( 'auto', 'fast', 'none', xor=['method_as_numerical_threshold'], argstr='-m %s', position=4, usedefault=True, desc=("Method must be one of auto, fast, none, or it can be entered " "using the 'method_as_numerical_threshold' input")) method_as_numerical_threshold = traits.Float( argstr='-m %.4f', position=4, desc=("Specify a numerical threshold value or use the 'method' input " "to choose auto, fast, or none")) list_of_specific_structures = traits.List( traits.Str, argstr='-s %s', sep=',', position=5, minlen=1, desc='Runs only on the specified structures (e.g. L_Hipp, R_Hipp' 'L_Accu, R_Accu, L_Amyg, R_Amyg' 'L_Caud, R_Caud, L_Pall, R_Pall' 'L_Puta, R_Puta, L_Thal, R_Thal, BrStem') affine_file = File( exists=True, position=6, argstr='-a %s', desc=('Affine matrix to use (e.g. img2std.mat) (does not ' 're-run registration)')) class FIRSTOutputSpec(TraitedSpec): vtk_surfaces = OutputMultiPath( File(exists=True), desc='VTK format meshes for each subcortical region') bvars = OutputMultiPath( File(exists=True), desc='bvars for each subcortical region') original_segmentations = File( exists=True, desc=('3D image file containing the segmented regions ' 'as integer values. 
Uses CMA labelling')) segmentation_file = File( exists=True, desc=('4D image file containing a single volume per ' 'segmented region')) class FIRST(FSLCommand): """ Use FSL's run_first_all command to segment subcortical volumes http://www.fmrib.ox.ac.uk/fsl/first/index.html Examples -------- >>> from nipype.interfaces import fsl >>> first = fsl.FIRST() >>> first.inputs.in_file = 'structural.nii' >>> first.inputs.out_file = 'segmented.nii' >>> res = first.run() #doctest: +SKIP """ _cmd = 'run_first_all' input_spec = FIRSTInputSpec output_spec = FIRSTOutputSpec def _list_outputs(self): outputs = self.output_spec().get() if isdefined(self.inputs.list_of_specific_structures): structures = self.inputs.list_of_specific_structures else: structures = ['L_Hipp', 'R_Hipp', 'L_Accu', 'R_Accu', 'L_Amyg', 'R_Amyg', 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', 'L_Thal', 'R_Thal', 'BrStem'] outputs['original_segmentations'] = \ self._gen_fname('original_segmentations') outputs['segmentation_file'] = self._gen_fname('segmentation_file') outputs['vtk_surfaces'] = self._gen_mesh_names('vtk_surfaces', structures) outputs['bvars'] = self._gen_mesh_names('bvars', structures) return outputs def _gen_fname(self, name): path, outname, ext = split_filename(self.inputs.out_file) method = 'none' if isdefined(self.inputs.method) and self.inputs.method == 'fast': method = 'fast' if isdefined(self.inputs.method_as_numerical_threshold): thres = '%.4f' % self.inputs.method_as_numerical_threshold method = thres.replace('.', '') if name == 'original_segmentations': return op.abspath('%s_all_%s_origsegs.nii.gz' % (outname, method)) if name == 'segmentation_file': return op.abspath('%s_all_%s_firstseg.nii.gz' % (outname, method)) return None def _gen_mesh_names(self, name, structures): path, prefix, ext = split_filename(self.inputs.out_file) if name == 'vtk_surfaces': vtks = list() for struct in structures: vtk = prefix + '-' + struct + '_first.vtk' vtks.append(op.abspath(vtk)) return 
vtks if name == 'bvars': bvars = list() for struct in structures: bvar = prefix + '-' + struct + '_first.bvars' bvars.append(op.abspath(bvar)) return bvars return None
bsd-3-clause
lovewinds/story-project
external/scripts/packages/pugixml/ios.py
1
3515
#!/usr/bin/env python3 import os from shutil import copytree, copy2 from pathlib import Path from scripts.build_env import BuildEnv, Platform from scripts.platform_builder import PlatformBuilder class pugixmliOSBuilder(PlatformBuilder): def __init__(self, config_package: dict=None, config_platform: dict=None): super().__init__(config_package, config_platform) def build(self): build_path = '{}/{}/scripts/build'.format( self.env.source_path, self.config['name'] ) _check = self.env.install_lib_path / self.config.get("checker") if os.path.exists(_check): self.tag_log("Already built.") return self.tag_log("Start building ...") BuildEnv.mkdir_p(build_path) os.chdir(build_path) cmd = 'CMD_PREFIX={} {}/ios-build.sh pugixml arm64'.format( self.env.install_path, self.env.working_path ) self.env.run_command(cmd, module_name=self.config['name']) cmd = 'CMD_PREFIX={} {}/ios-build.sh pugixml armv7'.format( self.env.install_path, self.env.working_path ) self.env.run_command(cmd, module_name=self.config['name']) def post(self): # Copy header files also include_path = '{}/{}/src'.format( self.env.source_path, self.config['name'] ) _path = Path(include_path) _files = [x for x in _path.iterdir() if x.is_file()] for ff in _files: if not ff.name.endswith('.hpp') and not ff.name.endswith('.h'): continue copy2(str(ff), self.env.install_include_path) self.create_framework_iOS() def create_framework_iOS(self): # Required path include_path = '{}/{}/src'.format( self.env.source_path, self.config['name'] ) _framework_dir = '{}/{}.framework'.format( self.env.framework_path, self.config['name'], ) _framework_header_dir = '{}/{}.framework/Headers'.format( self.env.framework_path, self.config['name'], ) _framework_resource_dir = '{}/{}.framework/Resources'.format( self.env.framework_path, self.config['name'], ) # Copy headers into Framework directory self.tag_log("Framework : Copying header ...") BuildEnv.mkdir_p(_framework_header_dir) _path = Path(include_path) _files = [x for x in 
_path.iterdir() if x.is_file()] for ff in _files: if not ff.name.endswith('.hpp') and not ff.name.endswith('.h'): continue copy2(str(ff), _framework_header_dir) # Copy binaries self.tag_log("Framework : Copying binary ...") BuildEnv.mkdir_p(_framework_dir) _lib_src_file = '{}/libpugixml.a'.format(self.env.install_lib_path) _lib_dst_file = '{}/pugixml'.format(_framework_dir) copy2(_lib_src_file, _lib_dst_file) # Create plist BuildEnv.mkdir_p(_framework_resource_dir) plist_str = self.env.apple_framework_plist.replace( '${FRAMEWORK_NAME}', self.config['name'] ).replace( '${FRAMEWORK_CURRENT_VERSION}', self.config['name'] ) plist_file = '{}/Info.plist'.format(_framework_resource_dir) with open(plist_file, "w") as pf: pf.write(plist_str)
gpl-2.0
zp312/gmmreg
Python/_plotting.py
14
2435
#!/usr/bin/env python #coding=utf-8 ##==================================================== ## $Author$ ## $Date$ ## $Revision$ ##==================================================== from pylab import * from configobj import ConfigObj import matplotlib.pyplot as plt def display2Dpointset(A): fig = plt.figure() ax = fig.add_subplot(111) #ax.grid(True) ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1) labels = plt.getp(plt.gca(), 'xticklabels') plt.setp(labels, color='k', fontweight='bold') labels = plt.getp(plt.gca(), 'yticklabels') plt.setp(labels, color='k', fontweight='bold') for i,x in enumerate(A): ax.annotate('%d'%(i+1), xy = x, xytext = x + 0) ax.set_axis_off() #fig.show() def display2Dpointsets(A, B, ax = None): """ display a pair of 2D point sets """ if not ax: fig = plt.figure() ax = fig.add_subplot(111) ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1) ax.plot(B[:,0],B[:,1],'b+',markersize=8,mew=1) #pylab.setp(pylab.gca(), 'xlim', [-0.15,0.6]) labels = plt.getp(plt.gca(), 'xticklabels') plt.setp(labels, color='k', fontweight='bold') labels = plt.getp(plt.gca(), 'yticklabels') plt.setp(labels, color='k', fontweight='bold') def display3Dpointsets(A,B,ax): #ax.plot3d(A[:,0],A[:,1],A[:,2],'yo',markersize=10,mew=1) #ax.plot3d(B[:,0],B[:,1],B[:,2],'b+',markersize=10,mew=1) ax.scatter(A[:,0],A[:,1],A[:,2], c = 'y', marker = 'o') ax.scatter(B[:,0],B[:,1],B[:,2], c = 'b', marker = '+') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') from mpl_toolkits.mplot3d import Axes3D def displayABC(A,B,C): fig = plt.figure() dim = A.shape[1] if dim==2: ax = plt.subplot(121) display2Dpointsets(A, B, ax) ax = plt.subplot(122) display2Dpointsets(C, B, ax) if dim==3: plot1 = plt.subplot(1,2,1) ax = Axes3D(fig, rect = plot1.get_position()) display3Dpointsets(A,B,ax) plot2 = plt.subplot(1,2,2) ax = Axes3D(fig, rect = plot2.get_position()) display3Dpointsets(C,B,ax) plt.show() def display_pts(f_config): config = ConfigObj(f_config) file_section = config['FILES'] mf = 
file_section['model'] sf = file_section['scene'] tf = file_section['transformed_model'] m = np.loadtxt(mf) s = np.loadtxt(sf) t = np.loadtxt(tf) displayABC(m,s,t)
gpl-3.0
aduggan/rpi-linux
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
12980
5411
# SchedGui.py - Python extension for perf script, basic GUI code for # traces drawing and overview. # # Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com> # # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. try: import wx except ImportError: raise ImportError, "You need to install the wxpython lib for this script" class RootFrame(wx.Frame): Y_OFFSET = 100 RECT_HEIGHT = 100 RECT_SPACE = 50 EVENT_MARKING_WIDTH = 5 def __init__(self, sched_tracer, title, parent = None, id = -1): wx.Frame.__init__(self, parent, id, title) (self.screen_width, self.screen_height) = wx.GetDisplaySize() self.screen_width -= 10 self.screen_height -= 10 self.zoom = 0.5 self.scroll_scale = 20 self.sched_tracer = sched_tracer self.sched_tracer.set_root_win(self) (self.ts_start, self.ts_end) = sched_tracer.interval() self.update_width_virtual() self.nr_rects = sched_tracer.nr_rectangles() + 1 self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)) # whole window panel self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height)) # scrollable container self.scroll = wx.ScrolledWindow(self.panel) self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale) self.scroll.EnableScrolling(True, True) self.scroll.SetFocus() # scrollable drawing area self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2)) self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint) self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press) self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down) self.scroll.Bind(wx.EVT_PAINT, self.on_paint) self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press) self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down) self.scroll.Fit() self.Fit() self.scroll_panel.SetDimensions(-1, -1, 
self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING) self.txt = None self.Show(True) def us_to_px(self, val): return val / (10 ** 3) * self.zoom def px_to_us(self, val): return (val / self.zoom) * (10 ** 3) def scroll_start(self): (x, y) = self.scroll.GetViewStart() return (x * self.scroll_scale, y * self.scroll_scale) def scroll_start_us(self): (x, y) = self.scroll_start() return self.px_to_us(x) def paint_rectangle_zone(self, nr, color, top_color, start, end): offset_px = self.us_to_px(start - self.ts_start) width_px = self.us_to_px(end - self.ts_start) offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)) width_py = RootFrame.RECT_HEIGHT dc = self.dc if top_color is not None: (r, g, b) = top_color top_color = wx.Colour(r, g, b) brush = wx.Brush(top_color, wx.SOLID) dc.SetBrush(brush) dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH) width_py -= RootFrame.EVENT_MARKING_WIDTH offset_py += RootFrame.EVENT_MARKING_WIDTH (r ,g, b) = color color = wx.Colour(r, g, b) brush = wx.Brush(color, wx.SOLID) dc.SetBrush(brush) dc.DrawRectangle(offset_px, offset_py, width_px, width_py) def update_rectangles(self, dc, start, end): start += self.ts_start end += self.ts_start self.sched_tracer.fill_zone(start, end) def on_paint(self, event): dc = wx.PaintDC(self.scroll_panel) self.dc = dc width = min(self.width_virtual, self.screen_width) (x, y) = self.scroll_start() start = self.px_to_us(x) end = self.px_to_us(x + width) self.update_rectangles(dc, start, end) def rect_from_ypixel(self, y): y -= RootFrame.Y_OFFSET rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE) height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE) if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT: return -1 return rect def update_summary(self, txt): if self.txt: self.txt.Destroy() self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50)) def on_mouse_down(self, event): (x, y) = 
event.GetPositionTuple() rect = self.rect_from_ypixel(y) if rect == -1: return t = self.px_to_us(x) + self.ts_start self.sched_tracer.mouse_down(rect, t) def update_width_virtual(self): self.width_virtual = self.us_to_px(self.ts_end - self.ts_start) def __zoom(self, x): self.update_width_virtual() (xpos, ypos) = self.scroll.GetViewStart() xpos = self.us_to_px(x) / self.scroll_scale self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos) self.Refresh() def zoom_in(self): x = self.scroll_start_us() self.zoom *= 2 self.__zoom(x) def zoom_out(self): x = self.scroll_start_us() self.zoom /= 2 self.__zoom(x) def on_key_press(self, event): key = event.GetRawKeyCode() if key == ord("+"): self.zoom_in() return if key == ord("-"): self.zoom_out() return key = event.GetKeyCode() (x, y) = self.scroll.GetViewStart() if key == wx.WXK_RIGHT: self.scroll.Scroll(x + 1, y) elif key == wx.WXK_LEFT: self.scroll.Scroll(x - 1, y) elif key == wx.WXK_DOWN: self.scroll.Scroll(x, y + 1) elif key == wx.WXK_UP: self.scroll.Scroll(x, y - 1)
gpl-2.0
MobileWebApps/backend-python-rest-gae
lib/markdown/extensions/meta.py
19
2761
""" Meta Data Extension for Python-Markdown ======================================= This extension adds Meta Data handling to markdown. Basic Usage: >>> import markdown >>> text = '''Title: A Test Doc. ... Author: Waylan Limberg ... John Doe ... Blank_Data: ... ... The body. This is paragraph one. ... ''' >>> md = markdown.Markdown(['meta']) >>> print md.convert(text) <p>The body. This is paragraph one.</p> >>> print md.Meta {u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']} Make sure text without Meta Data still works (markdown < 1.6b returns a <p>). >>> text = ' Some Code - not extra lines of meta data.' >>> md = markdown.Markdown(['meta']) >>> print md.convert(text) <pre><code>Some Code - not extra lines of meta data. </code></pre> >>> md.Meta {} Copyright 2007-2008 [Waylan Limberg](http://achinghead.com). Project website: <http://packages.python.org/Markdown/meta_data.html> Contact: markdown@freewisdom.org License: BSD (see ../LICENSE.md for details) """ from __future__ import absolute_import from __future__ import unicode_literals from . import Extension from ..preprocessors import Preprocessor import re # Global Vars META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)') META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)') class MetaExtension (Extension): """ Meta-Data extension for Python-Markdown. """ def extendMarkdown(self, md, md_globals): """ Add MetaPreprocessor to Markdown instance. """ md.preprocessors.add("meta", MetaPreprocessor(md), "_begin") class MetaPreprocessor(Preprocessor): """ Get Meta-Data. """ def run(self, lines): """ Parse Meta-Data and store in Markdown.Meta. 
""" meta = {} key = None while lines: line = lines.pop(0) if line.strip() == '': break # blank line - done m1 = META_RE.match(line) if m1: key = m1.group('key').lower().strip() value = m1.group('value').strip() try: meta[key].append(value) except KeyError: meta[key] = [value] else: m2 = META_MORE_RE.match(line) if m2 and key: # Add another line to existing key meta[key].append(m2.group('value').strip()) else: lines.insert(0, line) break # no meta data - done self.markdown.Meta = meta return lines def makeExtension(configs={}): return MetaExtension(configs=configs)
bsd-3-clause
dchaplinsky/pep.org.ua
pepdb/tasks/management/commands/apply_adhoc_corrupt.py
1
2423
# -*- coding: utf-8 -*- from __future__ import unicode_literals from time import sleep from django.core.management.base import BaseCommand from django.conf import settings from django.utils.translation import activate from dateutil.parser import parse as dt_parse import tqdm from elasticsearch_dsl import Q from core.utils import render_date from core.models import Person from tasks.models import AdHocMatch class Command(BaseCommand): help = "Add data from matches with database of corruption crimes to the PEP db" def add_arguments(self, parser): parser.add_argument( "--real_run", default=False, action="store_true", help="Add matched data to wiki articles", ) def handle(self, *args, **options): q = AdHocMatch.objects.filter(dataset_id="corrupt", applied=False, status="a") wiki_updated = 0 activate(settings.LANGUAGE_CODE) with tqdm.tqdm(total=q.count()) as pbar: for match in q.select_related("person").nocache().iterator(): pbar.update(1) if not match.person: continue addition_to_wiki = "<p>{} {} притягувався до відповідальності за корупційне правопорушення, а саме за {}{} {}, деталі можна дізнатися в судовій справі № {}.</p>".format( match.matched_json["FIO"], match.matched_json["DATE_NAK_DST"], match.matched_json["STAT"] , match.matched_json["SPOS_VCH_DP"], match.matched_json["SKLAD_COR_PR"], match.matched_json["NUM_NAK_DST"] or match.matched_json["NUM_SUD_R"], ).replace(" ,", ",").replace(",,", ",") match.person.wiki_uk = (match.person.wiki_uk or "") + "\n{}".format( addition_to_wiki ) wiki_updated += 1 self.stdout.write("Updating page {}{}".format( settings.SITE_URL, match.person.get_absolute_url()) ) match.applied = True if options["real_run"]: match.person.save() match.save() self.stdout.write("Wiki updated: {}".format(wiki_updated))
mit
rizumu/django
tests/postgres_tests/models.py
231
3562
from django.db import connection, models

from .fields import (
    ArrayField, BigIntegerRangeField, DateRangeField, DateTimeRangeField,
    FloatRangeField, HStoreField, IntegerRangeField, JSONField,
)


class PostgreSQLModel(models.Model):
    """Abstract base restricting concrete subclasses to the PostgreSQL backend."""

    class Meta:
        abstract = True
        required_db_vendor = 'postgresql'


class IntegerArrayModel(PostgreSQLModel):
    field = ArrayField(models.IntegerField())


class NullableIntegerArrayModel(PostgreSQLModel):
    field = ArrayField(models.IntegerField(), blank=True, null=True)


class CharArrayModel(PostgreSQLModel):
    field = ArrayField(models.CharField(max_length=10))


class DateTimeArrayModel(PostgreSQLModel):
    # One array per temporal type to exercise each base field separately.
    datetimes = ArrayField(models.DateTimeField())
    dates = ArrayField(models.DateField())
    times = ArrayField(models.TimeField())


class NestedIntegerArrayModel(PostgreSQLModel):
    # Array-of-array (2D) storage.
    field = ArrayField(ArrayField(models.IntegerField()))


class OtherTypesArrayModel(PostgreSQLModel):
    ips = ArrayField(models.GenericIPAddressField())
    uuids = ArrayField(models.UUIDField())
    decimals = ArrayField(models.DecimalField(max_digits=5, decimal_places=2))


class HStoreModel(PostgreSQLModel):
    field = HStoreField(blank=True, null=True)


class CharFieldModel(models.Model):
    field = models.CharField(max_length=16)


class TextFieldModel(models.Model):
    field = models.TextField()


# Only create this model for postgres >= 9.2
# (range types are unavailable before 9.2, so the class is defined
# conditionally at import time).
if connection.vendor == 'postgresql' and connection.pg_version >= 90200:
    class RangesModel(PostgreSQLModel):
        ints = IntegerRangeField(blank=True, null=True)
        bigints = BigIntegerRangeField(blank=True, null=True)
        floats = FloatRangeField(blank=True, null=True)
        timestamps = DateTimeRangeField(blank=True, null=True)
        dates = DateRangeField(blank=True, null=True)

    class RangeLookupsModel(PostgreSQLModel):
        parent = models.ForeignKey(RangesModel, models.SET_NULL, blank=True, null=True)
        integer = models.IntegerField(blank=True, null=True)
        big_integer = models.BigIntegerField(blank=True, null=True)
        float = models.FloatField(blank=True, null=True)
        timestamp = models.DateTimeField(blank=True, null=True)
        date = models.DateField(blank=True, null=True)
else:
    # create an object with this name so we don't have failing imports
    class RangesModel(object):
        pass

    class RangeLookupsModel(object):
        pass


# Only create this model for postgres >= 9.4 (jsonb support).
if connection.vendor == 'postgresql' and connection.pg_version >= 90400:
    class JSONModel(models.Model):
        field = JSONField(blank=True, null=True)
else:
    # create an object with this name so we don't have failing imports
    class JSONModel(object):
        pass


class ArrayFieldSubclass(ArrayField):
    # NOTE(review): deliberately ignores *args/**kwargs and pins the base
    # field to IntegerField — used to test subclass deconstruction.
    def __init__(self, *args, **kwargs):
        super(ArrayFieldSubclass, self).__init__(models.IntegerField())


class AggregateTestModel(models.Model):
    """
    To test postgres-specific general aggregation functions
    """
    char_field = models.CharField(max_length=30, blank=True)
    integer_field = models.IntegerField(null=True)
    boolean_field = models.NullBooleanField()


class StatTestModel(models.Model):
    """
    To test postgres-specific aggregation functions for statistics
    """
    int1 = models.IntegerField()
    int2 = models.IntegerField()
    related_field = models.ForeignKey(AggregateTestModel, models.SET_NULL, null=True)


class NowTestModel(models.Model):
    when = models.DateTimeField(null=True, default=None)
bsd-3-clause
enddo/smod
System/Lib/scapy/contrib/skinny.py
10
18357
#! /usr/bin/env python

# scapy.contrib.description = Skinny Call Control Protocol (SCCP)
# scapy.contrib.status = loads

#############################################################################
# scapy-skinny.py --- Skinny Call Control Protocol (SCCP) extension
#
# Copyright (C) 2006 Nicolas Bareil <nicolas.bareil@ eads.net>
#                    EADS/CRC security team
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation; version 2.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#############################################################################

from scapy.all import *
import __builtin__

#####################################################################
# Helpers and constants
#####################################################################

# Maps SCCP message ids to the name of the Packet subclass that decodes the
# payload; unknown names fall back to SkinnyMessageGeneric (see bind loop
# at the bottom of the file).
skinny_messages_cls = {
    # Station -> Callmanager
    0x0000: "SkinnyMessageKeepAlive",
    0x0001: "SkinnyMessageRegister",
    0x0002: "SkinnyMessageIpPort",
    0x0003: "SkinnyMessageKeypadButton",
    0x0004: "SkinnyMessageEnblocCall",
    0x0005: "SkinnyMessageStimulus",
    0x0006: "SkinnyMessageOffHook",
    0x0007: "SkinnyMessageOnHook",
    0x0008: "SkinnyMessageHookFlash",
    0x0009: "SkinnyMessageForwardStatReq",
    0x000A: "SkinnyMessageSpeedDialStatReq",
    0x000B: "SkinnyMessageLineStatReq",
    0x000C: "SkinnyMessageConfigStatReq",
    0x000D: "SkinnyMessageTimeDateReq",
    0x000E: "SkinnyMessageButtonTemplateReq",
    0x000F: "SkinnyMessageVersionReq",
    0x0010: "SkinnyMessageCapabilitiesRes",
    0x0011: "SkinnyMessageMediaPortList",
    0x0012: "SkinnyMessageServerReq",
    0x0020: "SkinnyMessageAlarm",
    0x0021: "SkinnyMessageMulticastMediaReceptionAck",
    0x0022: "SkinnyMessageOpenReceiveChannelAck",
    0x0023: "SkinnyMessageConnectionStatisticsRes",
    0x0024: "SkinnyMessageOffHookWithCgpn",
    0x0025: "SkinnyMessageSoftKeySetReq",
    0x0026: "SkinnyMessageSoftKeyEvent",
    0x0027: "SkinnyMessageUnregister",
    0x0028: "SkinnyMessageSoftKeyTemplateReq",
    0x0029: "SkinnyMessageRegisterTokenReq",
    0x002A: "SkinnyMessageMediaTransmissionFailure",
    0x002B: "SkinnyMessageHeadsetStatus",
    0x002C: "SkinnyMessageMediaResourceNotification",
    0x002D: "SkinnyMessageRegisterAvailableLines",
    0x002E: "SkinnyMessageDeviceToUserData",
    0x002F: "SkinnyMessageDeviceToUserDataResponse",
    0x0030: "SkinnyMessageUpdateCapabilities",
    0x0031: "SkinnyMessageOpenMultiMediaReceiveChannelAck",
    0x0032: "SkinnyMessageClearConference",
    0x0033: "SkinnyMessageServiceURLStatReq",
    0x0034: "SkinnyMessageFeatureStatReq",
    0x0035: "SkinnyMessageCreateConferenceRes",
    0x0036: "SkinnyMessageDeleteConferenceRes",
    0x0037: "SkinnyMessageModifyConferenceRes",
    0x0038: "SkinnyMessageAddParticipantRes",
    0x0039: "SkinnyMessageAuditConferenceRes",
    0x0040: "SkinnyMessageAuditParticipantRes",
    0x0041: "SkinnyMessageDeviceToUserDataVersion1",
    # Callmanager -> Station
    0x0081: "SkinnyMessageRegisterAck",
    0x0082: "SkinnyMessageStartTone",
    0x0083: "SkinnyMessageStopTone",
    0x0085: "SkinnyMessageSetRinger",
    0x0086: "SkinnyMessageSetLamp",
    0x0087: "SkinnyMessageSetHkFDetect",
    0x0088: "SkinnyMessageSpeakerMode",
    0x0089: "SkinnyMessageSetMicroMode",
    0x008A: "SkinnyMessageStartMediaTransmission",
    0x008B: "SkinnyMessageStopMediaTransmission",
    0x008C: "SkinnyMessageStartMediaReception",
    0x008D: "SkinnyMessageStopMediaReception",
    0x008F: "SkinnyMessageCallInfo",
    0x0090: "SkinnyMessageForwardStat",
    0x0091: "SkinnyMessageSpeedDialStat",
    0x0092: "SkinnyMessageLineStat",
    0x0093: "SkinnyMessageConfigStat",
    0x0094: "SkinnyMessageTimeDate",
    0x0095: "SkinnyMessageStartSessionTransmission",
    0x0096: "SkinnyMessageStopSessionTransmission",
    0x0097: "SkinnyMessageButtonTemplate",
    0x0098: "SkinnyMessageVersion",
    0x0099: "SkinnyMessageDisplayText",
    0x009A: "SkinnyMessageClearDisplay",
    0x009B: "SkinnyMessageCapabilitiesReq",
    0x009C: "SkinnyMessageEnunciatorCommand",
    0x009D: "SkinnyMessageRegisterReject",
    0x009E: "SkinnyMessageServerRes",
    0x009F: "SkinnyMessageReset",
    0x0100: "SkinnyMessageKeepAliveAck",
    0x0101: "SkinnyMessageStartMulticastMediaReception",
    0x0102: "SkinnyMessageStartMulticastMediaTransmission",
    0x0103: "SkinnyMessageStopMulticastMediaReception",
    0x0104: "SkinnyMessageStopMulticastMediaTransmission",
    0x0105: "SkinnyMessageOpenReceiveChannel",
    0x0106: "SkinnyMessageCloseReceiveChannel",
    0x0107: "SkinnyMessageConnectionStatisticsReq",
    0x0108: "SkinnyMessageSoftKeyTemplateRes",
    0x0109: "SkinnyMessageSoftKeySetRes",
    0x0110: "SkinnyMessageSoftKeyEvent",
    0x0111: "SkinnyMessageCallState",
    0x0112: "SkinnyMessagePromptStatus",
    0x0113: "SkinnyMessageClearPromptStatus",
    0x0114: "SkinnyMessageDisplayNotify",
    0x0115: "SkinnyMessageClearNotify",
    0x0116: "SkinnyMessageCallPlane",
    0x0117: "SkinnyMessageCallPlane",
    0x0118: "SkinnyMessageUnregisterAck",
    0x0119: "SkinnyMessageBackSpaceReq",
    0x011A: "SkinnyMessageRegisterTokenAck",
    0x011B: "SkinnyMessageRegisterTokenReject",
    0x0042: "SkinnyMessageDeviceToUserDataResponseVersion1",
    0x011C: "SkinnyMessageStartMediaFailureDetection",
    0x011D: "SkinnyMessageDialedNumber",
    0x011E: "SkinnyMessageUserToDeviceData",
    0x011F: "SkinnyMessageFeatureStat",
    0x0120: "SkinnyMessageDisplayPriNotify",
    0x0121: "SkinnyMessageClearPriNotify",
    0x0122: "SkinnyMessageStartAnnouncement",
    0x0123: "SkinnyMessageStopAnnouncement",
    0x0124: "SkinnyMessageAnnouncementFinish",
    0x0127: "SkinnyMessageNotifyDtmfTone",
    0x0128: "SkinnyMessageSendDtmfTone",
    0x0129: "SkinnyMessageSubscribeDtmfPayloadReq",
    0x012A: "SkinnyMessageSubscribeDtmfPayloadRes",
    0x012B: "SkinnyMessageSubscribeDtmfPayloadErr",
    0x012C: "SkinnyMessageUnSubscribeDtmfPayloadReq",
    0x012D: "SkinnyMessageUnSubscribeDtmfPayloadRes",
    0x012E: "SkinnyMessageUnSubscribeDtmfPayloadErr",
    0x012F: "SkinnyMessageServiceURLStat",
    0x0130: "SkinnyMessageCallSelectStat",
    0x0131: "SkinnyMessageOpenMultiMediaChannel",
    0x0132: "SkinnyMessageStartMultiMediaTransmission",
    0x0133: "SkinnyMessageStopMultiMediaTransmission",
    0x0134: "SkinnyMessageMiscellaneousCommand",
    0x0135: "SkinnyMessageFlowControlCommand",
    0x0136: "SkinnyMessageCloseMultiMediaReceiveChannel",
    0x0137: "SkinnyMessageCreateConferenceReq",
    0x0138: "SkinnyMessageDeleteConferenceReq",
    0x0139: "SkinnyMessageModifyConferenceReq",
    0x013A: "SkinnyMessageAddParticipantReq",
    0x013B: "SkinnyMessageDropParticipantReq",
    0x013C: "SkinnyMessageAuditConferenceReq",
    0x013D: "SkinnyMessageAuditParticipantReq",
    0x013F: "SkinnyMessageUserToDeviceDataVersion1",
}

# Enum tables for the LEIntEnumFields below (sparse; only known values named).
skinny_callstates = {
    0x1: "Off Hook",
    0x2: "On Hook",
    0x3: "Ring out",
    0xc: "Proceeding",
}

skinny_ring_type = {
    0x1: "Ring off"
}

skinny_speaker_modes = {
    0x1: "Speaker on",
    0x2: "Speaker off"
}

skinny_lamp_mode = {
    0x1: "Off (?)",
    0x2: "On",
}

skinny_stimulus = {
    0x9: "Line"
}


############
## Fields ##
############

class SkinnyDateTimeField(StrFixedLenField):
    """32-byte wire field holding 8 little-endian uint32s
    (year, month, day-of-week, day, hour, min, sec, millisecond);
    presented as a 6-tuple (year, month, day, hour, min, sec)."""

    def __init__(self, name, default):
        StrFixedLenField.__init__(self, name, default, 32)

    def m2i(self, pkt, s):
        # dow and milisecond are decoded but intentionally dropped.
        year, month, dow, day, hour, min, sec, milisecond = struct.unpack('<8I', s)
        return (year, month, day, hour, min, sec)

    def i2m(self, pkt, val):
        if type(val) is str:
            val = self.h2i(pkt, val)
        # Re-insert zero placeholders for day-of-week and millisecond.
        l = val[:2] + (0,) + val[2:7] + (0,)
        return struct.pack('<8I', *l)

    def i2h(self, pkt, x):
        if type(x) is str:
            return x
        else:
            return time.ctime(time.mktime(x + (0, 0, 0)))

    def i2repr(self, pkt, x):
        return self.i2h(pkt, x)

    def h2i(self, pkt, s):
        t = ()
        if type(s) is str:
            t = time.strptime(s)
            t = t[:2] + t[2:-3]
        else:
            if not s:
                # Default to "now" (UTC) when no value is supplied.
                y, m, d, h, min, sec, rest, rest, rest = time.gmtime(time.time())
                t = (y, m, d, h, min, sec)
            else:
                t = s
        return t


###########################
## Packet abstract class ##
###########################

class SkinnyMessageGeneric(Packet):
    name = 'Generic message'


class SkinnyMessageKeepAlive(Packet):
    name = 'keep alive'


class SkinnyMessageKeepAliveAck(Packet):
    name = 'keep alive ack'


class SkinnyMessageOffHook(Packet):
    name = 'Off Hook'
    fields_desc = [LEIntField("unknown1", 0),
                   LEIntField("unknown2", 0)]


class SkinnyMessageOnHook(SkinnyMessageOffHook):
    name = 'On Hook'


class SkinnyMessageCallState(Packet):
    name = 'Skinny Call state message'
    fields_desc = [LEIntEnumField("state", 1, skinny_callstates),
                   LEIntField("instance", 1),
                   LEIntField("callid", 0),
                   LEIntField("unknown1", 4),
                   LEIntField("unknown2", 0),
                   LEIntField("unknown3", 0)]


class SkinnyMessageSoftKeyEvent(Packet):
    name = 'Soft Key Event'
    fields_desc = [LEIntField("key", 0),
                   LEIntField("instance", 1),
                   LEIntField("callid", 0)]


class SkinnyMessageSetRinger(Packet):
    name = 'Ring message'
    fields_desc = [LEIntEnumField("ring", 0x1, skinny_ring_type),
                   LEIntField("unknown1", 0),
                   LEIntField("unknown2", 0),
                   LEIntField("unknown3", 0)]


_skinny_tones = {
    0x21: 'Inside dial tone',
    0x22: 'xxx',
    0x23: 'xxx',
    0x24: 'Alerting tone',
    0x25: 'Reorder Tone'
}


class SkinnyMessageStartTone(Packet):
    name = 'Start tone'
    fields_desc = [LEIntEnumField("tone", 0x21, _skinny_tones),
                   LEIntField("unknown1", 0),
                   LEIntField("instance", 1),
                   LEIntField("callid", 0)]


class SkinnyMessageStopTone(SkinnyMessageGeneric):
    name = 'stop tone'
    fields_desc = [LEIntField("instance", 1),
                   LEIntField("callid", 0)]


class SkinnyMessageSpeakerMode(Packet):
    name = 'Speaker mdoe'
    fields_desc = [LEIntEnumField("ring", 0x1, skinny_speaker_modes)]


class SkinnyMessageSetLamp(Packet):
    name = 'Lamp message (light of the phone)'
    fields_desc = [LEIntEnumField("stimulus", 0x5, skinny_stimulus),
                   LEIntField("instance", 1),
                   LEIntEnumField("mode", 2, skinny_lamp_mode)]


# NOTE(review): this REDEFINES SkinnyMessageSoftKeyEvent (already defined
# above with key/instance/callid fields); this second definition wins for
# the bind_layers loop below.  The set/map fields suggest it was meant to be
# a distinct "select soft keys"-style message — confirm against upstream
# scapy before renaming, as renaming changes which ids decode with it.
class SkinnyMessageSoftKeyEvent(Packet):
    name = ' Call state message'
    fields_desc = [LEIntField("instance", 1),
                   LEIntField("callid", 0),
                   LEIntField("set", 0),
                   LEIntField("map", 0xffff)]


class SkinnyMessagePromptStatus(Packet):
    name = 'Prompt status'
    fields_desc = [LEIntField("timeout", 0),
                   StrFixedLenField("text", "\0" * 32, 32),
                   LEIntField("instance", 1),
                   LEIntField("callid", 0)]


class SkinnyMessageCallPlane(Packet):
    name = 'Activate/Desactivate Call Plane Message'
    fields_desc = [LEIntField("instance", 1)]


class SkinnyMessageTimeDate(Packet):
    name = 'Setting date and time'
    fields_desc = [SkinnyDateTimeField("settime", None),
                   LEIntField("timestamp", 0)]


class SkinnyMessageClearPromptStatus(Packet):
    name = 'clear prompt status'
    fields_desc = [LEIntField("instance", 1),
                   LEIntField("callid", 0)]


class SkinnyMessageKeypadButton(Packet):
    name = 'keypad button'
    fields_desc = [LEIntField("key", 0),
                   LEIntField("instance", 1),
                   LEIntField("callid", 0)]


class SkinnyMessageDialedNumber(Packet):
    name = 'dialed number'
    fields_desc = [StrFixedLenField("number", "1337", 24),
                   LEIntField("instance", 1),
                   LEIntField("callid", 0)]


# 16 flag names for the CallInfo "restriction" bitfield; bits 8-14 are unnamed.
_skinny_message_callinfo_restrictions = ['CallerName',
                                         'CallerNumber',
                                         'CalledName',
                                         'CalledNumber',
                                         'OriginalCalledName',
                                         'OriginalCalledNumber',
                                         'LastRedirectName',
                                         'LastRedirectNumber'] + ['Bit%d' % i for i in range(8, 15)]


class SkinnyMessageCallInfo(Packet):
    name = 'call information'
    fields_desc = [StrFixedLenField("callername", "Jean Valjean", 40),
                   StrFixedLenField("callernum", "1337", 24),
                   StrFixedLenField("calledname", "Causette", 40),
                   StrFixedLenField("callednum", "1034", 24),
                   LEIntField("lineinstance", 1),
                   LEIntField("callid", 0),
                   StrFixedLenField("originalcalledname", "Causette", 40),
                   StrFixedLenField("originalcallednum", "1034", 24),
                   StrFixedLenField("lastredirectingname", "Causette", 40),
                   StrFixedLenField("lastredirectingnum", "1034", 24),
                   LEIntField("originalredirectreason", 0),
                   LEIntField("lastredirectreason", 0),
                   StrFixedLenField('voicemailboxG', '\0' * 24, 24),
                   StrFixedLenField('voicemailboxD', '\0' * 24, 24),
                   StrFixedLenField('originalvoicemailboxD', '\0' * 24, 24),
                   StrFixedLenField('lastvoicemailboxD', '\0' * 24, 24),
                   LEIntField('security', 0),
                   FlagsField('restriction', 0, 16, _skinny_message_callinfo_restrictions),
                   LEIntField('unknown', 0)]


class SkinnyRateField(LEIntField):
    # Same wire format as LEIntField; only the display differs.
    def i2repr(self, pkt, x):
        if x is None:
            x = 0
        return '%d ms/pkt' % x


_skinny_codecs = {
    0x0: 'xxx',
    0x1: 'xxx',
    0x2: 'xxx',
    0x3: 'xxx',
    0x4: 'G711 ulaw 64k'
}

_skinny_echo = {
    0x0: 'echo cancelation off',
    0x1: 'echo cancelation on'
}


class SkinnyMessageOpenReceiveChannel(Packet):
    name = 'open receive channel'
    fields_desc = [LEIntField('conference', 0),
                   LEIntField('passthru', 0),
                   SkinnyRateField('rate', 20),
                   LEIntEnumField('codec', 4, _skinny_codecs),
                   LEIntEnumField('echo', 0, _skinny_echo),
                   LEIntField('unknown1', 0),
                   LEIntField('callid', 0)]

    def guess_payload_class(self, p):
        # Trailing bytes are padding, not a nested protocol.
        return conf.padding_layer


_skinny_receive_channel_status = {
    0x0: 'ok',
    0x1: 'ko'
}


class SkinnyMessageOpenReceiveChannelAck(Packet):
    name = 'open receive channel'
    fields_desc = [LEIntEnumField('status', 0, _skinny_receive_channel_status),
                   IPField('remote', '0.0.0.0'),
                   LEIntField('port', RandShort()),
                   LEIntField('passthru', 0),
                   LEIntField('callid', 0)]


_skinny_silence = {
    0x0: 'silence suppression off',
    0x1: 'silence suppression on',
}


class SkinnyFramePerPacketField(LEIntField):
    def i2repr(self, pkt, x):
        if x is None:
            x = 0
        return '%d frames/pkt' % x


class SkinnyMessageStartMediaTransmission(Packet):
    name = 'start multimedia transmission'
    fields_desc = [LEIntField('conference', 0),
                   LEIntField('passthru', 0),
                   IPField('remote', '0.0.0.0'),
                   LEIntField('port', RandShort()),
                   SkinnyRateField('rate', 20),
                   LEIntEnumField('codec', 4, _skinny_codecs),
                   LEIntField('precedence', 200),
                   LEIntEnumField('silence', 0, _skinny_silence),
                   SkinnyFramePerPacketField('maxframes', 0),
                   LEIntField('unknown1', 0),
                   LEIntField('callid', 0)]

    def guess_payload_class(self, p):
        return conf.padding_layer


# NOTE(review): this field list mirrors StartMediaTransmission (remote/port/
# rate/codec/...), which looks copy-pasted; the real CloseReceiveChannel PDU
# may be shorter — confirm against the SCCP specification before changing.
class SkinnyMessageCloseReceiveChannel(Packet):
    name = 'close receive channel'
    fields_desc = [LEIntField('conference', 0),
                   LEIntField('passthru', 0),
                   IPField('remote', '0.0.0.0'),
                   LEIntField('port', RandShort()),
                   SkinnyRateField('rate', 20),
                   LEIntEnumField('codec', 4, _skinny_codecs),
                   LEIntField('precedence', 200),
                   LEIntEnumField('silence', 0, _skinny_silence),
                   LEIntField('callid', 0)]


class SkinnyMessageStopMultiMediaTransmission(Packet):
    name = 'stop multimedia transmission'
    fields_desc = [LEIntField('conference', 0),
                   LEIntField('passthru', 0),
                   LEIntField('callid', 0)]


class Skinny(Packet):
    """SCCP transport header: length, reserved word, then the message id that
    selects which SkinnyMessage* layer follows (see bind loop below)."""
    name = "Skinny"
    fields_desc = [LEIntField("len", None),
                   LEIntField("res", 0),
                   # BUG FIX: the original referenced the undefined name
                   # `skinny_messages` (NameError at import time); the message
                   # table defined above — and used by the bind_layers loop
                   # below — is `skinny_messages_cls`.
                   LEIntEnumField("msg", 0, skinny_messages_cls)]

    def post_build(self, pkt, p):
        if self.len is None:
            # Length excludes the len and reserved header words (8 bytes).
            l = len(p) + len(pkt) - 8
            pkt = struct.pack('@I', l) + pkt[4:]
        return pkt + p


# A helper: resolve a message class by name, falling back when not defined.
def get_cls(name, fallback_cls):
    return globals().get(name, fallback_cls)
    #return __builtin__.__dict__.get(name, fallback_cls)


# Bind every known message id to its decoder class (generic fallback).
for msgid, strcls in skinny_messages_cls.items():
    cls = get_cls(strcls, SkinnyMessageGeneric)
    bind_layers(Skinny, cls, {"msg": msgid})

bind_layers(TCP, Skinny, {"dport": 2000})
bind_layers(TCP, Skinny, {"sport": 2000})

if __name__ == "__main__":
    interact(mydict=globals(), mybanner="Welcome to Skinny add-on")
gpl-2.0
bakhtout/odoo-educ
addons/base_setup/base_setup.py
382
5430
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import simplejson
import cgi
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from lxml import etree

# Specify Your Terminology will move to 'partner' module
class specify_partner_terminology(osv.osv_memory):
    """Configuration wizard that rewrites every occurrence of the word
    "Customer" in field labels, tooltips, menu items and action windows
    with the terminology chosen by the user (Client, Member, Patient, ...),
    by creating/updating ir.translation records for the current user's
    language."""

    _name = 'base.setup.terminology'
    _inherit = 'res.config'
    _columns = {
        'partner': fields.selection([
            ('Customer','Customer'),
            ('Client','Client'),
            ('Member','Member'),
            ('Patient','Patient'),
            ('Partner','Partner'),
            ('Donor','Donor'),
            ('Guest','Guest'),
            ('Tenant','Tenant')
        ], 'How do you call a Customer', required=True ),
    }
    _defaults={
        'partner' :'Customer',
    }

    def make_translations(self, cr, uid, ids, name, type, src, value, res_id=0, context=None):
        """Create or update one ir.translation record for the current user's
        language.  An existing record matching (name, lang, type, src, res_id)
        is overwritten with the new value; otherwise a new record is created.
        Always returns an empty dict (wizard convention)."""
        trans_obj = self.pool.get('ir.translation')
        user_obj = self.pool.get('res.users')
        # Translations are written for the language of the user running the wizard.
        context_lang = user_obj.browse(cr, uid, uid, context=context).lang
        existing_trans_ids = trans_obj.search(cr, uid, [('name','=',name), ('lang','=',context_lang), ('type','=',type), ('src','=',src), ('res_id','=',res_id)])
        if existing_trans_ids:
            trans_obj.write(cr, uid, existing_trans_ids, {'value': value}, context=context)
        else:
            create_id = trans_obj.create(cr, uid, {'name': name,'lang': context_lang, 'type': type, 'src': src, 'value': value , 'res_id': res_id}, context=context)
        return {}

    def execute(self, cr, uid, ids, context=None):
        """Apply the chosen terminology: scan field descriptions, field help
        texts, menu items and act_window names/tooltips containing "Customer"
        (case-insensitive) and register the substituted translations."""
        def _case_insensitive_replace(ref_string, src, value):
            # Case-insensitive substitution of src by the (translated) value
            # inside the (translated) reference string.
            import re
            pattern = re.compile(src, re.IGNORECASE)
            return pattern.sub(_(value), _(ref_string))
        trans_obj = self.pool.get('ir.translation')
        fields_obj = self.pool.get('ir.model.fields')
        menu_obj = self.pool.get('ir.ui.menu')
        act_window_obj = self.pool.get('ir.actions.act_window')
        for o in self.browse(cr, uid, ids, context=context):
            #translate label of field
            field_ids = fields_obj.search(cr, uid, [('field_description','ilike','Customer')])
            for f_id in fields_obj.browse(cr ,uid, field_ids, context=context):
                # Translation key convention: "<model>,<field>".
                field_ref = f_id.model_id.model + ',' + f_id.name
                self.make_translations(cr, uid, ids, field_ref, 'field', f_id.field_description, _case_insensitive_replace(f_id.field_description,'Customer',o.partner), context=context)
            #translate help tooltip of field
            # Iterates every registered model's columns in memory rather than
            # searching the database.
            for obj in self.pool.models.values():
                for field_name, field_rec in obj._columns.items():
                    if field_rec.help.lower().count('customer'):
                        field_ref = obj._name + ',' + field_name
                        self.make_translations(cr, uid, ids, field_ref, 'help', field_rec.help, _case_insensitive_replace(field_rec.help,'Customer',o.partner), context=context)
            #translate menuitems
            menu_ids = menu_obj.search(cr,uid, [('name','ilike','Customer')])
            for m_id in menu_obj.browse(cr, uid, menu_ids, context=context):
                menu_name = m_id.name
                menu_ref = 'ir.ui.menu' + ',' + 'name'
                self.make_translations(cr, uid, ids, menu_ref, 'model', menu_name, _case_insensitive_replace(menu_name,'Customer',o.partner), res_id=m_id.id, context=context)
            #translate act window name
            act_window_ids = act_window_obj.search(cr, uid, [('name','ilike','Customer')])
            for act_id in act_window_obj.browse(cr ,uid, act_window_ids, context=context):
                act_ref = 'ir.actions.act_window' + ',' + 'name'
                self.make_translations(cr, uid, ids, act_ref, 'model', act_id.name, _case_insensitive_replace(act_id.name,'Customer',o.partner), res_id=act_id.id, context=context)
            #translate act window tooltips
            act_window_ids = act_window_obj.search(cr, uid, [('help','ilike','Customer')])
            for act_id in act_window_obj.browse(cr ,uid, act_window_ids, context=context):
                act_ref = 'ir.actions.act_window' + ',' + 'help'
                self.make_translations(cr, uid, ids, act_ref, 'model', act_id.help, _case_insensitive_replace(act_id.help,'Customer',o.partner), res_id=act_id.id, context=context)
        return {}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
yk5/incubator-airflow
tests/utils.py
15
3597
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import logging
import unittest

import airflow.utils.logging
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.utils.operator_resources import Resources


class LogUtilsTest(unittest.TestCase):
    """Tests for GCS URL parsing in airflow.utils.logging.GCSLog."""

    def test_gcs_url_parse(self):
        """
        Test GCS url parsing
        """
        logging.info(
            'About to create a GCSLog object without a connection. This will '
            'log an error but testing will proceed.')
        glog = airflow.utils.logging.GCSLog()

        # Standard gs:// URL splits into (bucket, blob path).
        self.assertEqual(
            glog.parse_gcs_url('gs://bucket/path/to/blob'),
            ('bucket', 'path/to/blob'))

        # invalid URI
        self.assertRaises(
            AirflowException,
            glog.parse_gcs_url,
            'gs:/bucket/path/to/blob')

        # trailing slash
        self.assertEqual(
            glog.parse_gcs_url('gs://bucket/path/to/blob/'),
            ('bucket', 'path/to/blob'))

        # bucket only
        self.assertEqual(
            glog.parse_gcs_url('gs://bucket/'),
            ('bucket', ''))


class OperatorResourcesTest(unittest.TestCase):
    """Tests that Resources fills unspecified slots from the test config
    defaults and rejects negative quantities."""

    def setUp(self):
        configuration.load_test_config()

    def test_all_resources_specified(self):
        resources = Resources(cpus=1, ram=2, disk=3, gpus=4)
        self.assertEqual(resources.cpus.qty, 1)
        self.assertEqual(resources.ram.qty, 2)
        self.assertEqual(resources.disk.qty, 3)
        self.assertEqual(resources.gpus.qty, 4)

    def test_some_resources_specified(self):
        # Explicit zero must be honored (not replaced by the default).
        resources = Resources(cpus=0, disk=1)
        self.assertEqual(resources.cpus.qty, 0)
        self.assertEqual(resources.ram.qty,
                         configuration.conf.getint('operators', 'default_ram'))
        self.assertEqual(resources.disk.qty, 1)
        self.assertEqual(resources.gpus.qty,
                         configuration.conf.getint('operators', 'default_gpus'))

    def test_no_resources_specified(self):
        resources = Resources()
        self.assertEqual(resources.cpus.qty,
                         configuration.conf.getint('operators', 'default_cpus'))
        self.assertEqual(resources.ram.qty,
                         configuration.conf.getint('operators', 'default_ram'))
        self.assertEqual(resources.disk.qty,
                         configuration.conf.getint('operators', 'default_disk'))
        self.assertEqual(resources.gpus.qty,
                         configuration.conf.getint('operators', 'default_gpus'))

    def test_negative_resource_qty(self):
        with self.assertRaises(AirflowException):
            Resources(cpus=-1)
apache-2.0
neno1978/pelisalacarta
python/main-classic/servers/decrypters/linkbucks.py
2
1911
# -*- coding: utf-8 -*- # ------------------------------------------------------------ # pelisalacarta - XBMC Plugin # Conector para linkbucks # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ # ------------------------------------------------------------ import urllib from core import logger from core import scrapertools # Obtiene la URL que hay detrás de un enlace a linkbucks def get_long_url(short_url): logger.info("(short_url='%s')" % short_url) request_headers = [] request_headers.append(["User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"]) request_headers.append(["Referer", "http://linkdecrypter.com"]) post = urllib.urlencode({"pro_links": short_url, "modo_links": "text", "modo_recursivo": "on", "link_cache": "on"}) url = "http://linkdecrypter.com/" # Parche porque python no parece reconocer bien la cabecera phpsessid body, response_headers = scrapertools.read_body_and_headers(url, post=post, headers=request_headers) location = "" n = 1 while True: for name, value in response_headers: if name == "set-cookie": logger.info("Set-Cookie: " + value) cookie_name = scrapertools.get_match(value, '(.*?)\=.*?\;') cookie_value = scrapertools.get_match(value, '.*?\=(.*?)\;') request_headers.append(["Cookie", cookie_name + "=" + cookie_value]) body, response_headers = scrapertools.read_body_and_headers(url, headers=request_headers) logger.info("body=" + body) try: location = scrapertools.get_match(body, '<textarea.*?class="caja_des">([^<]+)</textarea>') logger.info("location=" + location) break except: n = n + 1 if n > 3: break return location
gpl-3.0
datagovuk/ckanext-issues
ckanext/issues/tests/controllers/test_search.py
2
6940
"""Functional tests for the ckanext-issues search UI (search box + filters)."""
from ckan.plugins import toolkit
try:
    from ckan.tests import helpers
    from ckan.tests import factories
except ImportError:
    # Older CKAN releases shipped the test helpers under new_tests.
    from ckan.new_tests import helpers
    from ckan.new_tests import factories
from ckanext.issues.tests import factories as issue_factories
from ckanext.issues import model as issue_model
from nose.tools import (assert_is_not_none, assert_equals, assert_in,
                        assert_not_in)
import bs4


class TestSearchBox(helpers.FunctionalTestBase):
    """The issue search box on a dataset's issues page."""

    def setup(self):
        # One org-owned dataset with a single issue, plus a test WSGI app.
        super(TestSearchBox, self).setup()
        self.owner = factories.User()
        self.org = factories.Organization(user=self.owner)
        self.dataset = factories.Dataset(user=self.owner,
                                         owner_org=self.org['name'])
        self.issue = issue_factories.Issue(user=self.owner,
                                           dataset_id=self.dataset['id'])
        self.app = self._get_test_app()

    def test_search_box_appears_issue_dataset_page(self):
        """The issues page renders a form with class 'search-form'."""
        response = self.app.get(
            url=toolkit.url_for('issues_dataset',
                                dataset_id=self.dataset['id'],
                                issue_number=self.issue['number']),
        )
        soup = bs4.BeautifulSoup(response.body)
        edit_button = soup.find('form', {'class': 'search-form'})
        assert_is_not_none(edit_button)

    def test_search_box_submits_q_get(self):
        """Submitting the search form filters the issue list by title."""
        in_search = [issue_factories.Issue(user_id=self.owner['id'],
                                           dataset_id=self.dataset['id'],
                                           title=title)
                     for title in ['some titLe', 'another Title']]
        # some issues not in the search
        [issue_factories.Issue(user_id=self.owner['id'],
                               dataset_id=self.dataset['id'],
                               title=title)
         for title in ['blah', 'issue']]
        issue_dataset = self.app.get(
            url=toolkit.url_for('issues_dataset',
                                dataset_id=self.dataset['id'],
                                issue_number=self.issue['number']),
        )
        # NOTE(review): forms[1] assumes the search form is the second form
        # on the page — confirm against the template if the page changes.
        search_form = issue_dataset.forms[1]
        search_form['q'] = 'title'
        res = search_form.submit()
        soup = bs4.BeautifulSoup(res.body)
        issue_links = soup.find(id='issue-list').find_all('h4')
        titles = set([i.a.text.strip() for i in issue_links])
        assert_equals(set([i['title'] for i in in_search]), titles)


class TestSearchFilters(helpers.FunctionalTestBase):
    """The visibility (visible/hidden) and status (open/closed) filters."""

    def setup(self):
        # Three issues: one open+visible, one closed, one hidden.
        super(TestSearchFilters, self).setup()
        self.owner = factories.User()
        self.org = factories.Organization(user=self.owner)
        self.dataset = factories.Dataset(user=self.owner,
                                         owner_org=self.org['name'])
        self.issues = {
            'visible': issue_factories.Issue(user=self.owner,
                                             title='visible_issue',
                                             dataset_id=self.dataset['id']),
            'closed': issue_factories.Issue(user=self.owner,
                                            title='closed_issue',
                                            dataset_id=self.dataset['id']),
            'hidden': issue_factories.Issue(user=self.owner,
                                            title='hidden_issue',
                                            dataset_id=self.dataset['id'],
                                            visibility='hidden'),
        }
        # close our issue
        helpers.call_action(
            'issue_update',
            issue_number=self.issues['closed']['number'],
            dataset_id=self.dataset['id'],
            context={'user': self.owner['name']},
            status='closed'
        )
        # Force visibility at the model level as well; presumably the
        # factory's visibility kwarg alone is not persisted — TODO confirm.
        issue = issue_model.Issue.get(self.issues['hidden']['id'])
        issue.visibility = 'hidden'
        issue.save()
        self.app = self._get_test_app()

    def test_click_visiblity_links(self):
        """Clicking hidden/visible filter links narrows and clears the list."""
        env = {'REMOTE_USER': self.owner['name'].encode('ascii')}
        response = self.app.get(
            url=toolkit.url_for('issues_dataset',
                                dataset_id=self.dataset['id']),
            extra_environ=env,
        )
        # visible and hidden should be shown, but not closed
        assert_in('2 issues found', response)
        assert_in('visible_issue', response)
        assert_in('hidden_issue', response)
        assert_not_in('closed_issue', response)
        # click the hidden filter
        response = response.click(linkid='hidden-filter', extra_environ=env)
        assert_in('1 issue found', response)
        assert_not_in('visible_issue', response)
        assert_in('hidden_issue', response)
        assert_not_in('closed_issue', response)
        # click the visible filter
        response = response.click(linkid='visible-filter', extra_environ=env)
        assert_in('1 issue found', response)
        assert_in('visible_issue', response)
        assert_not_in('hidden_issue', response)
        assert_not_in('closed_issue', response)
        # clear the filter by clicking on visible again
        response = response.click(linkid='visible-filter', extra_environ=env)
        assert_in('2 issues found', response)
        assert_in('visible_issue', response)
        assert_in('hidden_issue', response)
        assert_not_in('closed_issue', response)

    def test_click_status_links(self):
        """Clicking closed/open filter links toggles the status filter."""
        env = {'REMOTE_USER': self.owner['name'].encode('ascii')}
        response = self.app.get(
            url=toolkit.url_for('issues_dataset',
                                dataset_id=self.dataset['id']),
            extra_environ=env,
        )
        # visible and hidden should be shown, but not closed
        assert_in('2 issues found', response)
        assert_in('visible_issue', response)
        assert_in('hidden_issue', response)
        assert_not_in('closed_issue', response)
        # click the closed filter
        response = response.click(linkid='closed-filter', extra_environ=env)
        assert_in('1 issue found', response)
        assert_not_in('visible_issue', response)
        assert_not_in('hidden_issue', response)
        assert_in('closed_issue', response)
        # click the open filter
        response = response.click(linkid='open-filter', extra_environ=env)
        assert_in('2 issues found', response)
        assert_in('visible_issue', response)
        assert_in('hidden_issue', response)
        assert_not_in('closed_issue', response)

    def test_visiblity_links_do_not_appear_for_unauthed_user(self):
        """Anonymous users must not see the visibility filter links."""
        response = self.app.get(
            url=toolkit.url_for('issues_dataset',
                                dataset_id=self.dataset['id']),
        )
        assert_not_in('filter-hidden', response)
        assert_not_in('filter-visible', response)
mit
Dunkas12/BeepBoopBot
lib/youtube_dl/extractor/clipfish.py
17
2371
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    unified_strdate,
)


class ClipfishIE(InfoExtractor):
    """Extractor for clipfish.de videos, backed by the site's JSON dev API."""

    _VALID_URL = r'https?://(?:www\.)?clipfish\.de/(?:[^/]+/)+video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.clipfish.de/special/ugly-americans/video/4343170/s01-e01-ugly-americans-date-in-der-hoelle/',
        'md5': '720563e467b86374c194bdead08d207d',
        'info_dict': {
            'id': '4343170',
            'ext': 'mp4',
            'title': 'S01 E01 - Ugly Americans - Date in der Hölle',
            'description': 'Mark Lilly arbeitet im Sozialdienst der Stadt New York und soll Immigranten bei ihrer Einbürgerung in die USA zur Seite stehen.',
            'upload_date': '20161005',
            'duration': 1291,
            'view_count': int,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # First item of the API response carries all the metadata we need.
        info = self._download_json(
            'http://www.clipfish.de/devapi/id/%s?format=json&apikey=hbbtv' % video_id,
            video_id)['items'][0]

        formats = []

        hls_url = info.get('media_videourl_hls')
        if hls_url:
            # The API hands back a host that does not resolve publicly;
            # rewrite it to the working HLS endpoint.
            formats.append({
                'url': hls_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'),
                'ext': 'mp4',
                'format_id': 'hls',
            })

        http_url = info.get('media_videourl')
        if http_url:
            formats.append({
                'url': http_url,
                'format_id': 'mp4',
                'width': int_or_none(info.get('width')),
                'height': int_or_none(info.get('height')),
                'tbr': int_or_none(info.get('bitrate')),
            })

        description = info.get('descr')
        if description:
            description = description.strip()

        return {
            'id': video_id,
            'title': info['title'],
            'description': description,
            'formats': formats,
            'thumbnail': info.get('media_content_thumbnail_large') or info.get('media_thumbnail'),
            'duration': int_or_none(info.get('media_length')),
            'upload_date': unified_strdate(info.get('pubDate')),
            'view_count': int_or_none(info.get('media_views'))
        }
gpl-3.0
lorehov/mongolock
src/test_mongolock.py
1
3268
"""Integration tests for MongoLock against a local MongoDB instance."""
from datetime import datetime, timedelta

import pytest
from pymongo import MongoClient

from mongolock import MongoLock, MongoLockLocked, MongoLockException

connection = MongoClient()
db_name = 'mongolock_test'
col_name = 'locks'


@pytest.fixture()
def lock():
    """Return a MongoLock backed by an emptied test collection."""
    connection[db_name][col_name].remove()
    return MongoLock(client=connection, db=db_name, collection=col_name)


def test_locked_successfully(lock):
    assert lock.lock('key', 'owner')


def test_locked_successfully_second_time(lock):
    # Re-acquiring after a release must succeed.
    lock.lock('key', 'owner')
    lock.release('key', 'owner')
    assert lock.lock('key', 'owner')


def test_lock_already_locked(lock):
    assert lock.lock('key', 'another_one')
    assert lock.lock('key', 'owner') is False


def test_lock_stealed(lock):
    # An expired lock can be taken over by waiting with a timeout.
    lock.lock('key', 'owner', expire=0.1)
    assert lock.lock('key', 'owner', timeout=10)


def test_release(lock):
    lock.lock('key', 'owner')
    lock.release('key', 'owner')
    result = lock.get_lock_info('key')
    assert result['locked'] is False


def test_should_not_release_not_lock_owned_by_another_one(lock):
    lock.lock('key', 'another_one')
    # BUG FIX: the release attempt by the non-owner was missing, so this
    # test previously only checked that lock() locks. Now it verifies that
    # a foreign release attempt leaves the lock held.
    lock.release('key', 'owner')
    assert lock.get_lock_info('key')['locked']


def test_should_not_release_not_locked_lock(lock):
    # Releasing a key that was never locked must be a no-op.
    lock.release('key', 'owner')
    assert lock.get_lock_info('key') is None


def test_context(lock):
    current_lock = lock.get_lock_info('key')
    assert current_lock is None
    with lock('key', 'owner'):
        result = lock.get_lock_info('key')
        assert result['locked']


def test_context_raises_if_locked(lock):
    lock.lock('key', 'owner')
    with pytest.raises(MongoLockLocked):
        with lock('key', 'owner'):
            result = lock.get_lock_info('key')
            assert result['locked']


def test_touch(lock):
    # Touching must push the expiry into the future.
    dtnow = datetime.utcnow()
    lock.lock('key', 'owner', expire=1)
    lock.touch('key', 'owner', 1)
    new_expire = lock.get_lock_info('key')['expire']
    assert new_expire > dtnow


def test_cant_touch_locked_by_another(lock):
    lock.lock('key', 'another_one', expire=1)
    with pytest.raises(MongoLockException):
        lock.touch('key', 'owner', 1)


def test_lock_released_if_exception_raised(lock):
    # BUG FIX: the original caught a bare `except:` and asserted inside it,
    # so the test silently passed when no exception propagated at all.
    # pytest.raises guarantees the exception actually escapes the context
    # manager, and the release check runs unconditionally afterwards.
    with pytest.raises(Exception, match='Crash!'):
        with lock('key', 'owner'):
            raise Exception('Crash!')
    assert lock.get_lock_info('key')['locked'] is False


# NOTE(review): this function is missing the `test_` prefix, so pytest never
# collects it; its assertion (expire is None) also contradicts the
# touch(..., 1) call above it — confirm the intended behavior before
# renaming/enabling it.
def touch_expired_not_specified(lock):
    lock.lock('key', 'owner', expire=1)
    lock.touch('key', 'owner', 1)
    assert lock.get_lock_info('key')['expire'] is None


def test_create_lock_by_collection():
    # MongoLock can also be constructed from a raw collection object.
    connection[db_name][col_name].remove()
    collection = connection[db_name][col_name]
    assert MongoLock(collection=collection).lock('key', 'owner')


@pytest.mark.parametrize("locked, expire, is_locked", [
    (None, None, False),
    (True, None, True),
    (True, datetime.utcnow() - timedelta(seconds=1), False),
    (True, datetime.utcnow() + timedelta(seconds=1), True)
])
def test_is_locked(lock, locked, expire, is_locked):
    """A lock counts as held only while unexpired (or with no expiry)."""
    if locked is not None:
        connection[db_name][col_name].insert({
            '_id': 'key',
            'locked': locked,
            'owner': 'owner',
            'created': datetime.utcnow(),
            'expire': expire
        })
    assert lock.is_locked('key') == is_locked
bsd-2-clause
choderalab/density
src/density_sim.py
1
8797
"""Build a solvent box for a small molecule, parameterize it with GAFF/AM1-BCC,
run an OpenMM simulation with a barostat, and report the average density.

Python 2 code (uses raw_input and dict.iteritems).
"""
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as u
from sys import stdout
import sys
import gaff2xml
from gaff2xml.utils import run_antechamber
from gaff2xml.utils import create_ffxml_file
from gaff2xml.utils import convert_molecule
import mdtraj as md
import numpy as np
import simtk.openmm.app.element as elem
import pymbar.timeseries as ts
import rdkit.Chem as chem
from rdkit.Chem import AllChem as allchem


def caseFromPDB(molecule_name, num, steps, ff_mod):
    """Full pipeline starting from an existing <molecule_name>.pdb file."""
    ##################################
    ## How to write a specific case ##
    ##################################
    # molecule_name = name of molecule
    # num = number of molecules in box
    # ff_mod = True if using charge values from gromacs
    # steps = number of simulation steps (500000)
    ##################################
    infile, topfile, outfile, outdata, lastframe_file, dcd_file, mol2_filename = name_outputs(molecule_name, num, ff_mod)
    add_bonds_to_pdb(infile, topfile)
    forcefield = generate_ff(molecule_name, infile, mol2_filename, ff_mod, topfile)
    traj, xyz, top = set_topology(infile, num)
    top = assign_element(top)
    system, integrator, temperature = openmm_system(forcefield, top)
    openmm_simulation(system, top, integrator, xyz, temperature, outfile, lastframe_file, outdata, dcd_file, steps)
    avg_density(dcd_file, lastframe_file, outdata)


def caseFromPDBMassMod(molecule_name, num, steps, ff_mod):
    """Same as caseFromPDB, but also overrides particle masses from the
    GROMACS topology (see modify_mass)."""
    infile, topfile, outfile, outdata, lastframe_file, dcd_file, mol2_filename = name_outputs(molecule_name, num, ff_mod)
    add_bonds_to_pdb(infile, topfile)
    forcefield = generate_ff(molecule_name, infile, mol2_filename, ff_mod, topfile)
    traj, xyz, top = set_topology(infile, num)
    top = assign_element(top)
    system, integrator, temperature = openmm_system(forcefield, top)
    system = modify_mass(system, traj, topfile)
    openmm_simulation(system, top, integrator, xyz, temperature, outfile, lastframe_file, outdata, dcd_file, steps)
    avg_density(dcd_file, lastframe_file, outdata)


def caseFromSmiles(molecule_name, smiles, num, steps, ff_mod):
    """Pipeline that first generates the PDB from a SMILES string via RDKit.
    NOTE(review): unlike the PDB cases, this path skips assign_element and
    add_bonds_to_pdb — confirm that is intentional."""
    infile, topfile, outfile, outdata, lastframe_file, dcd_file, mol2_filename = name_outputs(molecule_name, num, ff_mod)
    files_from_smiles(smiles, infile)
    forcefield = generate_ff(molecule_name, infile, mol2_filename, ff_mod, topfile)
    traj, xyz, top = set_topology(infile, num)
    system, integrator, temperature = openmm_system(forcefield, top)
    system = modify_mass(system, traj, topfile)
    openmm_simulation(system, top, integrator, xyz, temperature, outfile, lastframe_file, outdata, dcd_file, steps)
    avg_density(dcd_file, lastframe_file, outdata)


def name_outputs(molecule_name, num, ff_mod):
    """Derive all input/output file names from the molecule name, box size
    and whether GROMACS charges are used (adds a '_ff' suffix)."""
    infile = molecule_name+'.pdb'
    topfile = molecule_name+'.top'
    if ff_mod == True:
        outfile = molecule_name+'_'+str(num)+'_out_ff.pdb'
    else:
        outfile = molecule_name+'_'+str(num)+'_out.pdb'
    outdata = outfile[:-4]+'.dat'
    lastframe_file = outfile[:-4]+'_last.pdb'
    dcd_file = outfile[:-4]+'.dcd'
    mol2_filename = molecule_name + '.mol2'
    return infile, topfile, outfile, outdata, lastframe_file, dcd_file, mol2_filename


def files_from_smiles(smiles, infile):
    """Build a 3D structure from SMILES (add H, embed, UFF-optimize) and
    write it to *infile* as PDB."""
    molecule = chem.MolFromSmiles(smiles)
    molecule = chem.AddHs(molecule)
    allchem.EmbedMolecule(molecule)
    allchem.UFFOptimizeMolecule(molecule)
    chem.MolToPDBFile(molecule, infile)


def add_bonds_to_pdb(infile, topfile):
    """Rewrite *infile* in place, keeping only the first residue and taking
    its bond list from the GROMACS topology *topfile*."""
    t = md.load_pdb(infile)
    # Single-residue files are returned untouched.
    if t.n_residues == 1:
        return
    first = t.top.residue(0)
    t.restrict_atoms(range(first.n_atoms))
    gtop = app.GromacsTopFile(topfile)._currentMoleculeType
    top, bonds = t.top.to_dataframe()
    # GROMACS atom indices are 1-based; mdtraj expects 0-based.
    bonds = np.array([(row[0], row[1]) for row in gtop.bonds], 'int')
    bonds = bonds - 1
    t.top = md.Topology.from_dataframe(top, bonds)
    t.save(infile)


def generate_ff(molecule_name, infile, mol2_filename, ff_mod, topfile):
    """Parameterize the molecule: antechamber (AM1-BCC charges) -> ffxml ->
    OpenMM ForceField; optionally override charges from the GROMACS top."""
    convert_molecule(infile, mol2_filename)
    gaff_mol2_filename, frcmod_filename = run_antechamber(molecule_name, mol2_filename, charge_method="bcc")
    ffxml = create_ffxml_file(gaff_mol2_filename, frcmod_filename)
    forcefield = app.ForceField(ffxml)
    if ff_mod == True:
        ff_mod_funct(forcefield, topfile)
    return forcefield


def ff_mod_funct(forcefield, topfile):
    """Replace per-type charges in the forcefield's nonbonded generator with
    the charges read from the GROMACS topology (column 6 of [atoms])."""
    # NOTE(review): index [3] assumes the nonbonded generator is the fourth
    # generator in the ffxml — confirm against the gaff2xml output.
    g = forcefield.getGenerators()[3]
    gtop = app.GromacsTopFile(topfile)._currentMoleculeType
    lock = dict((row[4], float(row[6])) for row in gtop.atoms)
    for key, val in g.typeMap.iteritems():
        # Type names look like "<prefix>-<atomname>"; match on the atom name.
        f = lambda x: x.split("-")[1]
        lkey = f(key)
        new_charge = lock[lkey]
        g.typeMap[key] = (new_charge, val[1], val[2])


def set_topology(infile, num):
    """Pack *num* copies of the molecule into a box with packmol and return
    (mdtraj trajectory, OpenMM positions, OpenMM topology with box)."""
    traj = gaff2xml.packmol.pack_box([infile], [num])
    xyz = traj.openmm_positions(0)
    top = traj.top.to_openmm()
    top.setUnitCellDimensions(mm.Vec3(*traj.unitcell_lengths[0])*u.nanometer)
    return traj, xyz, top


def openmm_system(forcefield, top):
    """Create the System (PME, 1 nm cutoff, H-bond constraints), a Langevin
    integrator at 298.15 K and add a Monte Carlo barostat at 1 atm."""
    system = forcefield.createSystem(top, nonbondedMethod=app.PME, nonbondedCutoff=1*u.nanometer, constraints=app.HBonds)
    temperature = 298.15*u.kelvin
    integrator = mm.LangevinIntegrator(temperature, 1/u.picosecond, 0.002*u.picoseconds)
    barostat = mm.MonteCarloBarostat(1.0*u.atmospheres, temperature, 25)
    system.addForce(barostat)
    return system, integrator, temperature


def openmm_simulation(system, top, integrator, xyz, temperature, outfile, lastframe_file, outdata, dcd_file, steps):
    """Minimize, equilibrate for 60000 unreported steps, then run *steps*
    production steps with PDB/DCD/state-data reporters attached."""
    simulation = app.Simulation(top, system, integrator)
    simulation.context.setPositions(xyz)
    simulation.minimizeEnergy()
    simulation.context.setVelocitiesToTemperature(temperature)
    # Equilibration: no reporters are attached yet, so nothing is recorded.
    simulation.step(60000)
    simulation.reporters.append(app.PDBReporter(outfile, 1000))
    # Written once, just before the end, to serve as topology for analysis.
    simulation.reporters.append(app.PDBReporter(lastframe_file, steps-1))
    simulation.reporters.append(app.StateDataReporter(outdata, 1000, step=True, temperature=True, density=True, potentialEnergy=True))
    simulation.reporters.append(app.StateDataReporter(stdout, 1000, step=True, temperature=True, density=True, potentialEnergy=True))
    simulation.reporters.append(app.DCDReporter(dcd_file, 1000))
    simulation.step(steps)


def avg_density(dcd_file, lastframe_file, outdata):
    """Compute mean/std/stderr of the density over statistically independent
    frames (pymbar subsampling) plus the mean temperature, and write a
    'density_..._indstd.dat' report."""
    trj = md.load(dcd_file, top=lastframe_file)
    volume = trj.unitcell_lengths.prod(1)
    # Total mass per frame in grams (divide by Avogadro's number).
    mass = sum([a.element.mass for a in trj.top.atoms]) / 6.0221413e23
    density_nounit = mass / volume
    density = density_nounit * u.gram / u.nanometer**3
    A_t = np.array(density_nounit)
    # Subsample to statistically independent frames.
    indices = ts.subsampleCorrelatedData(A_t)
    ind_density = density[indices]
    avg_ind_density = ind_density.mean().in_units_of(u.gram / u.liter)
    std_ind_density = ind_density.std().in_units_of(u.gram / u.liter)
    N = len(indices)
    stderr_ind_density = std_ind_density / (N**0.5)
    temps = []
    fid = open(outdata, 'r')
    # Skip the CSV header line; column 1 is the temperature.
    fid.next()
    for line in fid:
        dtemp = float(line.split(',')[1])
        temps.append(dtemp)
    fid.close()
    temps = np.array(temps)
    avg_temp = temps.mean()
    density_file = 'density_'+dcd_file[:-4]+'_indstd.dat'
    f = open(density_file, 'w')
    f.write("Average density of the system:\n")
    f.write(str(avg_ind_density))
    f.write("\nStandard Deviation of density:\n")
    f.write(str(std_ind_density))
    f.write("\nStandard Error of the density:\n")
    f.write(str(stderr_ind_density))
    f.write("\nAverage Temperature of the system:\n")
    f.write(str(avg_temp))
    f.close()


def modify_mass(system, traj, topfile):
    """Override each particle's mass with the value from column 7 of the
    GROMACS topology's [atoms] section, replicated across all copies of the
    molecule in the box. Returns the modified system."""
    n_atoms = traj.n_atoms
    n_residues = traj.n_residues
    # Atoms per molecule (integer division; assumes identical molecules).
    mlc_atoms = n_atoms / n_residues
    gtop = app.GromacsTopFile(topfile)._currentMoleculeType
    lock = dict((row[4], float(row[7])) for row in gtop.atoms)
    print("\nModified atomic masses:")
    for i in range(mlc_atoms):
        mass = lock[traj.top.atom(i).name]
        # Apply to atom i of every molecule copy in the box.
        for x in np.linspace(i, n_atoms+i, n_residues, endpoint=False):
            system.setParticleMass(int(x), mass * u.dalton)
        print(str(traj.top.atom(i).name)+' '+str(system.getParticleMass(i)))
    print("\n##############################################################\n")
    return system


def assign_element(top):
    """Set each atom's element by stripping the trailing character of its
    name (e.g. 'C1' -> 'C')."""
    for chain in top.chains():
        for res in chain.residues():
            for atom in res.atoms():
                ## this is only going to work as long as there are fewer than 10 of an atom type
                atom.element = elem.get_by_symbol(atom.name[:-1])
    return top


if __name__ == '__main__':
    molecule_name = raw_input('Name of molecule\n')
    num = int(raw_input('Number of molecules in box\n'))
    ff_mod = ('True' == raw_input('Use gromacs charges? (True or False)\n'))
    steps = int(raw_input('Number of simulation steps (Use 500000)\n'))
    # BUG: 'case' is not defined anywhere in this module — running as a
    # script raises NameError. Presumably caseFromPDB (or caseFromPDBMassMod)
    # was intended; confirm before fixing.
    case(molecule_name, num, steps, ff_mod)
gpl-2.0
cuongnv23/ansible
lib/ansible/module_utils/bigswitch_utils.py
187
3354
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # (c) 2016, Ted Elhourani <ted@bigswitch.com> # # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import json

from ansible.module_utils.urls import fetch_url


class Response(object):
    """Wrapper around a fetch_url result pair (response object, info dict)."""

    def __init__(self, resp, info):
        # resp may be None on connection failure; info always holds status
        # and, on HTTP errors, the error body under "body".
        self.body = None
        if resp:
            self.body = resp.read()
        self.info = info

    @property
    def json(self):
        """Decoded JSON body, or None if there is no body / it is not JSON."""
        if not self.body:
            if "body" in self.info:
                # On HTTP errors fetch_url puts the error body in info.
                return json.loads(self.info["body"])
            return None
        try:
            return json.loads(self.body)
        except ValueError:
            return None

    @property
    def status_code(self):
        """HTTP status code reported by fetch_url."""
        return self.info["status"]


class Rest(object):
    """Minimal REST client for the Big Switch controller, built on fetch_url.

    *headers* are the default headers sent with every request; *baseurl* is
    the controller endpoint prefix.
    """

    def __init__(self, module, headers, baseurl):
        self.module = module
        self.headers = headers
        self.baseurl = baseurl

    def _url_builder(self, path):
        # Join base URL and path without doubling the slash.
        if path[0] == '/':
            path = path[1:]
        return '%s/%s' % (self.baseurl, path)

    def send(self, method, path, data=None, headers=None):
        """Issue *method* on *path*; *data* is JSON-encoded.

        BUG FIX: the per-request *headers* argument was previously accepted
        (and forwarded by get/put/post/patch/delete) but silently ignored.
        It is now merged over the session defaults, with per-request values
        winning on key collisions. Passing headers=None behaves exactly as
        before.
        """
        url = self._url_builder(path)
        data = self.module.jsonify(data)
        merged_headers = dict(self.headers or {})
        if headers:
            merged_headers.update(headers)
        resp, info = fetch_url(self.module, url,
                               data=data,
                               headers=merged_headers,
                               method=method)
        return Response(resp, info)

    def get(self, path, data=None, headers=None):
        return self.send('GET', path, data, headers)

    def put(self, path, data=None, headers=None):
        return self.send('PUT', path, data, headers)

    def post(self, path, data=None, headers=None):
        return self.send('POST', path, data, headers)

    def patch(self, path, data=None, headers=None):
        return self.send('PATCH', path, data, headers)

    def delete(self, path, data=None, headers=None):
        return self.send('DELETE', path, data, headers)
gpl-3.0
gannetson/django
tests/postgres_tests/test_array.py
6
17940
import decimal import json import unittest import uuid from django import forms from django.core import exceptions, serializers, validators from django.core.management import call_command from django.db import IntegrityError, connection, models from django.test import TransactionTestCase, override_settings from django.utils import timezone from . import PostgreSQLTestCase from .models import ( ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel, NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel, ) try: from django.contrib.postgres.fields import ArrayField from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField except ImportError: pass class TestSaveLoad(PostgreSQLTestCase): def test_integer(self): instance = IntegerArrayModel(field=[1, 2, 3]) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_char(self): instance = CharArrayModel(field=['hello', 'goodbye']) instance.save() loaded = CharArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_dates(self): instance = DateTimeArrayModel( datetimes=[timezone.now()], dates=[timezone.now().date()], times=[timezone.now().time()], ) instance.save() loaded = DateTimeArrayModel.objects.get() self.assertEqual(instance.datetimes, loaded.datetimes) self.assertEqual(instance.dates, loaded.dates) self.assertEqual(instance.times, loaded.times) def test_tuples(self): instance = IntegerArrayModel(field=(1,)) instance.save() loaded = IntegerArrayModel.objects.get() self.assertSequenceEqual(instance.field, loaded.field) def test_integers_passed_as_strings(self): # This checks that get_prep_value is deferred properly instance = IntegerArrayModel(field=['1']) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(loaded.field, [1]) def test_default_null(self): instance = NullableIntegerArrayModel() instance.save() loaded = 
NullableIntegerArrayModel.objects.get(pk=instance.pk) self.assertEqual(loaded.field, None) self.assertEqual(instance.field, loaded.field) def test_null_handling(self): instance = NullableIntegerArrayModel(field=None) instance.save() loaded = NullableIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) instance = IntegerArrayModel(field=None) with self.assertRaises(IntegrityError): instance.save() def test_nested(self): instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]]) instance.save() loaded = NestedIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_other_array_types(self): instance = OtherTypesArrayModel( ips=['192.168.0.1', '::1'], uuids=[uuid.uuid4()], decimals=[decimal.Decimal(1.25), 1.75], ) instance.save() loaded = OtherTypesArrayModel.objects.get() self.assertEqual(instance.ips, loaded.ips) self.assertEqual(instance.uuids, loaded.uuids) self.assertEqual(instance.decimals, loaded.decimals) class TestQuerying(PostgreSQLTestCase): def setUp(self): self.objs = [ NullableIntegerArrayModel.objects.create(field=[1]), NullableIntegerArrayModel.objects.create(field=[2]), NullableIntegerArrayModel.objects.create(field=[2, 3]), NullableIntegerArrayModel.objects.create(field=[20, 30, 40]), NullableIntegerArrayModel.objects.create(field=None), ] def test_exact(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__exact=[1]), self.objs[:1] ) def test_isnull(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__isnull=True), self.objs[-1:] ) def test_gt(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__gt=[0]), self.objs[:4] ) def test_lt(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__lt=[2]), self.objs[:1] ) def test_in(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]), self.objs[:2] ) def test_contained_by(self): self.assertSequenceEqual( 
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]), self.objs[:2] ) def test_contains(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contains=[2]), self.objs[1:3] ) def test_contains_charfield(self): # Regression for #22907 self.assertSequenceEqual( CharArrayModel.objects.filter(field__contains=['text']), [] ) def test_contained_by_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__contained_by=['text']), [] ) def test_overlap_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__overlap=['text']), [] ) def test_index(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0=2), self.objs[1:3] ) def test_index_chained(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0__lt=3), self.objs[0:3] ) def test_index_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0=1), [instance] ) @unittest.expectedFailure def test_index_used_on_nested_data(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0=[1, 2]), [instance] ) def test_overlap(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]), self.objs[0:3] ) def test_len(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__len__lte=2), self.objs[0:3] ) def test_slice(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_1=[2]), self.objs[1:3] ) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]), self.objs[2:3] ) @unittest.expectedFailure def test_slice_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]), 
[instance] ) class TestChecks(PostgreSQLTestCase): def test_field_checks(self): field = ArrayField(models.CharField()) field.set_attributes_from_name('field') errors = field.check() self.assertEqual(len(errors), 1) self.assertEqual(errors[0].id, 'postgres.E001') def test_invalid_base_fields(self): field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel')) field.set_attributes_from_name('field') errors = field.check() self.assertEqual(len(errors), 1) self.assertEqual(errors[0].id, 'postgres.E002') @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests") class TestMigrations(TransactionTestCase): available_apps = ['postgres_tests'] def test_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(type(new.base_field), type(field.base_field)) def test_deconstruct_with_size(self): field = ArrayField(models.IntegerField(), size=3) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.size, field.size) def test_deconstruct_args(self): field = ArrayField(models.CharField(max_length=20)) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.base_field.max_length, field.base_field.max_length) def test_subclass_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField') field = ArrayFieldSubclass() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass') @override_settings(MIGRATION_MODULES={ "postgres_tests": "postgres_tests.array_default_migrations", }) def test_adding_field_with_default(self): # See #22962 table_name = 'postgres_tests_integerarraydefaultmodel' with connection.cursor() as cursor: self.assertNotIn(table_name, 
connection.introspection.table_names(cursor)) call_command('migrate', 'postgres_tests', verbosity=0) with connection.cursor() as cursor: self.assertIn(table_name, connection.introspection.table_names(cursor)) call_command('migrate', 'postgres_tests', 'zero', verbosity=0) with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) class TestSerialization(PostgreSQLTestCase): test_data = '[{"fields": {"field": "[\\"1\\", \\"2\\"]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]' def test_dumping(self): instance = IntegerArrayModel(field=[1, 2]) data = serializers.serialize('json', [instance]) self.assertEqual(json.loads(data), json.loads(self.test_data)) def test_loading(self): instance = list(serializers.deserialize('json', self.test_data))[0].object self.assertEqual(instance.field, [1, 2]) class TestValidation(PostgreSQLTestCase): def test_unbounded(self): field = ArrayField(models.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, None], None) self.assertEqual(cm.exception.code, 'item_invalid') self.assertEqual(cm.exception.message % cm.exception.params, 'Item 1 in the array did not validate: This field cannot be null.') def test_blank_true(self): field = ArrayField(models.IntegerField(blank=True, null=True)) # This should not raise a validation error field.clean([1, None], None) def test_with_size(self): field = ArrayField(models.IntegerField(), size=3) field.clean([1, 2, 3], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, 2, 3, 4], None) self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.') def test_nested_array_mismatch(self): field = ArrayField(ArrayField(models.IntegerField())) field.clean([[1, 2], [3, 4]], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([[1, 2], [3, 4, 5]], None) self.assertEqual(cm.exception.code, 'nested_array_mismatch') 
self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.') def test_with_validators(self): field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)])) field.clean([1, 2], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([0], None) self.assertEqual(cm.exception.code, 'item_invalid') self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.') class TestSimpleFormField(PostgreSQLTestCase): def test_valid(self): field = SimpleArrayField(forms.CharField()) value = field.clean('a,b,c') self.assertEqual(value, ['a', 'b', 'c']) def test_to_python_fail(self): field = SimpleArrayField(forms.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,9') self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.') def test_validate_fail(self): field = SimpleArrayField(forms.CharField(required=True)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,') self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.') def test_validators_fail(self): field = SimpleArrayField(forms.RegexField('[a-e]{2}')) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,bc,de') self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.') def test_delimiter(self): field = SimpleArrayField(forms.CharField(), delimiter='|') value = field.clean('a|b|c') self.assertEqual(value, ['a', 'b', 'c']) def test_delimiter_with_nesting(self): field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|') value = field.clean('a,b|c,d') self.assertEqual(value, [['a', 'b'], ['c', 'd']]) def test_prepare_value(self): field = SimpleArrayField(forms.CharField()) value = field.prepare_value(['a', 'b', 'c']) self.assertEqual(value, 'a,b,c') def 
test_max_length(self): field = SimpleArrayField(forms.CharField(), max_length=2) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,c') self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.') def test_min_length(self): field = SimpleArrayField(forms.CharField(), min_length=4) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,c') self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.') def test_required(self): field = SimpleArrayField(forms.CharField(), required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('') self.assertEqual(cm.exception.messages[0], 'This field is required.') def test_model_field_formfield(self): model_field = ArrayField(models.CharField(max_length=27)) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertIsInstance(form_field.base_field, forms.CharField) self.assertEqual(form_field.base_field.max_length, 27) def test_model_field_formfield_size(self): model_field = ArrayField(models.CharField(max_length=27), size=4) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertEqual(form_field.max_length, 4) class TestSplitFormField(PostgreSQLTestCase): def test_valid(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'} form = SplitForm(data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']}) def test_required(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), required=True, size=3) data = {'array_0': '', 'array_1': '', 'array_2': ''} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'array': ['This field is required.']}) def test_remove_trailing_nulls(self): class SplitForm(forms.Form): array 
= SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True) data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''} form = SplitForm(data) self.assertTrue(form.is_valid(), form.errors) self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']}) def test_required_field(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']}) def test_rendering(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) self.assertHTMLEqual(str(SplitForm()), ''' <tr> <th><label for="id_array_0">Array:</label></th> <td> <input id="id_array_0" name="array_0" type="text" /> <input id="id_array_1" name="array_1" type="text" /> <input id="id_array_2" name="array_2" type="text" /> </td> </tr> ''')
bsd-3-clause
hellfish2/collective.generic.webbuilder
bootstrap.py
38
10649
############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Bootstrap a buildout-based project Simply run this script in a directory containing a buildout.cfg. The script accepts buildout command-line options, so you can use the -c option to specify an alternate configuration file. """ import os, shutil, sys, tempfile, urllib, urllib2, subprocess from optparse import OptionParser if sys.platform == 'win32': def quote(c): if ' ' in c: return '"%s"' % c # work around spawn lamosity on windows else: return c else: quote = str # See zc.buildout.easy_install._has_broken_dash_S for motivation and comments. stdout, stderr = subprocess.Popen( [sys.executable, '-Sc', 'try:\n' ' import ConfigParser\n' 'except ImportError:\n' ' print 1\n' 'else:\n' ' print 0\n'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() has_broken_dash_S = bool(int(stdout.strip())) # In order to be more robust in the face of system Pythons, we want to # run without site-packages loaded. This is somewhat tricky, in # particular because Python 2.6's distutils imports site, so starting # with the -S flag is not sufficient. However, we'll start with that: if not has_broken_dash_S and 'site' in sys.modules: # We will restart with python -S. args = sys.argv[:] args[0:0] = [sys.executable, '-S'] args = map(quote, args) os.execv(sys.executable, args) # Now we are running with -S. 
We'll get the clean sys.path, import site # because distutils will do it later, and then reset the path and clean # out any namespace packages from site-packages that might have been # loaded by .pth files. clean_path = sys.path[:] import site # imported because of its side effects sys.path[:] = clean_path for k, v in sys.modules.items(): if k in ('setuptools', 'pkg_resources') or ( hasattr(v, '__path__') and len(v.__path__) == 1 and not os.path.exists(os.path.join(v.__path__[0], '__init__.py'))): # This is a namespace package. Remove it. sys.modules.pop(k) is_jython = sys.platform.startswith('java') setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py' distribute_source = 'http://python-distribute.org/distribute_setup.py' distribute_source = 'https://bitbucket.org/pypa/setuptools/raw/f657df1f1ed46596d236376649c99a470662b4ba/distribute_setup.py' # parsing arguments def normalize_to_url(option, opt_str, value, parser): if value: if '://' not in value: # It doesn't smell like a URL. value = 'file://%s' % ( urllib.pathname2url( os.path.abspath(os.path.expanduser(value))),) if opt_str == '--download-base' and not value.endswith('/'): # Download base needs a trailing slash to make the world happy. value += '/' else: value = None name = opt_str[2:].replace('-', '_') setattr(parser.values, name, value) usage = '''\ [DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options] Bootstraps a buildout-based project. Simply run this script in a directory containing a buildout.cfg, using the Python that you want bin/buildout to use. Note that by using --setup-source and --download-base to point to local resources, you can keep this script from going over the network. 
''' parser = OptionParser(usage=usage) parser.add_option("-v", "--version", dest="version", help="use a specific zc.buildout version") parser.add_option("-d", "--distribute", action="store_true", dest="use_distribute", default=False, help="Use Distribute rather than Setuptools.") parser.add_option("--setup-source", action="callback", dest="setup_source", callback=normalize_to_url, nargs=1, type="string", help=("Specify a URL or file location for the setup file. " "If you use Setuptools, this will default to " + setuptools_source + "; if you use Distribute, this " "will default to " + distribute_source + ".")) parser.add_option("--download-base", action="callback", dest="download_base", callback=normalize_to_url, nargs=1, type="string", help=("Specify a URL or directory for downloading " "zc.buildout and either Setuptools or Distribute. " "Defaults to PyPI.")) parser.add_option("--eggs", help=("Specify a directory for storing eggs. Defaults to " "a temporary directory that is deleted when the " "bootstrap script completes.")) parser.add_option("-t", "--accept-buildout-test-releases", dest='accept_buildout_test_releases', action="store_true", default=False, help=("Normally, if you do not specify a --version, the " "bootstrap script and buildout gets the newest " "*final* versions of zc.buildout and its recipes and " "extensions for you. 
If you use this flag, " "bootstrap and buildout will get the newest releases " "even if they are alphas or betas.")) parser.add_option("-c", None, action="store", dest="config_file", help=("Specify the path to the buildout configuration " "file to be used.")) options, args = parser.parse_args() if options.eggs: eggs_dir = os.path.abspath(os.path.expanduser(options.eggs)) else: eggs_dir = tempfile.mkdtemp() if options.setup_source is None: if options.use_distribute: options.setup_source = distribute_source else: options.setup_source = setuptools_source if options.accept_buildout_test_releases: args.insert(0, 'buildout:accept-buildout-test-releases=true') try: import pkg_resources import setuptools # A flag. Sometimes pkg_resources is installed alone. if not hasattr(pkg_resources, '_distribute'): raise ImportError except ImportError: ez_code = urllib2.urlopen( options.setup_source).read().replace('\r\n', '\n') ez = {} exec ez_code in ez setup_args = dict(to_dir=eggs_dir, download_delay=0) if options.download_base: setup_args['download_base'] = options.download_base if options.use_distribute: setup_args['no_fake'] = True if sys.version_info[:2] == (2, 4): setup_args['version'] = '0.6.32' ez['use_setuptools'](**setup_args) if 'pkg_resources' in sys.modules: reload(sys.modules['pkg_resources']) import pkg_resources # This does not (always?) update the default working set. We will # do it. 
for path in sys.path: if path not in pkg_resources.working_set.entries: pkg_resources.working_set.add_entry(path) cmd = [quote(sys.executable), '-c', quote('from setuptools.command.easy_install import main; main()'), '-mqNxd', quote(eggs_dir)] if not has_broken_dash_S: cmd.insert(1, '-S') find_links = options.download_base if not find_links: find_links = os.environ.get('bootstrap-testing-find-links') if not find_links and options.accept_buildout_test_releases: find_links = 'http://downloads.buildout.org/' if find_links: cmd.extend(['-f', quote(find_links)]) if options.use_distribute: setup_requirement = 'distribute' else: setup_requirement = 'setuptools' ws = pkg_resources.working_set setup_requirement_path = ws.find( pkg_resources.Requirement.parse(setup_requirement)).location env = dict( os.environ, PYTHONPATH=setup_requirement_path) requirement = 'zc.buildout' version = options.version if version is None and not options.accept_buildout_test_releases: # Figure out the most recent final version of zc.buildout. 
import setuptools.package_index _final_parts = '*final-', '*final' def _final_version(parsed_version): for part in parsed_version: if (part[:1] == '*') and (part not in _final_parts): return False return True index = setuptools.package_index.PackageIndex( search_path=[setup_requirement_path]) if find_links: index.add_find_links((find_links,)) req = pkg_resources.Requirement.parse(requirement) if index.obtain(req) is not None: best = [] bestv = None for dist in index[req.project_name]: distv = dist.parsed_version if distv >= pkg_resources.parse_version('2dev'): continue if _final_version(distv): if bestv is None or distv > bestv: best = [dist] bestv = distv elif distv == bestv: best.append(dist) if best: best.sort() version = best[-1].version if version: requirement += '=='+version else: requirement += '<2dev' cmd.append(requirement) if is_jython: import subprocess exitcode = subprocess.Popen(cmd, env=env).wait() else: # Windows prefers this, apparently; otherwise we would prefer subprocess exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env])) if exitcode != 0: sys.stdout.flush() sys.stderr.flush() print ("An error occurred when trying to install zc.buildout. " "Look above this message for any errors that " "were output by easy_install.") sys.exit(exitcode) ws.add_entry(eggs_dir) ws.require(requirement) import zc.buildout.buildout # If there isn't already a command in the args, add bootstrap if not [a for a in args if '=' not in a]: args.append('bootstrap') # if -c was provided, we push it back into args for buildout's main function if options.config_file is not None: args[0:0] = ['-c', options.config_file] zc.buildout.buildout.main(args) if not options.eggs: # clean up temporary egg directory shutil.rmtree(eggs_dir)
bsd-3-clause
t123/ReadingTool.Python
lib/models/model.py
1
11459
import bz2, re from lib.stringutil import StringUtil class TermState: Invalid, Known, Unknown, Ignored, NotSeen = range(5) @staticmethod def ToString(state): if state==0: return "Invalid" elif state==1: return "Known" elif state==2: return "Unknown" elif state==3: return "Ignored" elif state==4: return "NotSeen" raise Exception("Unknown int state") @staticmethod def ToEnum(state): state = str(state).lower() if state=="invalid": return 0 elif state=="known": return 1 elif state=="unknown": return 2 elif state=="ignored": return 3 elif state=="notseen": return 4 raise Exception("Unknown string state") class ItemType: Unknown, Text, Video = range(3) @staticmethod def ToString(itemType): if itemType==0: return "Invalid" elif itemType==1: return "Text" elif itemType==2: return "Video" raise Exception("Unknown int itemType") class TermType: Unknown, Create, Modify, Delete = range(4) @staticmethod def ToString(termType): if termType==0: return "Unknown" elif termType==1: return "Create" elif termType==2: return "Modify" elif termType==3: return "Delete" raise Exception("Unknown int termType") class LanguageDirection: Unknown, LeftToRight, RightToLeft = range(3) class User(): def __init__(self): self.userId = None self.username = "" self.lastLogin = None self.accessKey = "" self.accessSecret = "" self.syncData = False def hasCredentials(self): if not self.accessKey or not self.accessSecret: return False if len(self.accessKey)!=20 or len(self.accessSecret)!=50: return False return True class Language(): TERM_REGEX = "([a-zA-ZÀ-ÖØ-öø-ÿĀ-ſƀ-ɏ\’\'-]+)|(\s+)|(\d+)|(__\d+__)|(<\/?[a-z][A-Z0-9]*[^>]*>)|(.)" def __init__(self): self.languageId = None self.name = "" self.created = None self.modified = None self.isArchived = False self.languageCode = "--" self.userId = None self.termRegex = Language.TERM_REGEX self.direction = LanguageDirection.LeftToRight self.theme = None self.sourceCode = "--" def toDict(self): d = {} d["languageId"] = str(self.languageId) d["name"] = self.name 
d["created"] = self.created d["modified"] = self.modified d["isArchived"] = self.isArchived d["languageCode"] = self.languageCode d["userId"] = str(self.userId) d["termRegex"] = self.termRegex d["direction"] = self.direction d["theme"] = self.theme d["sourceCode"] = self.sourceCode return d class LanguageCode(): def __init__(self): self.code = "" self.name = "" class LanguagePlugin(): def __init__(self): self.pluginId = None self.name = "" self.description = "" self.enabled = False self.content = "" self.uuid = None class SharedTerm(): def __init__(self): self._phrase = "" self.id = None self.code = "" self.lowerPhrase = "" self.basePhrase = "" self.sentence = "" self.definition = "" self.language = "" self.source = "" @property def phrase(self): return self._phrase @phrase.setter def phrase(self, value): self._phrase = value self.lowerPhrase = (value or "").lower() class Term(): def __init__(self): self.termId = None self.created = None self.modified = None self._phrase = "" self._isFragment = False self.lowerPhrase = "" self.basePhrase = "" self.definition = "" self.sentence = "" self.languageId = None self.state = TermState.Unknown self.userId = None self.itemSourceId = None self.language = "" self.itemSource = "" self.sourceCode = "" self.itemSourceCollection = "" #only for search self.itemSourceTitle = "" #only for search def fullDefinition(self, joinString="<br/>"): fullDef = "" if joinString=="<br/>": if not StringUtil.isEmpty(self.basePhrase): fullDef += self.basePhrase + joinString if not StringUtil.isEmpty(self.definition): fullDef += self.definition return re.sub(r"\n", "<br/>", fullDef) if joinString=="\n": if not StringUtil.isEmpty(self.basePhrase): fullDef += self.basePhrase + joinString if not StringUtil.isEmpty(self.definition): fullDef += self.definition return fullDef if joinString==" ; ": if not StringUtil.isEmpty(self.basePhrase): fullDef += self.basePhrase + joinString if not StringUtil.isEmpty(self.definition): fullDef += self.definition 
return re.sub(r"\n", " ; ", fullDef) @property def phrase(self): return self._phrase @phrase.setter def phrase(self, value): self._phrase = value self.lowerPhrase = (value or "").lower() if " " in value: self._isFragment = True @property def isFragment(self): return self._isFragment @isFragment.setter def isFragment(self, value): self._isFragment = value; def toDict(self): d = {} d["termId"] = str(self.termId) d["created"] = self.created d["modified"] = self.modified d["phrase"] = self.phrase d["lowerPhrase"] = self.lowerPhrase d["basePhrase"] = self.basePhrase d["definition"] = self.definition d["sentence"] = self.sentence d["languageId"] = str(self.languageId) d["state"] = TermState.ToString(self.state).lower() #historical d["userId"] = str(self.userId) d["itemSourceId"] = str(self.itemSourceId) d["language"] = self.language d["itemSource"] = self.itemSource d["isFragment"] = self.isFragment return d class TermLog(): def __init__(self): self.entryDate = None self.termId = None self.state = None self.type = TermType.Unknown self.languageId = None self.userId = None def toDict(self): d = {} d["entryDate"] = self.entryDate d["termId"] = str(self.termId) d["state"] = TermState.ToString(self.state) d["type"] = TermType.ToString(self.type) d["languageId"] = str(self.languageId) d["userId"] = str(self.userId) return d class Item(): def __init__(self): self.itemId = None self.created = None self.modified = None self.itemType = ItemType.Text self.userId = None self.collectionName = "" self.collectionNo = None self.mediaUri = "" self.lastRead = None self.l1Title = "" self.l2Title = "" self.l1LanguageId = None self.l2LanguageId = None self.readTimes = 0 self.listenedTimes = 0 self.l1Language = None self.l2Language = None self.l1Content = None self.l2Content = None def getL1Content(self): if self.l1Content is None or StringUtil.isEmpty(self.l1Content): return "" return bz2.decompress(self.l1Content).decode() def setL1Content(self, value): if value is None or 
StringUtil.isEmpty(value): self.l1Content = None return self.l1Content = bz2.compress(value.encode()) def getL2Content(self): if self.l2Content is None or StringUtil.isEmpty(self.l2Content): return "" return bz2.decompress(self.l2Content).decode() def setL2Content(self, value): if value is None or StringUtil.isEmpty(value): self.l2Content = None return self.l2Content = bz2.compress(value.encode()) def hasMedia(self): return not StringUtil.isEmpty(self.mediaUri) def isParallel(self): return not StringUtil.isEmpty(self.l2Content) def name(self): name = "" if self.collectionNo: name += str(self.collectionNo) + ". " if not StringUtil.isEmpty(self.collectionName): name += self.collectionName + " - " name += self.l1Title return name def toDict(self): d = {} d["itemId"] = str(self.itemId) d["created"] = self.created d["modified"] = self.modified d["itemType"] = self.itemType d["userId"] = str(self.userId) d["collectionName"] = self.collectionName d["collectionNo"] = self.collectionNo d["mediaUri"] = self.mediaUri d["lastRead"] = self.lastRead d["l1Title"] = self.l1Title d["l2Title"] = self.l2Title d["l1LanguageId"] = str(self.l1LanguageId) d["l2LanguageId"] = str(self.l2LanguageId) d["readTimes"] = self.readTimes d["listenedTimes"] = self.listenedTimes d["l1Language"] = self.l1Language d["l2Language"] = self.l2Language d["isParallel"] = self.isParallel() d["hasMedia"] = self.hasMedia() if self.l1Content is not None: d["l1Content"] = self.getL1Content() else: d["l1Content"] = "" if self.l2Content is not None: try: d["l2Content"] = self.getL2Content() except ValueError: #Find all only returns 1st 20 bytes d["l2Content"] = "" else: d["l2Content"] = "" return d class Plugin(): def __init__(self): self.pluginId = None self.description = "" self.name = "" self.content = "" self.uuid = "" self.version = 0 self.local = False def toDict(self): d = {} d["pluginId"] = str(self.pluginId) d["description"] = self.description d["name"] = self.name d["content"] = self.content d["uuid"] = 
self.uuid d["version"] = self.version d["local"] = self.local return d class Storage(): def __init__(self): self.uuid = "" self.key = "" self.value = "" def toDict(self): d = {} d["uuid"] = str(self.uuid) d["key"] = self.key d["value"] = self.value return d
agpl-3.0
paweljasinski/ironpython3
Src/StdLib/Lib/encodings/iso8859_11.py
272
12335
""" Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-11', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' 
# 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' # 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE 
BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> 
<control> '\xa0' # 0xA0 -> NO-BREAK SPACE '\u0e01' # 0xA1 -> THAI CHARACTER KO KAI '\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI '\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT '\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI '\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON '\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG '\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU '\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN '\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING '\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG '\u0e0b' # 0xAB -> THAI CHARACTER SO SO '\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE '\u0e0d' # 0xAD -> THAI CHARACTER YO YING '\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA '\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK '\u0e10' # 0xB0 -> THAI CHARACTER THO THAN '\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO '\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO '\u0e13' # 0xB3 -> THAI CHARACTER NO NEN '\u0e14' # 0xB4 -> THAI CHARACTER DO DEK '\u0e15' # 0xB5 -> THAI CHARACTER TO TAO '\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG '\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN '\u0e18' # 0xB8 -> THAI CHARACTER THO THONG '\u0e19' # 0xB9 -> THAI CHARACTER NO NU '\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI '\u0e1b' # 0xBB -> THAI CHARACTER PO PLA '\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG '\u0e1d' # 0xBD -> THAI CHARACTER FO FA '\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN '\u0e1f' # 0xBF -> THAI CHARACTER FO FAN '\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO '\u0e21' # 0xC1 -> THAI CHARACTER MO MA '\u0e22' # 0xC2 -> THAI CHARACTER YO YAK '\u0e23' # 0xC3 -> THAI CHARACTER RO RUA '\u0e24' # 0xC4 -> THAI CHARACTER RU '\u0e25' # 0xC5 -> THAI CHARACTER LO LING '\u0e26' # 0xC6 -> THAI CHARACTER LU '\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN '\u0e28' # 0xC8 -> THAI CHARACTER SO SALA '\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI '\u0e2a' # 0xCA -> THAI CHARACTER SO SUA '\u0e2b' # 0xCB -> THAI CHARACTER HO HIP '\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA '\u0e2d' # 0xCD -> THAI CHARACTER O ANG '\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK 
'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI '\u0e30' # 0xD0 -> THAI CHARACTER SARA A '\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT '\u0e32' # 0xD2 -> THAI CHARACTER SARA AA '\u0e33' # 0xD3 -> THAI CHARACTER SARA AM '\u0e34' # 0xD4 -> THAI CHARACTER SARA I '\u0e35' # 0xD5 -> THAI CHARACTER SARA II '\u0e36' # 0xD6 -> THAI CHARACTER SARA UE '\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE '\u0e38' # 0xD8 -> THAI CHARACTER SARA U '\u0e39' # 0xD9 -> THAI CHARACTER SARA UU '\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT '\u0e40' # 0xE0 -> THAI CHARACTER SARA E '\u0e41' # 0xE1 -> THAI CHARACTER SARA AE '\u0e42' # 0xE2 -> THAI CHARACTER SARA O '\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN '\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI '\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO '\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK '\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU '\u0e48' # 0xE8 -> THAI CHARACTER MAI EK '\u0e49' # 0xE9 -> THAI CHARACTER MAI THO '\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI '\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA '\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT '\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT '\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN '\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN '\u0e50' # 0xF0 -> THAI DIGIT ZERO '\u0e51' # 0xF1 -> THAI DIGIT ONE '\u0e52' # 0xF2 -> THAI DIGIT TWO '\u0e53' # 0xF3 -> THAI DIGIT THREE '\u0e54' # 0xF4 -> THAI DIGIT FOUR '\u0e55' # 0xF5 -> THAI DIGIT FIVE '\u0e56' # 0xF6 -> THAI DIGIT SIX '\u0e57' # 0xF7 -> THAI DIGIT SEVEN '\u0e58' # 0xF8 -> THAI DIGIT EIGHT '\u0e59' # 0xF9 -> THAI DIGIT NINE '\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU '\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT '\ufffe' '\ufffe' '\ufffe' '\ufffe' ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
apache-2.0
caalle/Python-koans
python 2/koans/about_monkey_patching.py
6
1383
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Related to AboutOpenClasses in the Ruby Koans # from runner.koan import * class AboutMonkeyPatching(Koan): class Dog(object): def bark(self): return "WOOF" def test_as_defined_dogs_do_bark(self): fido = self.Dog() self.assertEqual(__, fido.bark()) # ------------------------------------------------------------------ # Add a new method to an existing class. def test_after_patching_dogs_can_both_wag_and_bark(self): def wag(self): return "HAPPY" self.Dog.wag = wag fido = self.Dog() self.assertEqual(__, fido.wag()) self.assertEqual(__, fido.bark()) # ------------------------------------------------------------------ def test_most_built_in_classes_cannot_be_monkey_patched(self): try: int.is_even = lambda self: (self % 2) == 0 except StandardError as ex: self.assertMatch(__, ex[0]) # ------------------------------------------------------------------ class MyInt(int): pass def test_subclasses_of_built_in_classes_can_be_be_monkey_patched(self): self.MyInt.is_even = lambda self: (self % 2) == 0 self.assertEqual(____, self.MyInt(1).is_even()) self.assertEqual(____, self.MyInt(2).is_even())
mit
rowinggolfer/openmolar2
src/lib_openmolar/admin/db_orm/admin_address_link.py
1
3669
#! /usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### ## ## ## Copyright 2010-2012, Neil Wallace <neil@openmolar.com> ## ## ## ## This program is free software: you can redistribute it and/or modify ## ## it under the terms of the GNU General Public License as published by ## ## the Free Software Foundation, either version 3 of the License, or ## ## (at your option) any later version. ## ## ## ## This program is distributed in the hope that it will be useful, ## ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## ## GNU General Public License for more details. ## ## ## ## You should have received a copy of the GNU General Public License ## ## along with this program. If not, see <http://www.gnu.org/licenses/>. ## ## ## ############################################################################### ''' Provides a DemoGenerator for the address_link table ''' from random import randint from PyQt4 import QtSql from lib_openmolar.common.db_orm import InsertableRecord TABLENAME = "address_link" class DemoGenerator(object): def __init__(self, database=None): q_query= QtSql.QSqlQuery( "select min(ix), max(ix) from patients", database) if q_query.first(): self.min_patient_id = q_query.value(0).toInt()[0] self.max_patient_id = q_query.value(1).toInt()[0] else: self.min_patient_id, self.max_patient_id = 0,0 q_query= QtSql.QSqlQuery( "select min(ix), max(ix) from addresses", database) if q_query.first(): self.max_address_id = q_query.value(1).toInt()[0] self.min_address_id = q_query.value(0).toInt()[0] #reserve id number 1 for the practice address. 
if self.min_address_id == 1 and self.max_address_id > 1: self.min_address_id == 2 else: self.min_address_id, self.max_address_id = 0,0 self.length = self.max_patient_id - self.min_patient_id self.record = InsertableRecord(database, TABLENAME) self.record.remove(self.record.indexOf('address_cat')) self.record.remove(self.record.indexOf('to_date')) self.record.remove(self.record.indexOf('from_date')) self.record.remove(self.record.indexOf('mailing')) def demo_queries(self): ''' return a list of queries to populate a demo database ''' for patient_id in xrange(self.min_patient_id, self.max_patient_id+1): self.record.clearValues() address_id = (randint(self.min_address_id, self.max_address_id)) #set values, or allow defaults self.record.setValue('patient_id', patient_id) self.record.setValue('address_id', address_id) yield self.record.insert_query if __name__ == "__main__": from lib_openmolar.admin.connect import DemoAdminConnection sc = DemoAdminConnection() sc.connect() builder = DemoGenerator(sc) print builder.demo_queries().next()
gpl-3.0
p0cisk/Quantum-GIS
python/plugins/processing/algs/lidar/lastools/lassort.py
5
2699
# -*- coding: utf-8 -*- """ *************************************************************************** lassort.py --------------------- Date : September 2013 Copyright : (C) 2013 by Martin Isenburg Email : martin near rapidlasso point com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from future import standard_library standard_library.install_aliases() __author__ = 'Martin Isenburg' __date__ = 'September 2013' __copyright__ = '(C) 2013, Martin Isenburg' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from .LAStoolsUtils import LAStoolsUtils from .LAStoolsAlgorithm import LAStoolsAlgorithm from processing.core.parameters import ParameterBoolean class lassort(LAStoolsAlgorithm): BY_GPS_TIME = "BY_GPS_TIME" BY_POINT_SOURCE_ID = "BY_POINT_SOURCE_ID" def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('lassort') self.group, self.i18n_group = self.trAlgorithm('LAStools') self.addParametersVerboseGUI() self.addParametersPointInputGUI() self.addParameter(ParameterBoolean(lassort.BY_GPS_TIME, self.tr("sort by GPS time"), False)) self.addParameter(ParameterBoolean(lassort.BY_POINT_SOURCE_ID, self.tr("sort by point source ID"), False)) self.addParametersPointOutputGUI() self.addParametersAdditionalGUI() def processAlgorithm(self, progress): commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lassort")] self.addParametersVerboseCommands(commands) self.addParametersPointInputCommands(commands) if self.getParameterValue(lassort.BY_GPS_TIME): commands.append("-gps_time") if 
self.getParameterValue(lassort.BY_POINT_SOURCE_ID): commands.append("-point_source") self.addParametersPointOutputCommands(commands) self.addParametersAdditionalCommands(commands) LAStoolsUtils.runLAStools(commands, progress)
gpl-2.0
gurneyalex/odoo
addons/stock_account/models/stock_valuation_layer.py
3
2175
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import fields, models, tools class StockValuationLayer(models.Model): """Stock Valuation Layer""" _name = 'stock.valuation.layer' _description = 'Stock Valuation Layer' _order = 'create_date, id' _rec_name = 'product_id' active = fields.Boolean(related='product_id.active') company_id = fields.Many2one('res.company', 'Company', readonly=True, required=True) product_id = fields.Many2one('product.product', 'Product', readonly=True, required=True, check_company=True, auto_join=True) categ_id = fields.Many2one('product.category', related='product_id.categ_id') product_tmpl_id = fields.Many2one('product.template', related='product_id.product_tmpl_id') quantity = fields.Float('Quantity', digits=0, help='Quantity', readonly=True) uom_id = fields.Many2one(related='product_id.uom_id', readonly=True, required=True) currency_id = fields.Many2one('res.currency', 'Currency', related='company_id.currency_id', readonly=True, required=True) unit_cost = fields.Monetary('Unit Value', readonly=True) value = fields.Monetary('Total Value', readonly=True) remaining_qty = fields.Float(digits=0, readonly=True) remaining_value = fields.Monetary('Remaining Value', readonly=True) description = fields.Char('Description', readonly=True) stock_valuation_layer_id = fields.Many2one('stock.valuation.layer', 'Linked To', readonly=True, check_company=True) stock_valuation_layer_ids = fields.One2many('stock.valuation.layer', 'stock_valuation_layer_id') stock_move_id = fields.Many2one('stock.move', 'Stock Move', readonly=True, check_company=True, index=True) account_move_id = fields.Many2one('account.move', 'Journal Entry', readonly=True, check_company=True) def init(self): tools.create_index( self._cr, 'stock_valuation_layer_index', self._table, ['product_id', 'remaining_qty', 'stock_move_id', 'company_id', 'create_date'] ) def _update_stock_move(self): """ To be overriden in mrp 
subcontracting""" return True
agpl-3.0
kevinsawicki/node-gyp
gyp/test/gyp-defines/gyptest-regyp.py
73
1284
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that when the same value is repeated for a gyp define, duplicates are stripped from the regeneration rule. """ import os import TestGyp # Regenerating build files when a gyp file changes is currently only supported # by the make and Android generators. test = TestGyp.TestGyp(formats=['make', 'android']) os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value' test.run_gyp('defines.gyp') test.build('defines.gyp') # The last occurrence of a repeated set should take precedence over other # values. See gyptest-multiple-values.py. test.must_contain('action.txt', 'repeated_value') # So the regeneration rule needs to use the correct order. test.must_not_contain( 'Makefile', '"-Dkey=repeated_value" "-Dkey=value1" "-Dkey=repeated_value"') test.must_contain('Makefile', '"-Dkey=value1" "-Dkey=repeated_value"') # Sleep so that the changed gyp file will have a newer timestamp than the # previously generated build files. test.sleep() os.utime("defines.gyp", None) test.build('defines.gyp') test.must_contain('action.txt', 'repeated_value') test.pass_test()
mit
dob71/x2swn
skeinforge/fabmetheus_utilities/geometry/geometry_utilities/evaluate_elements/setting.py
8
6751
""" Boolean geometry utilities. """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from skeinforge_application.skeinforge_utilities import skeinforge_craft import math __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __credits__ = 'Art of Illusion <http://www.artofillusion.org/>' __date__ = '$Date: 2008/02/05 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' def _getAccessibleAttribute(attributeName, elementNode): 'Get the accessible attribute.' if attributeName in globalGetAccessibleAttributeSet: return getattr(Setting(elementNode), attributeName, None) return None def getCascadeFloatWithoutSelf(defaultFloat, elementNode, key): 'Get the cascade float.' if key in elementNode.attributes: value = elementNode.attributes[key] functionName = 'get' + key[0].upper() + key[1 :] if functionName in value: if elementNode.parentNode == None: return defaultFloat else: elementNode = elementNode.parentNode return elementNode.getCascadeFloat(defaultFloat, key) def getEdgeWidth(elementNode): 'Get the edge width.' if elementNode == None: return 0.72 preferences = skeinforge_craft.getCraftPreferences('carve') layerHeight = skeinforge_craft.getCraftValue('Layer Height', preferences) layerHeight = getCascadeFloatWithoutSelf(layerHeight, elementNode, 'layerHeight') edgeWidthOverHeight = skeinforge_craft.getCraftValue('Edge Width over Height', preferences) edgeWidthOverHeight = getCascadeFloatWithoutSelf(edgeWidthOverHeight, elementNode, 'edgeWidthOverHeight') return getCascadeFloatWithoutSelf(edgeWidthOverHeight * layerHeight, elementNode, 'edgeWidth') def getImportCoarseness(elementNode, preferences=None): 'Get the importCoarseness.' 
if elementNode == None: return 1.0 if preferences == None: preferences = skeinforge_craft.getCraftPreferences('carve') importCoarseness = skeinforge_craft.getCraftValue('Import Coarseness', preferences) return getCascadeFloatWithoutSelf(importCoarseness, elementNode, 'importCoarseness') def getImportRadius(elementNode): 'Get the importRadius.' if elementNode == None: return 0.36 preferences = skeinforge_craft.getCraftPreferences('carve') importCoarseness = getImportCoarseness(elementNode, preferences) layerHeight = skeinforge_craft.getCraftValue('Layer Height', preferences) layerHeight = getCascadeFloatWithoutSelf(layerHeight, elementNode, 'layerHeight') edgeWidthOverHeight = skeinforge_craft.getCraftValue('Edge Width over Height', preferences) edgeWidthOverHeight = getCascadeFloatWithoutSelf(edgeWidthOverHeight, elementNode, 'edgeWidthOverHeight') return getCascadeFloatWithoutSelf(0.5 * importCoarseness * layerHeight * edgeWidthOverHeight, elementNode, 'importRadius') def getInteriorOverhangAngle(elementNode): 'Get the interior overhang support angle in degrees.' return getCascadeFloatWithoutSelf(30.0, elementNode, 'interiorOverhangAngle') def getInteriorOverhangRadians(elementNode): 'Get the interior overhang support angle in radians.' return math.radians(getInteriorOverhangAngle(elementNode)) def getLayerHeight(elementNode): 'Get the layer height.' if elementNode == None: return 0.4 preferences = skeinforge_craft.getCraftPreferences('carve') return getCascadeFloatWithoutSelf(skeinforge_craft.getCraftValue('Layer Height', preferences), elementNode, 'layerHeight') def getOverhangAngle(elementNode): 'Get the overhang support angle in degrees.' return getCascadeFloatWithoutSelf(45.0, elementNode, 'overhangAngle') def getOverhangRadians(elementNode): 'Get the overhang support angle in radians.' return math.radians(getOverhangAngle(elementNode)) def getOverhangSpan(elementNode): 'Get the overhang span.' 
return getCascadeFloatWithoutSelf(2.0 * getLayerHeight(elementNode), elementNode, 'overhangSpan') def getPrecision(elementNode): 'Get the cascade precision.' return getCascadeFloatWithoutSelf(0.2 * getLayerHeight(elementNode), elementNode, 'precision') def getSheetThickness(elementNode): 'Get the sheet thickness.' return getCascadeFloatWithoutSelf(3.0, elementNode, 'sheetThickness') def getTwistPrecision(elementNode): 'Get the twist precision in degrees.' return getCascadeFloatWithoutSelf(5.0, elementNode, 'twistPrecision') def getTwistPrecisionRadians(elementNode): 'Get the twist precision in radians.' return math.radians(getTwistPrecision(elementNode)) class Setting: 'Class to get handle elementNodes in a setting.' def __init__(self, elementNode): 'Initialize.' self.elementNode = elementNode def __repr__(self): 'Get the string representation of this Setting.' return self.elementNode def getEdgeWidth(self): 'Get the edge width.' return getEdgeWidth(self.elementNode) def getImportCoarseness(self): 'Get the importCoarseness.' return getImportCoarseness(self.elementNode) def getImportRadius(self): 'Get the importRadius.' return getImportRadius(self.elementNode) def getInteriorOverhangAngle(self): 'Get the interior overhang support angle in degrees.' return getInteriorOverhangAngle(self.elementNode) def getInteriorOverhangRadians(self): 'Get the interior overhang support angle in radians.' return getInteriorOverhangRadians(self.elementNode) def getLayerHeight(self): 'Get the layer height.' return getLayerHeight(self.elementNode) def getOverhangAngle(self): 'Get the overhang support angle in degrees.' return getOverhangAngle(self.elementNode) def getOverhangRadians(self): 'Get the overhang support angle in radians.' return getOverhangRadians(self.elementNode) def getOverhangSpan(self): 'Get the overhang span.' return getOverhangSpan(self.elementNode) def getPrecision(self): 'Get the cascade precision.' 
return getPrecision(self.elementNode) def getSheetThickness(self): 'Get the sheet thickness.' return getSheetThickness(self.elementNode) def getTwistPrecision(self): 'Get the twist precision in degrees.' return getTwistPrecision(self.elementNode) def getTwistPrecisionRadians(self): 'Get the twist precision in radians.' return getTwistPrecisionRadians(self.elementNode) globalAccessibleAttributeDictionary = 'getEdgeWidth getImportCoarseness getImportRadius getInteriorOverhangAngle getInteriorOverhangRadians'.split() globalAccessibleAttributeDictionary += 'getLayerHeight getOverhangSpan getOverhangAngle getOverhangRadians'.split() globalAccessibleAttributeDictionary += 'getPrecision getSheetThickness getTwistPrecision getTwistPrecisionRadians'.split() globalGetAccessibleAttributeSet = set(globalAccessibleAttributeDictionary)
gpl-3.0
drakuna/odoo
addons/lunch/__openerp__.py
27
1507
# -*- coding: utf-8 -*- { 'name': 'Lunch', 'sequence': 120, 'version': '1.0', 'depends': ['base', 'web', 'decimal_precision'], 'category': 'Tools', 'summary': 'Lunch Order, Meal, Food', 'description': """ The base module to manage lunch. ================================ Many companies order sandwiches, pizzas and other, from usual vendors, for their employees to offer them more facilities. However lunches management within the company requires proper administration especially when the number of employees or vendors is important. The “Lunch Order” module has been developed to make this management easier but also to offer employees more tools and usability. In addition to a full meal and vendor management, this module offers the possibility to display warning and provides quick order selection based on employee’s preferences. If you want to save your employees' time and avoid them to always have coins in their pockets, this module is essential. """, 'data': [ 'security/lunch_security.xml', 'security/ir.model.access.csv', 'report/lunch_order_views.xml', 'report/lunch_order_templates.xml', 'views/lunch_templates.xml', 'wizard/lucky_order_view.xml', 'views/lunch_views.xml', 'data/lunch_data.xml', ], 'demo': ['data/lunch_demo.xml'], 'qweb': ['static/src/xml/lunch.xml', ], 'installable': True, 'application': True, 'certificate': '001292377792581874189', }
gpl-3.0
mheap/ansible
lib/ansible/modules/cloud/misc/virt_pool.py
43
21722
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Maciej Delmanowski <drybjed@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: virt_pool author: "Maciej Delmanowski (@drybjed)" version_added: "2.0" short_description: Manage libvirt storage pools description: - Manage I(libvirt) storage pools. options: name: required: false aliases: [ "pool" ] description: - name of the storage pool being managed. Note that pool must be previously defined with xml. state: required: false choices: [ "active", "inactive", "present", "absent", "undefined", "deleted" ] description: - specify which state you want a storage pool to be in. If 'active', pool will be started. If 'present', ensure that pool is present but do not change its state; if it's missing, you need to specify xml argument. If 'inactive', pool will be stopped. If 'undefined' or 'absent', pool will be removed from I(libvirt) configuration. If 'deleted', pool contents will be deleted and then pool undefined. command: required: false choices: [ "define", "build", "create", "start", "stop", "destroy", "delete", "undefine", "get_xml", "list_pools", "facts", "info", "status" ] description: - in addition to state management, various non-idempotent commands are available. See examples. autostart: required: false type: bool description: - Specify if a given storage pool should be started automatically on system boot. uri: required: false default: "qemu:///system" description: - I(libvirt) connection uri. xml: required: false description: - XML document used with the define command. 
mode: required: false choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ] description: - Pass additional parameters to 'build' or 'delete' commands. requirements: - "python >= 2.6" - "python-libvirt" - "python-lxml" ''' EXAMPLES = ''' # Define a new storage pool - virt_pool: command: define name: vms xml: '{{ lookup("template", "pool/dir.xml.j2") }}' # Build a storage pool if it does not exist - virt_pool: command: build name: vms # Start a storage pool - virt_pool: command: create name: vms # List available pools - virt_pool: command: list_pools # Get XML data of a specified pool - virt_pool: command: get_xml name: vms # Stop a storage pool - virt_pool: command: destroy name: vms # Delete a storage pool (destroys contents) - virt_pool: command: delete name: vms # Undefine a storage pool - virt_pool: command: undefine name: vms # Gather facts about storage pools # Facts will be available as 'ansible_libvirt_pools' - virt_pool: command: facts # Gather information about pools managed by 'libvirt' remotely using uri - virt_pool: command: info uri: '{{ item }}' with_items: '{{ libvirt_uris }}' register: storage_pools # Ensure that a pool is active (needs to be defined and built first) - virt_pool: state: active name: vms # Ensure that a pool is inactive - virt_pool: state: inactive name: vms # Ensure that a given pool will be started at boot - virt_pool: autostart: yes name: vms # Disable autostart for a given pool - virt_pool: autostart: no name: vms ''' try: import libvirt except ImportError: HAS_VIRT = False else: HAS_VIRT = True try: from lxml import etree except ImportError: HAS_XML = False else: HAS_XML = True from ansible.module_utils.basic import AnsibleModule VIRT_FAILED = 1 VIRT_SUCCESS = 0 VIRT_UNAVAILABLE = 2 ALL_COMMANDS = [] ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete', 'undefine', 'destroy', 'get_xml', 'define', 'refresh'] HOST_COMMANDS = ['list_pools', 'facts', 'info'] 
ALL_COMMANDS.extend(ENTRY_COMMANDS) ALL_COMMANDS.extend(HOST_COMMANDS) ENTRY_STATE_ACTIVE_MAP = { 0: "inactive", 1: "active" } ENTRY_STATE_AUTOSTART_MAP = { 0: "no", 1: "yes" } ENTRY_STATE_PERSISTENT_MAP = { 0: "no", 1: "yes" } ENTRY_STATE_INFO_MAP = { 0: "inactive", 1: "building", 2: "running", 3: "degraded", 4: "inaccessible" } ENTRY_BUILD_FLAGS_MAP = { "new": 0, "repair": 1, "resize": 2, "no_overwrite": 4, "overwrite": 8 } ENTRY_DELETE_FLAGS_MAP = { "normal": 0, "zeroed": 1 } ALL_MODES = [] ALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys()) ALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys()) class EntryNotFound(Exception): pass class LibvirtConnection(object): def __init__(self, uri, module): self.module = module conn = libvirt.open(uri) if not conn: raise Exception("hypervisor connection failure") self.conn = conn def find_entry(self, entryid): # entryid = -1 returns a list of everything results = [] # Get active entries for name in self.conn.listStoragePools(): entry = self.conn.storagePoolLookupByName(name) results.append(entry) # Get inactive entries for name in self.conn.listDefinedStoragePools(): entry = self.conn.storagePoolLookupByName(name) results.append(entry) if entryid == -1: return results for entry in results: if entry.name() == entryid: return entry raise EntryNotFound("storage pool %s not found" % entryid) def create(self, entryid): if not self.module.check_mode: return self.find_entry(entryid).create() else: try: state = self.find_entry(entryid).isActive() except: return self.module.exit_json(changed=True) if not state: return self.module.exit_json(changed=True) def destroy(self, entryid): if not self.module.check_mode: return self.find_entry(entryid).destroy() else: if self.find_entry(entryid).isActive(): return self.module.exit_json(changed=True) def undefine(self, entryid): if not self.module.check_mode: return self.find_entry(entryid).undefine() else: if not self.find_entry(entryid): return self.module.exit_json(changed=True) def get_status2(self, 
entry): state = entry.isActive() return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown") def get_status(self, entryid): if not self.module.check_mode: state = self.find_entry(entryid).isActive() return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown") else: try: state = self.find_entry(entryid).isActive() return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown") except: return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown") def get_uuid(self, entryid): return self.find_entry(entryid).UUIDString() def get_xml(self, entryid): return self.find_entry(entryid).XMLDesc(0) def get_info(self, entryid): return self.find_entry(entryid).info() def get_volume_count(self, entryid): return self.find_entry(entryid).numOfVolumes() def get_volume_names(self, entryid): return self.find_entry(entryid).listVolumes() def get_devices(self, entryid): xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) if xml.xpath('/pool/source/device'): result = [] for device in xml.xpath('/pool/source/device'): result.append(device.get('path')) try: return result except: raise ValueError('No devices specified') def get_format(self, entryid): xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) try: result = xml.xpath('/pool/source/format')[0].get('type') except: raise ValueError('Format not specified') return result def get_host(self, entryid): xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) try: result = xml.xpath('/pool/source/host')[0].get('name') except: raise ValueError('Host not specified') return result def get_source_path(self, entryid): xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) try: result = xml.xpath('/pool/source/dir')[0].get('path') except: raise ValueError('Source path not specified') return result def get_path(self, entryid): xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) return xml.xpath('/pool/target/path')[0].text def get_type(self, entryid): xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0)) return xml.get('type') def build(self, 
entryid, flags): if not self.module.check_mode: return self.find_entry(entryid).build(flags) else: try: state = self.find_entry(entryid) except: return self.module.exit_json(changed=True) if not state: return self.module.exit_json(changed=True) def delete(self, entryid, flags): if not self.module.check_mode: return self.find_entry(entryid).delete(flags) else: try: state = self.find_entry(entryid) except: return self.module.exit_json(changed=True) if state: return self.module.exit_json(changed=True) def get_autostart(self, entryid): state = self.find_entry(entryid).autostart() return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown") def get_autostart2(self, entryid): if not self.module.check_mode: return self.find_entry(entryid).autostart() else: try: return self.find_entry(entryid).autostart() except: return self.module.exit_json(changed=True) def set_autostart(self, entryid, val): if not self.module.check_mode: return self.find_entry(entryid).setAutostart(val) else: try: state = self.find_entry(entryid).autostart() except: return self.module.exit_json(changed=True) if bool(state) != val: return self.module.exit_json(changed=True) def refresh(self, entryid): return self.find_entry(entryid).refresh() def get_persistent(self, entryid): state = self.find_entry(entryid).isPersistent() return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown") def define_from_xml(self, entryid, xml): if not self.module.check_mode: return self.conn.storagePoolDefineXML(xml) else: try: self.find_entry(entryid) except: return self.module.exit_json(changed=True) class VirtStoragePool(object): def __init__(self, uri, module): self.module = module self.uri = uri self.conn = LibvirtConnection(self.uri, self.module) def get_pool(self, entryid): return self.conn.find_entry(entryid) def list_pools(self, state=None): results = [] for entry in self.conn.find_entry(-1): if state: if state == self.conn.get_status2(entry): results.append(entry.name()) else: results.append(entry.name()) return results def 
state(self): results = [] for entry in self.list_pools(): state_blurb = self.conn.get_status(entry) results.append("%s %s" % (entry, state_blurb)) return results def autostart(self, entryid): return self.conn.set_autostart(entryid, True) def get_autostart(self, entryid): return self.conn.get_autostart2(entryid) def set_autostart(self, entryid, state): return self.conn.set_autostart(entryid, state) def create(self, entryid): return self.conn.create(entryid) def start(self, entryid): return self.conn.create(entryid) def stop(self, entryid): return self.conn.destroy(entryid) def destroy(self, entryid): return self.conn.destroy(entryid) def undefine(self, entryid): return self.conn.undefine(entryid) def status(self, entryid): return self.conn.get_status(entryid) def get_xml(self, entryid): return self.conn.get_xml(entryid) def define(self, entryid, xml): return self.conn.define_from_xml(entryid, xml) def build(self, entryid, flags): return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags, 0)) def delete(self, entryid, flags): return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags, 0)) def refresh(self, entryid): return self.conn.refresh(entryid) def info(self): return self.facts(facts_mode='info') def facts(self, facts_mode='facts'): results = dict() for entry in self.list_pools(): results[entry] = dict() if self.conn.find_entry(entry): data = self.conn.get_info(entry) # libvirt returns maxMem, memory, and cpuTime as long()'s, which # xmlrpclib tries to convert to regular int's during serialization. # This throws exceptions, so convert them to strings here and # assume the other end of the xmlrpc connection can figure things # out or doesn't care. 
results[entry] = { "status": ENTRY_STATE_INFO_MAP.get(data[0], "unknown"), "size_total": str(data[1]), "size_used": str(data[2]), "size_available": str(data[3]), } results[entry]["autostart"] = self.conn.get_autostart(entry) results[entry]["persistent"] = self.conn.get_persistent(entry) results[entry]["state"] = self.conn.get_status(entry) results[entry]["path"] = self.conn.get_path(entry) results[entry]["type"] = self.conn.get_type(entry) results[entry]["uuid"] = self.conn.get_uuid(entry) if self.conn.find_entry(entry).isActive(): results[entry]["volume_count"] = self.conn.get_volume_count(entry) results[entry]["volumes"] = list() for volume in self.conn.get_volume_names(entry): results[entry]["volumes"].append(volume) else: results[entry]["volume_count"] = -1 try: results[entry]["host"] = self.conn.get_host(entry) except ValueError: pass try: results[entry]["source_path"] = self.conn.get_source_path(entry) except ValueError: pass try: results[entry]["format"] = self.conn.get_format(entry) except ValueError: pass try: devices = self.conn.get_devices(entry) results[entry]["devices"] = devices except ValueError: pass else: results[entry]["state"] = self.conn.get_status(entry) facts = dict() if facts_mode == 'facts': facts["ansible_facts"] = dict() facts["ansible_facts"]["ansible_libvirt_pools"] = results elif facts_mode == 'info': facts['pools'] = results return facts def core(module): state = module.params.get('state', None) name = module.params.get('name', None) command = module.params.get('command', None) uri = module.params.get('uri', None) xml = module.params.get('xml', None) autostart = module.params.get('autostart', None) mode = module.params.get('mode', None) v = VirtStoragePool(uri, module) res = {} if state and command == 'list_pools': res = v.list_pools(state=state) if not isinstance(res, dict): res = {command: res} return VIRT_SUCCESS, res if state: if not name: module.fail_json(msg="state change requires a specified name") res['changed'] = False if 
state in ['active']: if v.status(name) is not 'active': res['changed'] = True res['msg'] = v.start(name) elif state in ['present']: try: v.get_pool(name) except EntryNotFound: if not xml: module.fail_json(msg="storage pool '" + name + "' not present, but xml not specified") v.define(name, xml) res = {'changed': True, 'created': name} elif state in ['inactive']: entries = v.list_pools() if name in entries: if v.status(name) is not 'inactive': res['changed'] = True res['msg'] = v.destroy(name) elif state in ['undefined', 'absent']: entries = v.list_pools() if name in entries: if v.status(name) is not 'inactive': v.destroy(name) res['changed'] = True res['msg'] = v.undefine(name) elif state in ['deleted']: entries = v.list_pools() if name in entries: if v.status(name) is not 'inactive': v.destroy(name) v.delete(name, mode) res['changed'] = True res['msg'] = v.undefine(name) else: module.fail_json(msg="unexpected state") return VIRT_SUCCESS, res if command: if command in ENTRY_COMMANDS: if not name: module.fail_json(msg="%s requires 1 argument: name" % command) if command == 'define': if not xml: module.fail_json(msg="define requires xml argument") try: v.get_pool(name) except EntryNotFound: v.define(name, xml) res = {'changed': True, 'created': name} return VIRT_SUCCESS, res elif command == 'build': res = v.build(name, mode) if not isinstance(res, dict): res = {'changed': True, command: res} return VIRT_SUCCESS, res elif command == 'delete': res = v.delete(name, mode) if not isinstance(res, dict): res = {'changed': True, command: res} return VIRT_SUCCESS, res res = getattr(v, command)(name) if not isinstance(res, dict): res = {command: res} return VIRT_SUCCESS, res elif hasattr(v, command): res = getattr(v, command)() if not isinstance(res, dict): res = {command: res} return VIRT_SUCCESS, res else: module.fail_json(msg="Command %s not recognized" % command) if autostart is not None: if not name: module.fail_json(msg="state change requires a specified name") 
res['changed'] = False if autostart: if not v.get_autostart(name): res['changed'] = True res['msg'] = v.set_autostart(name, True) else: if v.get_autostart(name): res['changed'] = True res['msg'] = v.set_autostart(name, False) return VIRT_SUCCESS, res module.fail_json(msg="expected state or command parameter to be specified") def main(): module = AnsibleModule( argument_spec=dict( name=dict(aliases=['pool']), state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']), command=dict(choices=ALL_COMMANDS), uri=dict(default='qemu:///system'), xml=dict(), autostart=dict(type='bool'), mode=dict(choices=ALL_MODES), ), supports_check_mode=True ) if not HAS_VIRT: module.fail_json( msg='The `libvirt` module is not importable. Check the requirements.' ) if not HAS_XML: module.fail_json( msg='The `lxml` module is not importable. Check the requirements.' ) rc = VIRT_SUCCESS try: rc, result = core(module) except Exception as e: module.fail_json(msg=str(e)) if rc != 0: # something went wrong emit the msg module.fail_json(rc=rc, msg=result) else: module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
coinkeeper/2015-06-22_18-31_bitcoin
qa/rpc-tests/test_framework/util.py
77
13035
# Copyright (c) 2014 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Helpful routines for regression testing # # Add python-bitcoinrpc to module search path: import os import sys from decimal import Decimal, ROUND_DOWN import json import random import shutil import subprocess import time import re from authproxy import AuthServiceProxy, JSONRPCException from util import * def p2p_port(n): return 11000 + n + os.getpid()%999 def rpc_port(n): return 12000 + n + os.getpid()%999 def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def sync_blocks(rpc_connections, wait=1): """ Wait until everybody has the same block count """ while True: counts = [ x.getblockcount() for x in rpc_connections ] if counts == [ counts[0] ]*len(counts): break time.sleep(wait) def sync_mempools(rpc_connections, wait=1): """ Wait until everybody has the same transactions in their memory pools """ while True: pool = set(rpc_connections[0].getrawmempool()) num_match = 1 for i in range(1, len(rpc_connections)): if set(rpc_connections[i].getrawmempool()) == pool: num_match = num_match+1 if num_match == len(rpc_connections): break time.sleep(wait) bitcoind_processes = {} def initialize_datadir(dirname, n): datadir = os.path.join(dirname, "node"+str(n)) if not os.path.isdir(datadir): os.makedirs(datadir) with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f: f.write("regtest=1\n"); f.write("rpcuser=rt\n"); f.write("rpcpassword=rt\n"); f.write("port="+str(p2p_port(n))+"\n"); f.write("rpcport="+str(rpc_port(n))+"\n"); return datadir def initialize_chain(test_dir): """ Create (or copy from cache) a 200-block-long chain and 4 wallets. 
bitcoind and bitcoin-cli must be in search path. """ if not os.path.isdir(os.path.join("cache", "node0")): devnull = open("/dev/null", "w+") # Create cache directories, run bitcoinds: for i in range(4): datadir=initialize_datadir("cache", i) args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ] if i > 0: args.append("-connect=127.0.0.1:"+str(p2p_port(0))) bitcoind_processes[i] = subprocess.Popen(args) if os.getenv("PYTHON_DEBUG", ""): print "initialize_chain: bitcoind started, calling bitcoin-cli -rpcwait getblockcount" subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir, "-rpcwait", "getblockcount"], stdout=devnull) if os.getenv("PYTHON_DEBUG", ""): print "initialize_chain: bitcoin-cli -rpcwait getblockcount completed" devnull.close() rpcs = [] for i in range(4): try: url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),) rpcs.append(AuthServiceProxy(url)) except: sys.stderr.write("Error connecting to "+url+"\n") sys.exit(1) # Create a 200-block-long chain; each of the 4 nodes # gets 25 mature blocks and 25 immature. 
# blocks are created with timestamps 10 minutes apart, starting # at 1 Jan 2014 block_time = 1388534400 for i in range(2): for peer in range(4): for j in range(25): set_node_times(rpcs, block_time) rpcs[peer].generate(1) block_time += 10*60 # Must sync before next peer starts generating blocks sync_blocks(rpcs) # Shut them down, and clean up cache directories: stop_nodes(rpcs) wait_bitcoinds() for i in range(4): os.remove(log_filename("cache", i, "debug.log")) os.remove(log_filename("cache", i, "db.log")) os.remove(log_filename("cache", i, "peers.dat")) os.remove(log_filename("cache", i, "fee_estimates.dat")) for i in range(4): from_dir = os.path.join("cache", "node"+str(i)) to_dir = os.path.join(test_dir, "node"+str(i)) shutil.copytree(from_dir, to_dir) initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf def initialize_chain_clean(test_dir, num_nodes): """ Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization. """ for i in range(num_nodes): datadir=initialize_datadir(test_dir, i) def _rpchost_to_args(rpchost): '''Convert optional IP:port spec to rpcconnect/rpcport args''' if rpchost is None: return [] match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost) if not match: raise ValueError('Invalid RPC host spec ' + rpchost) rpcconnect = match.group(1) rpcport = match.group(2) if rpcconnect.startswith('['): # remove IPv6 [...] 
wrapping rpcconnect = rpcconnect[1:-1] rv = ['-rpcconnect=' + rpcconnect] if rpcport: rv += ['-rpcport=' + rpcport] return rv def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None): """ Start a bitcoind and return RPC connection to it """ datadir = os.path.join(dirname, "node"+str(i)) if binary is None: binary = os.getenv("BITCOIND", "bitcoind") args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ] if extra_args is not None: args.extend(extra_args) bitcoind_processes[i] = subprocess.Popen(args) devnull = open("/dev/null", "w+") if os.getenv("PYTHON_DEBUG", ""): print "start_node: bitcoind started, calling bitcoin-cli -rpcwait getblockcount" subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir] + _rpchost_to_args(rpchost) + ["-rpcwait", "getblockcount"], stdout=devnull) if os.getenv("PYTHON_DEBUG", ""): print "start_node: calling bitcoin-cli -rpcwait getblockcount returned" devnull.close() url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i)) if timewait is not None: proxy = AuthServiceProxy(url, timeout=timewait) else: proxy = AuthServiceProxy(url) proxy.url = url # store URL on proxy for info return proxy def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None): """ Start multiple bitcoinds, return RPC connections to them """ if extra_args is None: extra_args = [ None for i in range(num_nodes) ] if binary is None: binary = [ None for i in range(num_nodes) ] return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ] def log_filename(dirname, n_node, logname): return os.path.join(dirname, "node"+str(n_node), "regtest", logname) def stop_node(node, i): node.stop() bitcoind_processes[i].wait() del bitcoind_processes[i] def stop_nodes(nodes): for node in nodes: node.stop() del nodes[:] # Emptying array closes connections as a side effect def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) 
def wait_bitcoinds(): # Wait for all bitcoinds to cleanly exit for bitcoind in bitcoind_processes.values(): bitcoind.wait() bitcoind_processes.clear() def connect_nodes(from_connection, node_num): ip_port = "127.0.0.1:"+str(p2p_port(node_num)) from_connection.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()): time.sleep(0.1) def connect_nodes_bi(nodes, a, b): connect_nodes(nodes[a], b) connect_nodes(nodes[b], a) def find_output(node, txid, amount): """ Return index to output of txid with value amount Raises exception if there is none. """ txdata = node.getrawtransaction(txid, 1) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount))) def gather_inputs(from_node, amount_needed, confirmations_required=1): """ Return a random set of unspent txouts that are enough to pay amount_needed """ assert(confirmations_required >=0) utxo = from_node.listunspent(confirmations_required) random.shuffle(utxo) inputs = [] total_in = Decimal("0.00000000") while total_in < amount_needed and len(utxo) > 0: t = utxo.pop() total_in += t["amount"] inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } ) if total_in < amount_needed: raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in)) return (total_in, inputs) def make_change(from_node, amount_in, amount_out, fee): """ Create change output(s), return them """ outputs = {} amount = amount_out+fee change = amount_in - amount if change > amount*2: # Create an extra change output to break up big inputs change_address = from_node.getnewaddress() # Split change in two, being careful of rounding: outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) change = amount_in - amount - 
outputs[change_address] if change > 0: outputs[from_node.getnewaddress()] = change return outputs def send_zeropri_transaction(from_node, to_node, amount, fee): """ Create&broadcast a zero-priority transaction. Returns (txid, hex-encoded-txdata) Ensures transaction is zero-priority by first creating a send-to-self, then using its output """ # Create a send-to-self with confirmed inputs: self_address = from_node.getnewaddress() (total_in, inputs) = gather_inputs(from_node, amount+fee*2) outputs = make_change(from_node, total_in, amount+fee, fee) outputs[self_address] = float(amount+fee) self_rawtx = from_node.createrawtransaction(inputs, outputs) self_signresult = from_node.signrawtransaction(self_rawtx) self_txid = from_node.sendrawtransaction(self_signresult["hex"], True) vout = find_output(from_node, self_txid, amount+fee) # Now immediately spend the output to create a 1-input, 1-output # zero-priority transaction: inputs = [ { "txid" : self_txid, "vout" : vout } ] outputs = { to_node.getnewaddress() : float(amount) } rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"]) def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random zero-priority transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment*random.randint(0,fee_variants) (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee) return (txid, txhex, fee) def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random transaction. 
Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment*random.randint(0,fee_variants) (total_in, inputs) = gather_inputs(from_node, amount+fee) outputs = make_change(from_node, total_in, amount, fee) outputs[to_node.getnewaddress()] = float(amount) rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"], fee) def assert_equal(thing1, thing2): if thing1 != thing2: raise AssertionError("%s != %s"%(str(thing1),str(thing2))) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s"%(str(thing1),str(thing2))) def assert_raises(exc, fun, *args, **kwds): try: fun(*args, **kwds) except exc: pass except Exception as e: raise AssertionError("Unexpected exception raised: "+type(e).__name__) else: raise AssertionError("No exception raised")
mit
eviljeff/olympia
src/olympia/amo/tests/test_readonly.py
7
1617
from django.db import models import MySQLdb as mysql import pytest from pyquery import PyQuery as pq from olympia.addons.models import Addon from olympia.amo.tests import reverse_ns @pytest.yield_fixture def read_only_mode(client, settings, db): def _db_error(*args, **kwargs): raise mysql.OperationalError("You can't do this in read-only mode.") settings.REPLICA_DATABASES = ['default'] models.signals.pre_save.connect(_db_error) models.signals.pre_delete.connect(_db_error) from olympia.lib.settings_base import read_only_mode env = { 'REPLICA_DATABASES': settings.REPLICA_DATABASES, 'DATABASES': settings.DATABASES, } read_only_mode(env) for key, value in env.items(): setattr(settings, key, value) client.handler.load_middleware() yield models.signals.pre_save.disconnect(_db_error) models.signals.pre_delete.disconnect(_db_error) def test_db_error(read_only_mode): with pytest.raises(mysql.OperationalError): Addon.objects.create(id=12) def test_bail_on_post(read_only_mode, client): response = client.post('/en-US/developers/') assert response.status_code == 503 title = pq(response.content)('title').text() assert title.startswith('Maintenance in progress'), title @pytest.mark.parametrize('method', ('post', 'put', 'delete', 'patch')) def test_api_bail_on_write_method(read_only_mode, client, method): response = getattr(client, method)(reverse_ns('abusereportuser-list')) assert response.status_code == 503 assert 'website maintenance' in response.json()['error']
bsd-3-clause
arbrandes/edx-platform
common/djangoapps/student/tests/test_receivers.py
3
1701
""" Tests for student signal receivers. """ from edx_toggles.toggles.testutils import override_waffle_flag from lms.djangoapps.courseware.toggles import COURSEWARE_MICROFRONTEND_PROGRESS_MILESTONES from common.djangoapps.student.models import CourseEnrollmentCelebration from common.djangoapps.student.tests.factories import CourseEnrollmentFactory from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase class ReceiversTest(SharedModuleStoreTestCase): """ Tests for dashboard utility functions """ @override_waffle_flag(COURSEWARE_MICROFRONTEND_PROGRESS_MILESTONES, active=True) def test_celebration_created(self): """ Test that we make celebration objects when enrollments are created """ assert CourseEnrollmentCelebration.objects.count() == 0 # Test initial creation upon an enrollment being made enrollment = CourseEnrollmentFactory() assert CourseEnrollmentCelebration.objects.count() == 1 celebration = CourseEnrollmentCelebration.objects.get(enrollment=enrollment, celebrate_first_section=True) # Test nothing changes if we update that enrollment celebration.celebrate_first_section = False celebration.save() enrollment.mode = 'test-mode' enrollment.save() assert CourseEnrollmentCelebration.objects.count() == 1 CourseEnrollmentCelebration.objects.get(enrollment=enrollment, celebrate_first_section=False) def test_celebration_gated_by_waffle(self): """ Test we don't make a celebration if the MFE redirect waffle flag is off """ CourseEnrollmentFactory() assert CourseEnrollmentCelebration.objects.count() == 0
agpl-3.0
aveshagarwal/cluster-capacity
vendor/k8s.io/kubernetes/examples/cluster-dns/images/backend/server.py
504
1293
#!/usr/bin/env python # Copyright 2015 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer PORT_NUMBER = 8000 # This class will handles any incoming request. class HTTPHandler(BaseHTTPRequestHandler): # Handler for the GET requests def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() self.wfile.write("Hello World!") try: # Create a web server and define the handler to manage the incoming request. server = HTTPServer(('', PORT_NUMBER), HTTPHandler) print 'Started httpserver on port ' , PORT_NUMBER server.serve_forever() except KeyboardInterrupt: print '^C received, shutting down the web server' server.socket.close()
apache-2.0
c22n/ion-channel-ABC
docs/examples/human-atrial/data/ito/Shibata1989/data_Shibata1989.py
1
1285
### Digitised data from [Shibata1989] import numpy as np # IV Curves # Steady State Activation def Act_Shibata(): """ Steady-State activation curve [Shibata1989] cf Fig 3a """ x = np.arange(-30, 70, 10).tolist() + [80,] y = np.asarray([0.05186808768647655, 0.12676019804355798, 0.28385270591439526, 0.4992526352308727, 0.7266071584899667, 0.8298926453470465, 0.8973078656494293, 0.9423021428779867, 0.9693629080109075, 0.9949224588787261, 0.9922442666908765]) ylower = np.asarray([0.04290295282036993, 0.05202047876738325, 0.27488757104828865, 0.48878952846308743, 0.7146493221839265, 0.7491772502812752, 0.8883394884198992, 0.9094148506729527, 0.9588998012431224, 0.9814698930344308, 0.9817779175596678]) sem = np.abs(y-ylower) N = 3 sd = np.sqrt(N)*sem return x, y.tolist(), sd.tolist()
gpl-3.0
EvanK/ansible
lib/ansible/modules/network/fortios/fortios_firewall_multicast_address6.py
24
10083
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # the lib use python logging can get it if the following is set in your # Ansible config. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_firewall_multicast_address6 short_description: Configure IPv6 multicast address in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS by allowing the user to configure firewall feature and multicast_address6 category. Examples includes all options and need to be adjusted to datasources before usage. Tested with FOS v6.0.2 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate ip address. required: true username: description: - FortiOS or FortiGate username. required: true password: description: - FortiOS or FortiGate password. default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. 
default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol type: bool default: true firewall_multicast_address6: description: - Configure IPv6 multicast address. default: null suboptions: state: description: - Indicates whether to create or remove the object choices: - present - absent color: description: - Color of icon on the GUI. comment: description: - Comment. ip6: description: - "IPv6 address prefix (format: xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx/xxx)." name: description: - IPv6 multicast address name. required: true tagging: description: - Config object tagging. suboptions: category: description: - Tag category. Source system.object-tagging.category. name: description: - Tagging entry name. required: true tags: description: - Tags. suboptions: name: description: - Tag name. Source system.object-tagging.tags.name. required: true visibility: description: - Enable/disable visibility of the IPv6 multicast address on the GUI. choices: - enable - disable ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" tasks: - name: Configure IPv6 multicast address. fortios_firewall_multicast_address6: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" firewall_multicast_address6: state: "present" color: "3" comment: "Comment." 
ip6: "<your_own_value>" name: "default_name_6" tagging: - category: "<your_own_value> (source system.object-tagging.category)" name: "default_name_9" tags: - name: "default_name_11 (source system.object-tagging.tags.name)" visibility: "enable" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule fos = None def login(data): host = data['host'] username = data['username'] password = data['password'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password) def filter_firewall_multicast_address6_data(json): option_list = ['color', 'comment', 'ip6', 'name', 'tagging', 'visibility'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def 
firewall_multicast_address6(data, fos): vdom = data['vdom'] firewall_multicast_address6_data = data['firewall_multicast_address6'] filtered_data = filter_firewall_multicast_address6_data(firewall_multicast_address6_data) if firewall_multicast_address6_data['state'] == "present": return fos.set('firewall', 'multicast-address6', data=filtered_data, vdom=vdom) elif firewall_multicast_address6_data['state'] == "absent": return fos.delete('firewall', 'multicast-address6', mkey=filtered_data['name'], vdom=vdom) def fortios_firewall(data, fos): login(data) methodlist = ['firewall_multicast_address6'] for method in methodlist: if data[method]: resp = eval(method)(data, fos) break fos.logout() return not resp['status'] == "success", resp['status'] == "success", resp def main(): fields = { "host": {"required": True, "type": "str"}, "username": {"required": True, "type": "str"}, "password": {"required": False, "type": "str", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "firewall_multicast_address6": { "required": False, "type": "dict", "options": { "state": {"required": True, "type": "str", "choices": ["present", "absent"]}, "color": {"required": False, "type": "int"}, "comment": {"required": False, "type": "str"}, "ip6": {"required": False, "type": "str"}, "name": {"required": True, "type": "str"}, "tagging": {"required": False, "type": "list", "options": { "category": {"required": False, "type": "str"}, "name": {"required": True, "type": "str"}, "tags": {"required": False, "type": "list", "options": { "name": {"required": True, "type": "str"} }} }}, "visibility": {"required": False, "type": "str", "choices": ["enable", "disable"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") global fos fos = FortiOSAPI() is_error, has_changed, result 
= fortios_firewall(module.params, fos) if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
gpl-3.0
mozilla/stoneridge
python/src/Lib/distutils/command/install_egg_info.py
438
2587
"""distutils.command.install_egg_info Implements the Distutils 'install_egg_info' command, for installing a package's PKG-INFO metadata.""" from distutils.cmd import Command from distutils import log, dir_util import os, sys, re class install_egg_info(Command): """Install an .egg-info file for the package""" description = "Install package's PKG-INFO metadata as an .egg-info file" user_options = [ ('install-dir=', 'd', "directory to install to"), ] def initialize_options(self): self.install_dir = None def finalize_options(self): self.set_undefined_options('install_lib',('install_dir','install_dir')) basename = "%s-%s-py%s.egg-info" % ( to_filename(safe_name(self.distribution.get_name())), to_filename(safe_version(self.distribution.get_version())), sys.version[:3] ) self.target = os.path.join(self.install_dir, basename) self.outputs = [self.target] def run(self): target = self.target if os.path.isdir(target) and not os.path.islink(target): dir_util.remove_tree(target, dry_run=self.dry_run) elif os.path.exists(target): self.execute(os.unlink,(self.target,),"Removing "+target) elif not os.path.isdir(self.install_dir): self.execute(os.makedirs, (self.install_dir,), "Creating "+self.install_dir) log.info("Writing %s", target) if not self.dry_run: f = open(target, 'w') self.distribution.metadata.write_pkg_file(f) f.close() def get_outputs(self): return self.outputs # The following routines are taken from setuptools' pkg_resources module and # can be replaced by importing them from pkg_resources once it is included # in the stdlib. def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. """ return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """Convert an arbitrary string to a standard version string Spaces become dots, and all other non-alphanumeric characters become dashes, with runs of multiple dashes condensed to a single dash. 
""" version = version.replace(' ','.') return re.sub('[^A-Za-z0-9.]+', '-', version) def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-','_')
mpl-2.0
dcosentino/edx-platform
lms/djangoapps/lms_xblock/runtime.py
7
7991
""" Module implementing `xblock.runtime.Runtime` functionality for the LMS """ import re import xblock.reference.plugins from django.core.urlresolvers import reverse from django.conf import settings from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig from openedx.core.djangoapps.user_api.api import course_tag as user_course_tag_api from xmodule.modulestore.django import modulestore from xmodule.library_tools import LibraryToolsService from xmodule.x_module import ModuleSystem from xmodule.partitions.partitions_service import PartitionService def _quote_slashes(match): """ Helper function for `quote_slashes` """ matched = match.group(0) # We have to escape ';', because that is our # escape sequence identifier (otherwise, the escaping) # couldn't distinguish between us adding ';_' to the string # and ';_' appearing naturally in the string if matched == ';': return ';;' elif matched == '/': return ';_' else: return matched def quote_slashes(text): """ Quote '/' characters so that they aren't visible to django's url quoting, unquoting, or url regex matching. Escapes '/'' to the sequence ';_', and ';' to the sequence ';;'. By making the escape sequence fixed length, and escaping identifier character ';', we are able to reverse the escaping. """ return re.sub(ur'[;/]', _quote_slashes, text) def _unquote_slashes(match): """ Helper function for `unquote_slashes` """ matched = match.group(0) if matched == ';;': return ';' elif matched == ';_': return '/' else: return matched def unquote_slashes(text): """ Unquote slashes quoted by `quote_slashes` """ return re.sub(r'(;;|;_)', _unquote_slashes, text) class LmsHandlerUrls(object): """ A runtime mixin that provides a handler_url function that routes to the LMS' xblock handler view. 
This must be mixed in to a runtime that already accepts and stores a course_id """ # pylint: disable=unused-argument # pylint: disable=no-member def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False): """See :method:`xblock.runtime:Runtime.handler_url`""" view_name = 'xblock_handler' if handler_name: # Be sure this is really a handler. func = getattr(block, handler_name, None) if not func: raise ValueError("{!r} is not a function name".format(handler_name)) if not getattr(func, "_is_xblock_handler", False): raise ValueError("{!r} is not a handler name".format(handler_name)) if thirdparty: view_name = 'xblock_handler_noauth' url = reverse(view_name, kwargs={ 'course_id': unicode(self.course_id), 'usage_id': quote_slashes(unicode(block.scope_ids.usage_id).encode('utf-8')), 'handler': handler_name, 'suffix': suffix, }) # If suffix is an empty string, remove the trailing '/' if not suffix: url = url.rstrip('/') # If there is a query string, append it if query: url += '?' + query # If third-party, return fully-qualified url if thirdparty: scheme = "https" if settings.HTTPS == "on" else "http" url = '{scheme}://{host}{path}'.format( scheme=scheme, host=settings.SITE_NAME, path=url ) return url def local_resource_url(self, block, uri): """ local_resource_url for Studio """ return reverse('xblock_resource_url', kwargs={ 'block_type': block.scope_ids.block_type, 'uri': uri, }) class LmsPartitionService(PartitionService): """ Another runtime mixin that provides access to the student partitions defined on the course. (If and when XBlock directly provides access from one block (e.g. a split_test_module) to another (e.g. a course_module), this won't be necessary, but for now it seems like the least messy way to hook things through) """ @property def course_partitions(self): course = modulestore().get_course(self._course_id) return course.user_partitions class UserTagsService(object): """ A runtime class that provides an interface to the user service. 
It handles filling in the current course id and current user. """ COURSE_SCOPE = user_course_tag_api.COURSE_SCOPE def __init__(self, runtime): self.runtime = runtime def _get_current_user(self): """Returns the real, not anonymized, current user.""" real_user = self.runtime.get_real_user(self.runtime.anonymous_student_id) return real_user def get_tag(self, scope, key): """ Get a user tag for the current course and the current user for a given key scope: the current scope of the runtime key: the key for the value we want """ if scope != user_course_tag_api.COURSE_SCOPE: raise ValueError("unexpected scope {0}".format(scope)) return user_course_tag_api.get_course_tag( self._get_current_user(), self.runtime.course_id, key ) def set_tag(self, scope, key, value): """ Set the user tag for the current course and the current user for a given key scope: the current scope of the runtime key: the key that to the value to be set value: the value to set """ if scope != user_course_tag_api.COURSE_SCOPE: raise ValueError("unexpected scope {0}".format(scope)) return user_course_tag_api.set_course_tag( self._get_current_user(), self.runtime.course_id, key, value ) class LmsModuleSystem(LmsHandlerUrls, ModuleSystem): # pylint: disable=abstract-method """ ModuleSystem specialized to the LMS """ def __init__(self, **kwargs): services = kwargs.setdefault('services', {}) services['user_tags'] = UserTagsService(self) services['partitions'] = LmsPartitionService( user=kwargs.get('user'), course_id=kwargs.get('course_id'), track_function=kwargs.get('track_function', None), ) services['library_tools'] = LibraryToolsService(modulestore()) services['fs'] = xblock.reference.plugins.FSService() self.request_token = kwargs.pop('request_token', None) super(LmsModuleSystem, self).__init__(**kwargs) def wrap_aside(self, block, aside, view, frag, context): """ Creates a div which identifies the aside, points to the original block, and writes out the json_init_args into a script tag. 
The default implementation creates a frag to wraps frag w/ a div identifying the xblock. If you have javascript, you'll need to override this impl """ extra_data = { 'block-id': quote_slashes(unicode(block.scope_ids.usage_id)), 'url-selector': 'asideBaseUrl', 'runtime-class': 'LmsRuntime', } if self.request_token: extra_data['request-token'] = self.request_token return self._wrap_ele( aside, view, frag, extra_data, ) def applicable_aside_types(self, block): """ Return all of the asides which might be decorating this `block`. Arguments: block (:class:`.XBlock`): The block to render retrieve asides for. """ config = XBlockAsidesConfig.current() if not config.enabled: return [] if block.scope_ids.block_type in config.disabled_blocks.split(): return [] return super(LmsModuleSystem, self).applicable_aside_types()
agpl-3.0
tejasnikumbh/AllSAT
lib/python2.7/site-packages/numpy/distutils/command/build_src.py
141
32258
""" Build swig, f2py, pyrex sources. """ from __future__ import division, absolute_import, print_function import os import re import sys import shlex import copy from distutils.command import build_ext from distutils.dep_util import newer_group, newer from distutils.util import get_platform from distutils.errors import DistutilsError, DistutilsSetupError def have_pyrex(): try: import Pyrex.Compiler.Main return True except ImportError: return False # this import can't be done here, as it uses numpy stuff only available # after it's installed #import numpy.f2py from numpy.distutils import log from numpy.distutils.misc_util import fortran_ext_match, \ appendpath, is_string, is_sequence, get_cmd from numpy.distutils.from_template import process_file as process_f_file from numpy.distutils.conv_template import process_file as process_c_file def subst_vars(target, source, d): """Substitute any occurence of @foo@ by d['foo'] from source file into target.""" var = re.compile('@([a-zA-Z_]+)@') fs = open(source, 'r') try: ft = open(target, 'w') try: for l in fs: m = var.search(l) if m: ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) else: ft.write(l) finally: ft.close() finally: fs.close() class build_src(build_ext.build_ext): description = "build sources from SWIG, F2PY files or a function" user_options = [ ('build-src=', 'd', "directory to \"build\" sources to"), ('f2py-opts=', None, "list of f2py command line options"), ('swig=', None, "path to the SWIG executable"), ('swig-opts=', None, "list of SWIG command line options"), ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete ('force', 'f', "forcibly build everything (ignore file timestamps)"), ('inplace', 'i', "ignore build-lib and put compiled extensions into the source " + "directory alongside your pure 
Python modules"), ] boolean_options = ['force', 'inplace'] help_options = [] def initialize_options(self): self.extensions = None self.package = None self.py_modules = None self.py_modules_dict = None self.build_src = None self.build_lib = None self.build_base = None self.force = None self.inplace = None self.package_dir = None self.f2pyflags = None # obsolete self.f2py_opts = None self.swigflags = None # obsolete self.swig_opts = None self.swig_cpp = None self.swig = None def finalize_options(self): self.set_undefined_options('build', ('build_base', 'build_base'), ('build_lib', 'build_lib'), ('force', 'force')) if self.package is None: self.package = self.distribution.ext_package self.extensions = self.distribution.ext_modules self.libraries = self.distribution.libraries or [] self.py_modules = self.distribution.py_modules or [] self.data_files = self.distribution.data_files or [] if self.build_src is None: plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) # py_modules_dict is used in build_py.find_package_modules self.py_modules_dict = {} if self.f2pyflags: if self.f2py_opts: log.warn('ignoring --f2pyflags as --f2py-opts already used') else: self.f2py_opts = self.f2pyflags self.f2pyflags = None if self.f2py_opts is None: self.f2py_opts = [] else: self.f2py_opts = shlex.split(self.f2py_opts) if self.swigflags: if self.swig_opts: log.warn('ignoring --swigflags as --swig-opts already used') else: self.swig_opts = self.swigflags self.swigflags = None if self.swig_opts is None: self.swig_opts = [] else: self.swig_opts = shlex.split(self.swig_opts) # use options from build_ext command build_ext = self.get_finalized_command('build_ext') if self.inplace is None: self.inplace = build_ext.inplace if self.swig_cpp is None: self.swig_cpp = build_ext.swig_cpp for c in ['swig', 'swig_opt']: o = '--'+c.replace('_', '-') v = getattr(build_ext, c, None) if v: if getattr(self, c): log.warn('both 
build_src and build_ext define %s option' % (o)) else: log.info('using "%s=%s" option from build_ext command' % (o, v)) setattr(self, c, v) def run(self): log.info("build_src") if not (self.extensions or self.libraries): return self.build_sources() def build_sources(self): if self.inplace: self.get_package_dir = \ self.get_finalized_command('build_py').get_package_dir self.build_py_modules_sources() for libname_info in self.libraries: self.build_library_sources(*libname_info) if self.extensions: self.check_extensions_list(self.extensions) for ext in self.extensions: self.build_extension_sources(ext) self.build_data_files_sources() self.build_npy_pkg_config() def build_data_files_sources(self): if not self.data_files: return log.info('building data_files sources') from numpy.distutils.misc_util import get_data_files new_data_files = [] for data in self.data_files: if isinstance(data, str): new_data_files.append(data) elif isinstance(data, tuple): d, files = data if self.inplace: build_dir = self.get_package_dir('.'.join(d.split(os.sep))) else: build_dir = os.path.join(self.build_src, d) funcs = [f for f in files if hasattr(f, '__call__')] files = [f for f in files if not hasattr(f, '__call__')] for f in funcs: if f.__code__.co_argcount==1: s = f(build_dir) else: s = f() if s is not None: if isinstance(s, list): files.extend(s) elif isinstance(s, str): files.append(s) else: raise TypeError(repr(s)) filenames = get_data_files((d, files)) new_data_files.append((d, filenames)) else: raise TypeError(repr(data)) self.data_files[:] = new_data_files def _build_npy_pkg_config(self, info, gd): import shutil template, install_dir, subst_dict = info template_dir = os.path.dirname(template) for k, v in gd.items(): subst_dict[k] = v if self.inplace == 1: generated_dir = os.path.join(template_dir, install_dir) else: generated_dir = os.path.join(self.build_src, template_dir, install_dir) generated = os.path.basename(os.path.splitext(template)[0]) generated_path = 
os.path.join(generated_dir, generated) if not os.path.exists(generated_dir): os.makedirs(generated_dir) subst_vars(generated_path, template, subst_dict) # Where to install relatively to install prefix full_install_dir = os.path.join(template_dir, install_dir) return full_install_dir, generated_path def build_npy_pkg_config(self): log.info('build_src: building npy-pkg config files') # XXX: another ugly workaround to circumvent distutils brain damage. We # need the install prefix here, but finalizing the options of the # install command when only building sources cause error. Instead, we # copy the install command instance, and finalize the copy so that it # does not disrupt how distutils want to do things when with the # original install command instance. install_cmd = copy.copy(get_cmd('install')) if not install_cmd.finalized == 1: install_cmd.finalize_options() build_npkg = False gd = {} if self.inplace == 1: top_prefix = '.' build_npkg = True elif hasattr(install_cmd, 'install_libbase'): top_prefix = install_cmd.install_libbase build_npkg = True if build_npkg: for pkg, infos in self.distribution.installed_pkg_config.items(): pkg_path = self.distribution.package_dir[pkg] prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) d = {'prefix': prefix} for info in infos: install_dir, generated = self._build_npy_pkg_config(info, d) self.distribution.data_files.append((install_dir, [generated])) def build_py_modules_sources(self): if not self.py_modules: return log.info('building py_modules sources') new_py_modules = [] for source in self.py_modules: if is_sequence(source) and len(source)==3: package, module_base, source = source if self.inplace: build_dir = self.get_package_dir(package) else: build_dir = os.path.join(self.build_src, os.path.join(*package.split('.'))) if hasattr(source, '__call__'): target = os.path.join(build_dir, module_base + '.py') source = source(target) if source is None: continue modules = [(package, module_base, source)] if package not in 
self.py_modules_dict: self.py_modules_dict[package] = [] self.py_modules_dict[package] += modules else: new_py_modules.append(source) self.py_modules[:] = new_py_modules def build_library_sources(self, lib_name, build_info): sources = list(build_info.get('sources', [])) if not sources: return log.info('building library "%s" sources' % (lib_name)) sources = self.generate_sources(sources, (lib_name, build_info)) sources = self.template_sources(sources, (lib_name, build_info)) sources, h_files = self.filter_h_files(sources) if h_files: log.info('%s - nothing done with h_files = %s', self.package, h_files) #for f in h_files: # self.distribution.headers.append((lib_name,f)) build_info['sources'] = sources return def build_extension_sources(self, ext): sources = list(ext.sources) log.info('building extension "%s" sources' % (ext.name)) fullname = self.get_ext_fullname(ext.name) modpath = fullname.split('.') package = '.'.join(modpath[0:-1]) if self.inplace: self.ext_target_dir = self.get_package_dir(package) sources = self.generate_sources(sources, ext) sources = self.template_sources(sources, ext) sources = self.swig_sources(sources, ext) sources = self.f2py_sources(sources, ext) sources = self.pyrex_sources(sources, ext) sources, py_files = self.filter_py_files(sources) if package not in self.py_modules_dict: self.py_modules_dict[package] = [] modules = [] for f in py_files: module = os.path.splitext(os.path.basename(f))[0] modules.append((package, module, f)) self.py_modules_dict[package] += modules sources, h_files = self.filter_h_files(sources) if h_files: log.info('%s - nothing done with h_files = %s', package, h_files) #for f in h_files: # self.distribution.headers.append((package,f)) ext.sources = sources def generate_sources(self, sources, extension): new_sources = [] func_sources = [] for source in sources: if is_string(source): new_sources.append(source) else: func_sources.append(source) if not func_sources: return new_sources if self.inplace and not 
is_sequence(extension): build_dir = self.ext_target_dir else: if is_sequence(extension): name = extension[0] # if 'include_dirs' not in extension[1]: # extension[1]['include_dirs'] = [] # incl_dirs = extension[1]['include_dirs'] else: name = extension.name # incl_dirs = extension.include_dirs #if self.build_src not in incl_dirs: # incl_dirs.append(self.build_src) build_dir = os.path.join(*([self.build_src]\ +name.split('.')[:-1])) self.mkpath(build_dir) for func in func_sources: source = func(extension, build_dir) if not source: continue if is_sequence(source): [log.info(" adding '%s' to sources." % (s,)) for s in source] new_sources.extend(source) else: log.info(" adding '%s' to sources." % (source,)) new_sources.append(source) return new_sources def filter_py_files(self, sources): return self.filter_files(sources, ['.py']) def filter_h_files(self, sources): return self.filter_files(sources, ['.h', '.hpp', '.inc']) def filter_files(self, sources, exts = []): new_sources = [] files = [] for source in sources: (base, ext) = os.path.splitext(source) if ext in exts: files.append(source) else: new_sources.append(source) return new_sources, files def template_sources(self, sources, extension): new_sources = [] if is_sequence(extension): depends = extension[1].get('depends') include_dirs = extension[1].get('include_dirs') else: depends = extension.depends include_dirs = extension.include_dirs for source in sources: (base, ext) = os.path.splitext(source) if ext == '.src': # Template file if self.inplace: target_dir = os.path.dirname(base) else: target_dir = appendpath(self.build_src, os.path.dirname(base)) self.mkpath(target_dir) target_file = os.path.join(target_dir, os.path.basename(base)) if (self.force or newer_group([source] + depends, target_file)): if _f_pyf_ext_match(base): log.info("from_template:> %s" % (target_file)) outstr = process_f_file(source) else: log.info("conv_template:> %s" % (target_file)) outstr = process_c_file(source) fid = open(target_file, 'w') 
fid.write(outstr) fid.close() if _header_ext_match(target_file): d = os.path.dirname(target_file) if d not in include_dirs: log.info(" adding '%s' to include_dirs." % (d)) include_dirs.append(d) new_sources.append(target_file) else: new_sources.append(source) return new_sources def pyrex_sources(self, sources, extension): new_sources = [] ext_name = extension.name.split('.')[-1] for source in sources: (base, ext) = os.path.splitext(source) if ext == '.pyx': target_file = self.generate_a_pyrex_source(base, ext_name, source, extension) new_sources.append(target_file) else: new_sources.append(source) return new_sources def generate_a_pyrex_source(self, base, ext_name, source, extension): if self.inplace or not have_pyrex(): target_dir = os.path.dirname(base) else: target_dir = appendpath(self.build_src, os.path.dirname(base)) target_file = os.path.join(target_dir, ext_name + '.c') depends = [source] + extension.depends if self.force or newer_group(depends, target_file, 'newer'): if have_pyrex(): import Pyrex.Compiler.Main log.info("pyrexc:> %s" % (target_file)) self.mkpath(target_dir) options = Pyrex.Compiler.Main.CompilationOptions( defaults=Pyrex.Compiler.Main.default_options, include_path=extension.include_dirs, output_file=target_file) pyrex_result = Pyrex.Compiler.Main.compile(source, options=options) if pyrex_result.num_errors != 0: raise DistutilsError("%d errors while compiling %r with Pyrex" \ % (pyrex_result.num_errors, source)) elif os.path.isfile(target_file): log.warn("Pyrex required for compiling %r but not available,"\ " using old target %r"\ % (source, target_file)) else: raise DistutilsError("Pyrex required for compiling %r"\ " but notavailable" % (source,)) return target_file def f2py_sources(self, sources, extension): new_sources = [] f2py_sources = [] f_sources = [] f2py_targets = {} target_dirs = [] ext_name = extension.name.split('.')[-1] skip_f2py = 0 for source in sources: (base, ext) = os.path.splitext(source) if ext == '.pyf': # F2PY 
interface file if self.inplace: target_dir = os.path.dirname(base) else: target_dir = appendpath(self.build_src, os.path.dirname(base)) if os.path.isfile(source): name = get_f2py_modulename(source) if name != ext_name: raise DistutilsSetupError('mismatch of extension names: %s ' 'provides %r but expected %r' % ( source, name, ext_name)) target_file = os.path.join(target_dir, name+'module.c') else: log.debug(' source %s does not exist: skipping f2py\'ing.' \ % (source)) name = ext_name skip_f2py = 1 target_file = os.path.join(target_dir, name+'module.c') if not os.path.isfile(target_file): log.warn(' target %s does not exist:\n '\ 'Assuming %smodule.c was generated with '\ '"build_src --inplace" command.' \ % (target_file, name)) target_dir = os.path.dirname(base) target_file = os.path.join(target_dir, name+'module.c') if not os.path.isfile(target_file): raise DistutilsSetupError("%r missing" % (target_file,)) log.info(' Yes! Using %r as up-to-date target.' \ % (target_file)) target_dirs.append(target_dir) f2py_sources.append(source) f2py_targets[source] = target_file new_sources.append(target_file) elif fortran_ext_match(ext): f_sources.append(source) else: new_sources.append(source) if not (f2py_sources or f_sources): return new_sources for d in target_dirs: self.mkpath(d) f2py_options = extension.f2py_options + self.f2py_opts if self.distribution.libraries: for name, build_info in self.distribution.libraries: if name in extension.libraries: f2py_options.extend(build_info.get('f2py_options', [])) log.info("f2py options: %s" % (f2py_options)) if f2py_sources: if len(f2py_sources) != 1: raise DistutilsSetupError( 'only one .pyf file is allowed per extension module but got'\ ' more: %r' % (f2py_sources,)) source = f2py_sources[0] target_file = f2py_targets[source] target_dir = os.path.dirname(target_file) or '.' 
depends = [source] + extension.depends if (self.force or newer_group(depends, target_file, 'newer')) \ and not skip_f2py: log.info("f2py: %s" % (source)) import numpy.f2py numpy.f2py.run_main(f2py_options + ['--build-dir', target_dir, source]) else: log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) else: #XXX TODO: --inplace support for sdist command if is_sequence(extension): name = extension[0] else: name = extension.name target_dir = os.path.join(*([self.build_src]\ +name.split('.')[:-1])) target_file = os.path.join(target_dir, ext_name + 'module.c') new_sources.append(target_file) depends = f_sources + extension.depends if (self.force or newer_group(depends, target_file, 'newer')) \ and not skip_f2py: log.info("f2py:> %s" % (target_file)) self.mkpath(target_dir) import numpy.f2py numpy.f2py.run_main(f2py_options + ['--lower', '--build-dir', target_dir]+\ ['-m', ext_name]+f_sources) else: log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ % (target_file)) if not os.path.isfile(target_file): raise DistutilsError("f2py target file %r not generated" % (target_file,)) target_c = os.path.join(self.build_src, 'fortranobject.c') target_h = os.path.join(self.build_src, 'fortranobject.h') log.info(" adding '%s' to sources." % (target_c)) new_sources.append(target_c) if self.build_src not in extension.include_dirs: log.info(" adding '%s' to include_dirs." 
\ % (self.build_src)) extension.include_dirs.append(self.build_src) if not skip_f2py: import numpy.f2py d = os.path.dirname(numpy.f2py.__file__) source_c = os.path.join(d, 'src', 'fortranobject.c') source_h = os.path.join(d, 'src', 'fortranobject.h') if newer(source_c, target_c) or newer(source_h, target_h): self.mkpath(os.path.dirname(target_c)) self.copy_file(source_c, target_c) self.copy_file(source_h, target_h) else: if not os.path.isfile(target_c): raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) if not os.path.isfile(target_h): raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: filename = os.path.join(target_dir, ext_name + name_ext) if os.path.isfile(filename): log.info(" adding '%s' to sources." % (filename)) f_sources.append(filename) return new_sources + f_sources def swig_sources(self, sources, extension): # Assuming SWIG 1.3.14 or later. See compatibility note in # http://www.swig.org/Doc1.3/Python.html#Python_nn6 new_sources = [] swig_sources = [] swig_targets = {} target_dirs = [] py_files = [] # swig generated .py files target_ext = '.c' if '-c++' in extension.swig_opts: typ = 'c++' is_cpp = True extension.swig_opts.remove('-c++') elif self.swig_cpp: typ = 'c++' is_cpp = True else: typ = None is_cpp = False skip_swig = 0 ext_name = extension.name.split('.')[-1] for source in sources: (base, ext) = os.path.splitext(source) if ext == '.i': # SWIG interface file # the code below assumes that the sources list # contains not more than one .i SWIG interface file if self.inplace: target_dir = os.path.dirname(base) py_target_dir = self.ext_target_dir else: target_dir = appendpath(self.build_src, os.path.dirname(base)) py_target_dir = target_dir if os.path.isfile(source): name = get_swig_modulename(source) if name != ext_name[1:]: raise DistutilsSetupError( 'mismatch of extension names: %s provides %r' ' but expected %r' % (source, name, ext_name[1:])) 
if typ is None: typ = get_swig_target(source) is_cpp = typ=='c++' else: typ2 = get_swig_target(source) if typ2 is None: log.warn('source %r does not define swig target, assuming %s swig target' \ % (source, typ)) elif typ!=typ2: log.warn('expected %r but source %r defines %r swig target' \ % (typ, source, typ2)) if typ2=='c++': log.warn('resetting swig target to c++ (some targets may have .c extension)') is_cpp = True else: log.warn('assuming that %r has c++ swig target' % (source)) if is_cpp: target_ext = '.cpp' target_file = os.path.join(target_dir, '%s_wrap%s' \ % (name, target_ext)) else: log.warn(' source %s does not exist: skipping swig\'ing.' \ % (source)) name = ext_name[1:] skip_swig = 1 target_file = _find_swig_target(target_dir, name) if not os.path.isfile(target_file): log.warn(' target %s does not exist:\n '\ 'Assuming %s_wrap.{c,cpp} was generated with '\ '"build_src --inplace" command.' \ % (target_file, name)) target_dir = os.path.dirname(base) target_file = _find_swig_target(target_dir, name) if not os.path.isfile(target_file): raise DistutilsSetupError("%r missing" % (target_file,)) log.warn(' Yes! Using %r as up-to-date target.' 
\ % (target_file)) target_dirs.append(target_dir) new_sources.append(target_file) py_files.append(os.path.join(py_target_dir, name+'.py')) swig_sources.append(source) swig_targets[source] = new_sources[-1] else: new_sources.append(source) if not swig_sources: return new_sources if skip_swig: return new_sources + py_files for d in target_dirs: self.mkpath(d) swig = self.swig or self.find_swig() swig_cmd = [swig, "-python"] + extension.swig_opts if is_cpp: swig_cmd.append('-c++') for d in extension.include_dirs: swig_cmd.append('-I'+d) for source in swig_sources: target = swig_targets[source] depends = [source] + extension.depends if self.force or newer_group(depends, target, 'newer'): log.info("%s: %s" % (os.path.basename(swig) \ + (is_cpp and '++' or ''), source)) self.spawn(swig_cmd + self.swig_opts \ + ["-o", target, '-outdir', py_target_dir, source]) else: log.debug(" skipping '%s' swig interface (up-to-date)" \ % (source)) return new_sources + py_files _f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match _header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match #### SWIG related auxiliary functions #### _swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)', re.I).match _has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search _has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search def get_swig_target(source): f = open(source, 'r') result = None line = f.readline() if _has_cpp_header(line): result = 'c++' if _has_c_header(line): result = 'c' f.close() return result def get_swig_modulename(source): f = open(source, 'r') name = None for line in f: m = _swig_module_name_match(line) if m: name = m.group('name') break f.close() return name def _find_swig_target(target_dir, name): for ext in ['.cpp', '.c']: target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) if os.path.isfile(target): break return target #### F2PY related auxiliary 
# #### F2PY related auxiliary functions ####

# Matches the module name in a "python module <name>" declaration of a .pyf file.
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
                                     re.I).match
# Matches f2py's internal "*__user__*" callback modules, which are not
# real extension modules and must be skipped.
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
                                          r'__user__[\w_]*)', re.I).match

def get_f2py_modulename(source):
    """Return the extension module name declared in a .pyf interface file.

    Scans *source* line by line for the first ``python module <name>``
    statement, ignoring ``*__user__*`` helper modules.  Returns None when
    no module declaration is found.
    """
    name = None
    # 'with' guarantees the file handle is closed even if iteration raises;
    # the original open()/close() pair leaked the handle on error.
    with open(source) as f:
        for line in f:
            m = _f2py_module_name_match(line)
            if m:
                if _f2py_user_module_name_match(line):
                    # skip *__user__* names
                    continue
                name = m.group('name')
                break
    return name

##########################################
mit
pxzhenren/flask
tests/test_user_error_handler.py
150
3483
# -*- coding: utf-8 -*-
from werkzeug.exceptions import Forbidden, InternalServerError
import flask


def test_error_handler_no_match():
    """An exception with its own registered handler uses that handler,
    while an unregistered exception (KeyError) falls through to the
    generic 500 handler."""
    app = flask.Flask(__name__)

    class CustomException(Exception):
        pass

    @app.errorhandler(CustomException)
    def custom_exception_handler(e):
        assert isinstance(e, CustomException)
        return 'custom'

    @app.errorhandler(500)
    def handle_500(e):
        # Echo the exception class name so the asserts below can see
        # which exception actually reached the 500 handler.
        return type(e).__name__

    @app.route('/custom')
    def custom_test():
        raise CustomException()

    @app.route('/keyerror')
    def key_error():
        raise KeyError()

    c = app.test_client()

    assert c.get('/custom').data == b'custom'
    assert c.get('/keyerror').data == b'KeyError'


def test_error_handler_subclass():
    """A handler registered for a parent exception class also catches
    unregistered subclasses; a subclass with its own handler takes
    precedence over the parent's handler."""
    app = flask.Flask(__name__)

    class ParentException(Exception):
        pass

    class ChildExceptionUnregistered(ParentException):
        pass

    class ChildExceptionRegistered(ParentException):
        pass

    @app.errorhandler(ParentException)
    def parent_exception_handler(e):
        assert isinstance(e, ParentException)
        return 'parent'

    @app.errorhandler(ChildExceptionRegistered)
    def child_exception_handler(e):
        assert isinstance(e, ChildExceptionRegistered)
        return 'child-registered'

    @app.route('/parent')
    def parent_test():
        raise ParentException()

    @app.route('/child-unregistered')
    def unregistered_test():
        raise ChildExceptionUnregistered()

    @app.route('/child-registered')
    def registered_test():
        raise ChildExceptionRegistered()

    c = app.test_client()

    assert c.get('/parent').data == b'parent'
    assert c.get('/child-unregistered').data == b'parent'
    assert c.get('/child-registered').data == b'child-registered'


def test_error_handler_http_subclass():
    """A subclass of an HTTPException without its own handler falls back
    to the handler registered for the parent's status code (403); an
    explicitly registered subclass uses its own handler."""
    app = flask.Flask(__name__)

    class ForbiddenSubclassRegistered(Forbidden):
        pass

    class ForbiddenSubclassUnregistered(Forbidden):
        pass

    @app.errorhandler(403)
    def code_exception_handler(e):
        assert isinstance(e, Forbidden)
        return 'forbidden'

    @app.errorhandler(ForbiddenSubclassRegistered)
    def subclass_exception_handler(e):
        assert isinstance(e, ForbiddenSubclassRegistered)
        return 'forbidden-registered'

    @app.route('/forbidden')
    def forbidden_test():
        raise Forbidden()

    @app.route('/forbidden-registered')
    def registered_test():
        raise ForbiddenSubclassRegistered()

    @app.route('/forbidden-unregistered')
    def unregistered_test():
        raise ForbiddenSubclassUnregistered()

    c = app.test_client()

    assert c.get('/forbidden').data == b'forbidden'
    assert c.get('/forbidden-unregistered').data == b'forbidden'
    assert c.get('/forbidden-registered').data == b'forbidden-registered'


def test_error_handler_blueprint():
    """A 500 handler registered on a blueprint handles errors raised in
    that blueprint's routes; app-level routes still use the app-level
    handler."""
    bp = flask.Blueprint('bp', __name__)

    @bp.errorhandler(500)
    def bp_exception_handler(e):
        return 'bp-error'

    @bp.route('/error')
    def bp_test():
        raise InternalServerError()

    app = flask.Flask(__name__)

    @app.errorhandler(500)
    def app_exception_handler(e):
        return 'app-error'

    @app.route('/error')
    def app_test():
        raise InternalServerError()

    app.register_blueprint(bp, url_prefix='/bp')

    c = app.test_client()

    assert c.get('/error').data == b'app-error'
    assert c.get('/bp/error').data == b'bp-error'
bsd-3-clause
EdPassos/fofix
src/core/Task.py
7
1642
##################################################################### # -*- coding: iso-8859-1 -*- # # # # Frets on Fire # # Copyright (C) 2006 Sami Kyöstilä # # # # This program is free software; you can redistribute it and/or # # modify it under the terms of the GNU General Public License # # as published by the Free Software Foundation; either version 2 # # of the License, or (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program; if not, write to the Free Software # # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # # MA 02110-1301, USA. # ##################################################################### class Task(object): def __init__(self): pass def started(self): pass def stopped(self): pass def run(self, ticks): pass
gpl-2.0
daviddoria/PointGraphsPhase1
Examples/Rendering/Python/rainbow.py
15
3096
#!/usr/bin/env python

# This example demonstrates the use and manipulation of lookup tables.

import vtk
from vtk.util.colors import *
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# First create a simple pipeline that reads a structured grid and then
# extracts a plane from the grid.  The plane will be colored differently
# by using different lookup tables.
#
# Note: the Update method is manually invoked because it causes the
# reader to read; later on we use the output of the reader to set
# a range for the scalar values.
pl3d = vtk.vtkPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()

plane = vtk.vtkStructuredGridGeometryFilter()
plane.SetInputConnection(pl3d.GetOutputPort())
plane.SetExtent(1, 100, 1, 100, 7, 7)

# The lookup table maps scalar values to colors; it is attached to the
# plane's mapper and manipulated below.
lut = vtk.vtkLookupTable()
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetLookupTable(lut)
planeMapper.SetInputConnection(plane.GetOutputPort())
planeMapper.SetScalarRange(pl3d.GetOutput().GetScalarRange())
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)

# This creates an outline around the data.
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(pl3d.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)

# Much of the following is commented out.  To try different lookup tables,
# uncomment the appropriate portions.

# This creates a black to white lut.
##lut.SetHueRange(0, 0)
##lut.SetSaturationRange(0, 0)
##lut.SetValueRange(0.2, 1.0)

# This creates a red to blue lut.
##lut.SetHueRange(0.0, 0.667)

# This creates a blue to red lut.
##lut.SetHueRange(0.667, 0.0)

# This creates a weird banded effect.  The Build() method causes the lookup
# table to allocate memory and create a table based on the current
# hue, saturation, value, and alpha (transparency) range.  Here we then
# manually overwrite the values generated by the Build() method.
lut.SetNumberOfColors(256)
lut.Build()
for i in range(0, 16):
    lut.SetTableValue(i*16, red[0], red[1], red[2], 1)
    lut.SetTableValue(i*16+1, green[0], green[1], green[2], 1)
    lut.SetTableValue(i*16+2, blue[0], blue[1], blue[2], 1)
    lut.SetTableValue(i*16+3, black[0], black[1], black[2], 1)

# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actors to the renderer, set the background and size
ren.AddActor(outlineActor)
ren.AddActor(planeActor)
ren.SetBackground(0.1, 0.2, 0.4)
ren.TwoSidedLightingOff()
renWin.SetSize(250, 250)

# Hard-coded camera placement chosen for this particular dataset.
cam1 = ren.GetActiveCamera()
cam1.SetClippingRange(3.95297, 50)
cam1.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam1.SetPosition(-12.3332, 31.7479, 41.2387)
cam1.SetViewUp(0.060772, -0.319905, 0.945498)

iren.Initialize()
renWin.Render()
iren.Start()
bsd-3-clause
gsehub/edx-platform
lms/djangoapps/instructor_task/subtasks.py
21
27437
""" This module contains celery task functions for handling the management of subtasks. """ import json import logging from contextlib import contextmanager from time import time from uuid import uuid4 import psutil from celery.states import READY_STATES, RETRY, SUCCESS from django.core.cache import cache from django.db import DatabaseError, transaction import dogstats_wrapper as dog_stats_api from util.db import outer_atomic from .exceptions import DuplicateTaskException from .models import PROGRESS, QUEUING, InstructorTask TASK_LOG = logging.getLogger('edx.celery.task') # Lock expiration should be long enough to allow a subtask to complete. SUBTASK_LOCK_EXPIRE = 60 * 10 # Lock expires in 10 minutes # Number of times to retry if a subtask update encounters a lock on the InstructorTask. # (These are recursive retries, so don't make this number too large.) MAX_DATABASE_LOCK_RETRIES = 5 def _get_number_of_subtasks(total_num_items, items_per_task): """ Determines number of subtasks that would be generated by _generate_items_for_subtask. This needs to be calculated before the query is executed so that the list of all subtasks can be stored in the InstructorTask before any subtasks are started. The number of subtask_id values returned by this should match the number of chunks returned by the generate_items_for_subtask generator. """ num_subtasks, remainder = divmod(total_num_items, items_per_task) if remainder: num_subtasks += 1 return num_subtasks @contextmanager def track_memory_usage(metric, course_id): """ Context manager to track how much memory (in bytes) a given process uses. Metrics will look like: 'course_email.subtask_generation.memory.rss' or 'course_email.subtask_generation.memory.vms'. 
""" memory_types = ['rss', 'vms'] process = psutil.Process() baseline_memory_info = process.get_memory_info() baseline_usages = [getattr(baseline_memory_info, memory_type) for memory_type in memory_types] yield for memory_type, baseline_usage in zip(memory_types, baseline_usages): total_memory_info = process.get_memory_info() total_usage = getattr(total_memory_info, memory_type) memory_used = total_usage - baseline_usage dog_stats_api.increment( metric + "." + memory_type, memory_used, tags=["course_id:{}".format(course_id)], ) def _generate_items_for_subtask( item_querysets, # pylint: disable=bad-continuation item_fields, total_num_items, items_per_task, total_num_subtasks, course_id, ): """ Generates a chunk of "items" that should be passed into a subtask. Arguments: `item_querysets` : a list of query sets, each of which defines the "items" that should be passed to subtasks. `item_fields` : the fields that should be included in the dict that is returned. These are in addition to the 'pk' field. `total_num_items` : the result of summing the count of each queryset in `item_querysets`. `items_per_query` : size of chunks to break the query operation into. `items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask. `course_id` : course_id of the course. Only needed for the track_memory_usage context manager. Returns: yields a list of dicts, where each dict contains the fields in `item_fields`, plus the 'pk' field. Warning: if the algorithm here changes, the _get_number_of_subtasks() method should similarly be changed. 
""" num_items_queued = 0 all_item_fields = list(item_fields) all_item_fields.append('pk') num_subtasks = 0 items_for_task = [] with track_memory_usage('course_email.subtask_generation.memory', course_id): for queryset in item_querysets: for item in queryset.values(*all_item_fields).iterator(): if len(items_for_task) == items_per_task and num_subtasks < total_num_subtasks - 1: yield items_for_task num_items_queued += items_per_task items_for_task = [] num_subtasks += 1 items_for_task.append(item) # yield remainder items for task, if any if items_for_task: yield items_for_task num_items_queued += len(items_for_task) # Note, depending on what kind of DB is used, it's possible for the queryset # we iterate over to change in the course of the query. Therefore it's # possible that there are more (or fewer) items queued than were initially # calculated. It also means it's possible that the last task contains # more items than items_per_task allows. We expect this to be a small enough # number as to be negligible. if num_items_queued != total_num_items: TASK_LOG.info("Number of items generated by chunking %s not equal to original total %s", num_items_queued, total_num_items) class SubtaskStatus(object): """ Create and return a dict for tracking the status of a subtask. SubtaskStatus values are: 'task_id' : id of subtask. This is used to pass task information across retries. 'attempted' : number of attempts -- should equal succeeded plus failed 'succeeded' : number that succeeded in processing 'skipped' : number that were not processed. 'failed' : number that failed during processing 'retried_nomax' : number of times the subtask has been retried for conditions that should not have a maximum count applied 'retried_withmax' : number of times the subtask has been retried for conditions that should have a maximum count applied 'state' : celery state of the subtask (e.g. 
QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS) Object is not JSON-serializable, so to_dict and from_dict methods are provided so that it can be passed as a serializable argument to tasks (and be reconstituted within such tasks). In future, we may want to include specific error information indicating the reason for failure. Also, we should count up "not attempted" separately from attempted/failed. """ def __init__(self, task_id, attempted=None, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None): """Construct a SubtaskStatus object.""" self.task_id = task_id if attempted is not None: self.attempted = attempted else: self.attempted = succeeded + failed self.succeeded = succeeded self.failed = failed self.skipped = skipped self.retried_nomax = retried_nomax self.retried_withmax = retried_withmax self.state = state if state is not None else QUEUING @classmethod def from_dict(cls, d): """Construct a SubtaskStatus object from a dict representation.""" options = dict(d) task_id = options['task_id'] del options['task_id'] return SubtaskStatus.create(task_id, **options) @classmethod def create(cls, task_id, **options): """Construct a SubtaskStatus object.""" return cls(task_id, **options) def to_dict(self): """ Output a dict representation of a SubtaskStatus object. Use for creating a JSON-serializable representation for use by tasks. """ return self.__dict__ def increment(self, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None): """ Update the result of a subtask with additional results. Kwarg arguments are incremented to the existing values. The exception is for `state`, which if specified is used to override the existing value. 
""" self.attempted += (succeeded + failed) self.succeeded += succeeded self.failed += failed self.skipped += skipped self.retried_nomax += retried_nomax self.retried_withmax += retried_withmax if state is not None: self.state = state def get_retry_count(self): """Returns the number of retries of any kind.""" return self.retried_nomax + self.retried_withmax def __repr__(self): """Return print representation of a SubtaskStatus object.""" return 'SubtaskStatus<%r>' % (self.to_dict(),) def __unicode__(self): """Return unicode version of a SubtaskStatus object representation.""" return unicode(repr(self)) def initialize_subtask_info(entry, action_name, total_num, subtask_id_list): """ Store initial subtask information to InstructorTask object. The InstructorTask's "task_output" field is initialized. This is a JSON-serialized dict. Counters for 'attempted', 'succeeded', 'failed', 'skipped' keys are initialized to zero, as is the 'duration_ms' value. A 'start_time' is stored for later duration calculations, and the total number of "things to do" is set, so the user can be told how much needs to be done overall. The `action_name` is also stored, to help with constructing more readable task_progress messages. The InstructorTask's "subtasks" field is also initialized. This is also a JSON-serialized dict. Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of subtasks. 'Total' is set here to the total number, while the other three are initialized to zero. Once the counters for 'succeeded' and 'failed' match the 'total', the subtasks are done and the InstructorTask's "status" will be changed to SUCCESS. The "subtasks" field also contains a 'status' key, that contains a dict that stores status information for each subtask. The value for each subtask (keyed by its task_id) is its subtask status, as defined by SubtaskStatus.to_dict(). This information needs to be set up in the InstructorTask before any of the subtasks start running. 
If not, there is a chance that the subtasks could complete before the parent task is done creating subtasks. Doing so also simplifies the save() here, as it avoids the need for locking. Monitoring code should assume that if an InstructorTask has subtask information, that it should rely on the status stored in the InstructorTask object, rather than status stored in the corresponding AsyncResult. """ task_progress = { 'action_name': action_name, 'attempted': 0, 'failed': 0, 'skipped': 0, 'succeeded': 0, 'total': total_num, 'duration_ms': int(0), 'start_time': time() } entry.task_output = InstructorTask.create_output_for_success(task_progress) entry.task_state = PROGRESS # Write out the subtasks information. num_subtasks = len(subtask_id_list) # Note that may not be necessary to store initial value with all those zeroes! # Write out as a dict, so it will go more smoothly into json. subtask_status = {subtask_id: (SubtaskStatus.create(subtask_id)).to_dict() for subtask_id in subtask_id_list} subtask_dict = { 'total': num_subtasks, 'succeeded': 0, 'failed': 0, 'status': subtask_status } entry.subtasks = json.dumps(subtask_dict) # and save the entry immediately, before any subtasks actually start work: entry.save_now() return task_progress # pylint: disable=bad-continuation def queue_subtasks_for_query( entry, action_name, create_subtask_fcn, item_querysets, item_fields, items_per_task, total_num_items, ): """ Generates and queues subtasks to each execute a chunk of "items" generated by a queryset. Arguments: `entry` : the InstructorTask object for which subtasks are being queued. `action_name` : a past-tense verb that can be used for constructing readable status messages. `create_subtask_fcn` : a function of two arguments that constructs the desired kind of subtask object. Arguments are the list of items to be processed by this subtask, and a SubtaskStatus object reflecting initial status (and containing the subtask's id). 
`item_querysets` : a list of query sets that define the "items" that should be passed to subtasks. `item_fields` : the fields that should be included in the dict that is returned. These are in addition to the 'pk' field. `items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask. `total_num_items` : total amount of items that will be put into subtasks Returns: the task progress as stored in the InstructorTask object. """ task_id = entry.task_id # Calculate the number of tasks that will be created, and create a list of ids for each task. total_num_subtasks = _get_number_of_subtasks(total_num_items, items_per_task) subtask_id_list = [str(uuid4()) for _ in range(total_num_subtasks)] # Update the InstructorTask with information about the subtasks we've defined. TASK_LOG.info( "Task %s: updating InstructorTask %s with subtask info for %s subtasks to process %s items.", task_id, entry.id, total_num_subtasks, total_num_items, ) # Make sure this is committed to database before handing off subtasks to celery. with outer_atomic(): progress = initialize_subtask_info(entry, action_name, total_num_items, subtask_id_list) # Construct a generator that will return the recipients to use for each subtask. # Pass in the desired fields to fetch for each recipient. item_list_generator = _generate_items_for_subtask( item_querysets, item_fields, total_num_items, items_per_task, total_num_subtasks, entry.course_id, ) # Now create the subtasks, and start them running. TASK_LOG.info( "Task %s: creating %s subtasks to process %s items.", task_id, total_num_subtasks, total_num_items, ) num_subtasks = 0 for item_list in item_list_generator: subtask_id = subtask_id_list[num_subtasks] num_subtasks += 1 subtask_status = SubtaskStatus.create(subtask_id) new_subtask = create_subtask_fcn(item_list, subtask_status) new_subtask.apply_async() # Subtasks have been queued so no exceptions should be raised after this point. 
# Return the task progress as stored in the InstructorTask object. return progress def _acquire_subtask_lock(task_id): """ Mark the specified task_id as being in progress. This is used to make sure that the same task is not worked on by more than one worker at the same time. This can occur when tasks are requeued by Celery in response to loss of connection to the task broker. Most of the time, such duplicate tasks are run sequentially, but they can overlap in processing as well. Returns true if the task_id was not already locked; false if it was. """ # cache.add fails if the key already exists key = "subtask-{}".format(task_id) succeeded = cache.add(key, 'true', SUBTASK_LOCK_EXPIRE) if not succeeded: TASK_LOG.warning("task_id '%s': already locked. Contains value '%s'", task_id, cache.get(key)) return succeeded def _release_subtask_lock(task_id): """ Unmark the specified task_id as being no longer in progress. This is most important to permit a task to be retried. """ # According to Celery task cookbook, "Memcache delete is very slow, but we have # to use it to take advantage of using add() for atomic locking." key = "subtask-{}".format(task_id) cache.delete(key) def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status): """ Confirms that the current subtask is known to the InstructorTask and hasn't already been completed. Problems can occur when the parent task has been run twice, and results in duplicate subtasks being created for the same InstructorTask entry. This maybe happens when Celery loses its connection to its broker, and any current tasks get requeued. If a parent task gets requeued, then the same InstructorTask may have a different set of subtasks defined (to do the same thing), so the subtasks from the first queuing would not be known to the InstructorTask. We return an exception in this case. If a subtask gets requeued, then the first time the subtask runs it should run fine to completion. 
However, we want to prevent it from running again, so we check here to see what the existing subtask's status is. If it is complete, we raise an exception. We also take a lock on the task, so that we can detect if another worker has started work but has not yet completed that work. The other worker is allowed to finish, and this raises an exception. Raises a DuplicateTaskException exception if it's not a task that should be run. If this succeeds, it requires that update_subtask_status() is called to release the lock on the task. """ # Confirm that the InstructorTask actually defines subtasks. entry = InstructorTask.objects.get(pk=entry_id) if len(entry.subtasks) == 0: format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}" msg = format_str.format(current_task_id, entry, new_subtask_status) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.nosubtasks', tags=[entry.course_id]) raise DuplicateTaskException(msg) # Confirm that the InstructorTask knows about this particular subtask. subtask_dict = json.loads(entry.subtasks) subtask_status_info = subtask_dict['status'] if current_task_id not in subtask_status_info: format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}" msg = format_str.format(current_task_id, entry, new_subtask_status) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.unknown', tags=[entry.course_id]) raise DuplicateTaskException(msg) # Confirm that the InstructorTask doesn't think that this subtask has already been # performed successfully. 
subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id]) subtask_state = subtask_status.state if subtask_state in READY_STATES: format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}" msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.completed', tags=[entry.course_id]) raise DuplicateTaskException(msg) # Confirm that the InstructorTask doesn't think that this subtask is already being # retried by another task. if subtask_state == RETRY: # Check to see if the input number of retries is less than the recorded number. # If so, then this is an earlier version of the task, and a duplicate. new_retry_count = new_subtask_status.get_retry_count() current_retry_count = subtask_status.get_retry_count() if new_retry_count < current_retry_count: format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}" msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.retried', tags=[entry.course_id]) raise DuplicateTaskException(msg) # Now we are ready to start working on this. Try to lock it. # If it fails, then it means that another worker is already in the # middle of working on this. if not _acquire_subtask_lock(current_task_id): format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'" msg = format_str.format(current_task_id, entry) TASK_LOG.warning(msg) dog_stats_api.increment('instructor_task.subtask.duplicate.locked', tags=[entry.course_id]) raise DuplicateTaskException(msg) def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count=0): """ Update the status of the subtask in the parent InstructorTask object tracking its progress. 
Because select_for_update is used to lock the InstructorTask object while it is being updated, multiple subtasks updating at the same time may time out while waiting for the lock. The actual update operation is surrounded by a try/except/else that permits the update to be retried if the transaction times out. The subtask lock acquired in the call to check_subtask_is_valid() is released here, only when the attempting of retries has concluded. """ try: _update_subtask_status(entry_id, current_task_id, new_subtask_status) except DatabaseError: # If we fail, try again recursively. retry_count += 1 if retry_count < MAX_DATABASE_LOCK_RETRIES: TASK_LOG.info("Retrying to update status for subtask %s of instructor task %d with status %s: retry %d", current_task_id, entry_id, new_subtask_status, retry_count) dog_stats_api.increment('instructor_task.subtask.retry_after_failed_update') update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count) else: TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s", retry_count, current_task_id, entry_id, new_subtask_status) dog_stats_api.increment('instructor_task.subtask.failed_after_update_retries') raise finally: # Only release the lock on the subtask when we're done trying to update it. # Note that this will be called each time a recursive call to update_subtask_status() # returns. Fortunately, it's okay to release a lock that has already been released. _release_subtask_lock(current_task_id) @transaction.atomic def _update_subtask_status(entry_id, current_task_id, new_subtask_status): """ Update the status of the subtask in the parent InstructorTask object tracking its progress. Uses select_for_update to lock the InstructorTask object while it is being updated. The operation is surrounded by a try/except/else that permit the manual transaction to be committed on completion, or rolled back on error. The InstructorTask's "task_output" field is updated. 
This is a JSON-serialized dict. Accumulates values for 'attempted', 'succeeded', 'failed', 'skipped' from `new_subtask_status` into the corresponding values in the InstructorTask's task_output. Also updates the 'duration_ms' value with the current interval since the original InstructorTask started. Note that this value is only approximate, since the subtask may be running on a different server than the original task, so is subject to clock skew. The InstructorTask's "subtasks" field is also updated. This is also a JSON-serialized dict. Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of subtasks. 'Total' is expected to have been set at the time the subtasks were created. The other three counters are incremented depending on the value of `status`. Once the counters for 'succeeded' and 'failed' match the 'total', the subtasks are done and the InstructorTask's "status" is changed to SUCCESS. The "subtasks" field also contains a 'status' key, that contains a dict that stores status information for each subtask. At the moment, the value for each subtask (keyed by its task_id) is the value of the SubtaskStatus.to_dict(), but could be expanded in future to store information about failure messages, progress made, etc. """ TASK_LOG.info("Preparing to update status for subtask %s for instructor task %d with status %s", current_task_id, entry_id, new_subtask_status) try: entry = InstructorTask.objects.select_for_update().get(pk=entry_id) subtask_dict = json.loads(entry.subtasks) subtask_status_info = subtask_dict['status'] if current_task_id not in subtask_status_info: # unexpected error -- raise an exception format_str = "Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'" msg = format_str.format(current_task_id, entry_id) TASK_LOG.warning(msg) raise ValueError(msg) # Update status: subtask_status_info[current_task_id] = new_subtask_status.to_dict() # Update the parent task progress. 
# Set the estimate of duration, but only if it # increases. Clock skew between time() returned by different machines # may result in non-monotonic values for duration. task_progress = json.loads(entry.task_output) start_time = task_progress['start_time'] prev_duration = task_progress['duration_ms'] new_duration = int((time() - start_time) * 1000) task_progress['duration_ms'] = max(prev_duration, new_duration) # Update counts only when subtask is done. # In future, we can make this more responsive by updating status # between retries, by comparing counts that change from previous # retry. new_state = new_subtask_status.state if new_subtask_status is not None and new_state in READY_STATES: for statname in ['attempted', 'succeeded', 'failed', 'skipped']: task_progress[statname] += getattr(new_subtask_status, statname) # Figure out if we're actually done (i.e. this is the last task to complete). # This is easier if we just maintain a counter, rather than scanning the # entire new_subtask_status dict. if new_state == SUCCESS: subtask_dict['succeeded'] += 1 elif new_state in READY_STATES: subtask_dict['failed'] += 1 num_remaining = subtask_dict['total'] - subtask_dict['succeeded'] - subtask_dict['failed'] # If we're done with the last task, update the parent status to indicate that. # At present, we mark the task as having succeeded. In future, we should see # if there was a catastrophic failure that occurred, and figure out how to # report that here. if num_remaining <= 0: entry.task_state = SUCCESS entry.subtasks = json.dumps(subtask_dict) entry.task_output = InstructorTask.create_output_for_success(task_progress) TASK_LOG.debug("about to save....") entry.save() TASK_LOG.info("Task output updated to %s for subtask %s of instructor task %d", entry.task_output, current_task_id, entry_id) except Exception: TASK_LOG.exception("Unexpected error while updating InstructorTask.") dog_stats_api.increment('instructor_task.subtask.update_exception') raise
agpl-3.0
cleinias/Homeo
src/Webots/Homeo-experiments/controllers/Type-2b-Orthodox-Braitenberg/Type-2b-Orthodox-Braitenberg.py
1
2330
################################################################# # First test controlling a khepera robot in a simple Webots world # simulating a straightforward Braitenberg-like type-2b vehicle: # # 2 sensors and 2 motors, with crossed connections # # Stefano Franchi 2014 ################################################################# from __future__ import division from controller import * import sys class Vehicle2 (DifferentialWheels): counter = 1 def run(self): self.leftEye = self.getLightSensor('ls0') self.rightEye = self.getLightSensor('ls1') self.leftEye.enable(32) self.rightEye.enable(32) counter = 1 maxSpeed = 100 maxLight = 1200 maxDelta = 0 while (self.step(32) != -1): # Read the sensors: # Take the complement of sensors' values, since webots' sensors return a value close to zero # for maximum intensity and values close to maxLight for minimum intensity rightLightValue = maxLight - self.rightEye.getValue() leftLightValue = maxLight - self.leftEye.getValue() # Cross sensor data to opposite motor, as in a straightforward # Braitenberg type-2b vehicle. # Also, scale sensor values to speed range : rightSpeed = leftLightValue * (maxSpeed/maxLight) leftSpeed = rightLightValue * (maxSpeed/maxLight) #========================================================================= # # "Debugging info" # # speedDelta = abs(rightSpeed - leftSpeed) # if speedDelta > maxDelta: # maxDelta = speedDelta # print "Right eye: %d Left eye: %d Right Speed %f Left Speed %f Speed delta is %f Max so far is %f\r" % (rightLightValue, # leftLightValue, # rightSpeed, # leftSpeed, # speedDelta, # maxDelta) # counter = counter + 1 #========================================================================= self.setSpeed(leftSpeed, rightSpeed) robot = Vehicle2() robot.run()
gpl-3.0
rosenvladimirov/addons
l10n_bg/__openerp__.py
1
2431
# -*- coding: utf-8 -*-
##############################################################################
#
#    Odoo Bulgaria Accounting, Open Source Accounting and Invoiceing Module
#    Copyright (C) 2016 Rosen Vladimirov, Prodax LTD
#    (vladimirov.rosen@gmail.com, http://www.prodax.bg)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Lesser General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest for the Bulgarian accounting localization.
{
    "name": "Bulgaria - Accounting",
    "version": "1.0",
    "author": "Rosen Vladimirov",
    "website": "http://www.prodax.bg",
    "category": "Localization/Account Charts",
    # NOTE(fix): this manifest previously declared "depends" twice; in a
    # Python dict literal the last duplicate key silently wins, so the first
    # list (['account', 'account_chart', 'base_vat']) was dead code. All of
    # its entries are already contained in the single list kept below, so
    # the effective dependency set is unchanged.
    "depends": [
        'account',
        'base_vat',
        'base_iban',
        'account_chart',
        'l10n_multilang',
        'report_qweb_element_page_visibility',
    ],
    "description": """
This is the module to manage the Accounting Chart, VAT structure, Fiscal Position and Tax Mapping.
It also adds the Registration Number for Bulgaria in OpenERP.
=================================================================================
Bulgarian accounting chart and localization.
    """,
    "demo": [],
    "data": [
        'data/account_chart.xml',
        'data/account_tax_code_template.xml',
        'data/account_chart_template.xml',
        'data/account_chart_template.yml',
        'data/account_tax_template.xml',
        'data/fiscal_position_template.xml',
        'wizard/l10n_chart_bg_wizard.xml',
        'data/res.country.state.csv',
        'data/res.bank.csv',
        'security/ir.model.access.csv',
    ],
    "license": "AGPL-3",
    "installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
CaliOpen/CaliOpen
src/backend/components/py.data/caliopen_data/provider.py
1
2237
"""Data provider class for dataset processing.""" from __future__ import absolute_import, print_function, unicode_literals import os from io import open import zope.interface from elasticsearch import Elasticsearch from .interface import IDataProvider class DataProvider(object): """Class to interact with a data provider interface.""" def __init__(self, config): """Initialize a data provider, connecting it to store.""" self.config = config self._store = self._connect_store() self._search = None self.iterator = None def prepare(self, query, **kwargs): """Prepare the query to be iterated.""" self._search = self._prepare(query, **kwargs) self.iterator = self._execute(**kwargs) def next(self): """Iterator method over search results.""" if not self.iterator: raise StopIteration for item in self.iterator: yield self._format_item(item) class FileDataProvider(DataProvider): """Data provider reading from a file.""" zope.interface.implements(IDataProvider) def __init__(self, config): """Create a new FileDataProvider.""" super(FileDataProvider, self).__init__(config) self._filename = None def _connect_store(self): pass def _prepare(self, filename, **kwargs): if not os.path.isfile(filename): raise ValueError('No such file: {0}'.format(filename)) self._filename = filename def _execute(self): if not self._filename: raise ValueError('No prepared file') with open(self._filename, 'r', encoding="utf-8") as f: return f.read().split('\n') class ESProvider(DataProvider): """Data provider reading from an elasticsearch query.""" zope.interface.implements(IDataProvider) def _connect_store(self): config = self.config.get('elasticsearch') return Elasticsearch(config['url']) def _prepare(self, query, index=None, doc_type=None, **kwargs): self.query = query self.query = self.query.using(self._store).index(index). \ doc_type(doc_type) def _execute(self, **kwargs): return self.query.scan()
gpl-3.0
Bysmyyr/chromium-crosswalk
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
8
5815
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest

from webkitpy.layout_tests.port import mac
from webkitpy.layout_tests.port import port_testcase
from webkitpy.tool.mocktool import MockOptions


class MacPortTest(port_testcase.PortTestCase):
    """Unit tests for the Mac layout-test port (name resolution, baseline
    and build paths, driver locations)."""
    os_name = 'mac'
    os_version = 'snowleopard'
    port_name = 'mac'
    full_port_name = 'mac-snowleopard'
    port_maker = mac.MacPort

    def assert_name(self, port_name, os_version_string, expected):
        """Assert that a port built from (port_name, os_version) resolves to
        the *expected* full port name."""
        port = self.make_port(os_version=os_version_string, port_name=port_name)
        self.assertEqual(expected, port.name())

    def test_versions(self):
        """Exercise the port-name resolution table for every supported OS
        version, including the 'future' fallback and the rejection of
        unsupported versions (tiger)."""
        self.assertTrue(self.make_port().name() in ('mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac-mavericks', 'mac-mac10.10'))

        self.assert_name(None, 'snowleopard', 'mac-snowleopard')
        self.assert_name('mac', 'snowleopard', 'mac-snowleopard')
        self.assert_name('mac-snowleopard', 'leopard', 'mac-snowleopard')
        self.assert_name('mac-snowleopard', 'snowleopard', 'mac-snowleopard')

        self.assert_name(None, 'lion', 'mac-lion')
        self.assert_name(None, 'mountainlion', 'mac-mountainlion')
        self.assert_name(None, 'mavericks', 'mac-mavericks')
        self.assert_name(None, 'mac10.10', 'mac-mac10.10')
        # Unknown future versions fall back to the newest known port.
        self.assert_name(None, 'future', 'mac-mac10.10')

        self.assert_name('mac', 'lion', 'mac-lion')

        self.assertRaises(AssertionError, self.assert_name, None, 'tiger', 'should-raise-assertion-so-this-value-does-not-matter')

    def test_baseline_path(self):
        """Each versioned port keeps its baselines in its own directory;
        mac-mac10.10 uses the generic 'mac' directory (presumably because it
        is the newest version -- confirm against the fallback chain)."""
        port = self.make_port(port_name='mac-snowleopard')
        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-snowleopard'))

        port = self.make_port(port_name='mac-lion')
        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-lion'))

        port = self.make_port(port_name='mac-mountainlion')
        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-mountainlion'))

        port = self.make_port(port_name='mac-mavericks')
        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-mavericks'))

        port = self.make_port(port_name='mac-mac10.10')
        self.assertEqual(port.baseline_path(),
                         port._webkit_baseline_path('mac'))

    def test_operating_system(self):
        self.assertEqual('mac', self.make_port().operating_system())

    def test_build_path(self):
        # Test that optional paths are used regardless of whether they exist.
        options = MockOptions(configuration='Release', build_directory='/foo')
        self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release')

        # Test that optional relative paths are returned unmodified.
        options = MockOptions(configuration='Release', build_directory='foo')
        self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release')

        # Test that we prefer the legacy dir over the new dir.
        options = MockOptions(configuration='Release', build_directory=None)
        self.assert_build_path(options, ['/mock-checkout/xcodebuild/Release', '/mock-checkout/out/Release'], '/mock-checkout/xcodebuild/Release')

    def test_build_path_timestamps(self):
        """When both build dirs exist, the most recently modified one wins."""
        options = MockOptions(configuration='Release', build_directory=None)
        port = self.make_port(options=options)
        port.host.filesystem.maybe_make_directory('/mock-checkout/out/Release')
        port.host.filesystem.maybe_make_directory('/mock-checkout/xcodebuild/Release')
        # Check with 'out' being newer.
        port.host.filesystem.mtime = lambda f: 5 if '/out/' in f else 4
        self.assertEqual(port._build_path(), '/mock-checkout/out/Release')
        # Check with 'xcodebuild' being newer.
        port.host.filesystem.mtime = lambda f: 5 if '/xcodebuild/' in f else 4
        self.assertEqual(port._build_path(), '/mock-checkout/xcodebuild/Release')

    def test_driver_name_option(self):
        self.assertTrue(self.make_port()._path_to_driver().endswith('Content Shell'))
        self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))

    def test_path_to_image_diff(self):
        self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff')
bsd-3-clause
eric-haibin-lin/mxnet
python/mxnet/context.py
4
9716
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 """Context management API of mxnet.""" import threading import warnings import ctypes from .base import classproperty, with_metaclass, _MXClassPropertyMetaClass from .base import _LIB from .base import check_call class Context(with_metaclass(_MXClassPropertyMetaClass, object)): """Constructs a context. MXNet can run operations on CPU and different GPUs. A context describes the device type and ID on which computation should be carried on. One can use mx.cpu and mx.gpu for short. See also ---------- `How to run MXNet on multiple CPU/GPUs <http://mxnet.incubator.apache.org/api/faq/distributed_training>` for more details. Parameters ---------- device_type : {'cpu', 'gpu'} or Context. String representing the device type. device_id : int (default=0) The device id of the device, needed for GPU. Note ---- Context can also be used as a way to change the default context. Examples -------- >>> # array on cpu >>> cpu_array = mx.nd.ones((2, 3)) >>> # switch default context to GPU(2) >>> with mx.Context(mx.gpu(2)): ... gpu_array = mx.nd.ones((2, 3)) >>> gpu_array.context gpu(2) One can also explicitly specify the context when creating an array. 
>>> gpu_array = mx.nd.ones((2, 3), mx.gpu(1)) >>> gpu_array.context gpu(1) """ # static class variable _default_ctx = threading.local() devtype2str = {1: 'cpu', 2: 'gpu', 3: 'cpu_pinned', 5: 'cpu_shared'} devstr2type = {'cpu': 1, 'gpu': 2, 'cpu_pinned': 3, 'cpu_shared': 5} def __init__(self, device_type, device_id=0): if isinstance(device_type, Context): self.device_typeid = device_type.device_typeid self.device_id = device_type.device_id else: self.device_typeid = Context.devstr2type[device_type] self.device_id = device_id self._old_ctx = None @property def device_type(self): """Returns the device type of current context. Examples ------- >>> mx.context.current_context().device_type 'cpu' >>> mx.current_context().device_type 'cpu' Returns ------- device_type : str """ return Context.devtype2str[self.device_typeid] def __hash__(self): """Compute hash value of context for dictionary lookup""" return hash((self.device_typeid, self.device_id)) def __eq__(self, other): """Compares two contexts. Two contexts are equal if they have the same device type and device id. """ return isinstance(other, Context) and \ self.device_typeid == other.device_typeid and \ self.device_id == other.device_id def __str__(self): return '%s(%d)' % (self.device_type, self.device_id) def __repr__(self): return self.__str__() def __enter__(self): if not hasattr(Context._default_ctx, "value"): Context._default_ctx.value = Context('cpu', 0) self._old_ctx = Context._default_ctx.value Context._default_ctx.value = self return self def __exit__(self, ptype, value, trace): Context._default_ctx.value = self._old_ctx #pylint: disable=no-self-argument @classproperty def default_ctx(cls): warnings.warn("Context.default_ctx has been deprecated. " "Please use Context.current_context() instead. 
" "Please use test_utils.set_default_context to set a default context", DeprecationWarning) if not hasattr(Context._default_ctx, "value"): cls._default_ctx.value = Context('cpu', 0) return cls._default_ctx.value @default_ctx.setter def default_ctx(cls, val): warnings.warn("Context.default_ctx has been deprecated. " "Please use Context.current_context() instead. " "Please use test_utils.set_default_context to set a default context", DeprecationWarning) cls._default_ctx.value = val #pylint: enable=no-self-argument def empty_cache(self): """Empties the memory cache for the current contexts device. MXNet utilizes a memory pool to avoid excessive allocations. Calling empty_cache will empty the memory pool of the contexts device. This will only free the memory of the unreferenced data. Examples ------- >>> ctx = mx.gpu(0) >>> arr = mx.nd.ones((200,200), ctx=ctx) >>> del arr >>> ctx.empty_cache() # forces release of memory allocated for arr """ dev_type = ctypes.c_int(self.device_typeid) dev_id = ctypes.c_int(self.device_id) check_call(_LIB.MXStorageEmptyCache(dev_type, dev_id)) # initialize the default context in Context Context._default_ctx.value = Context('cpu', 0) def cpu(device_id=0): """Returns a CPU context. This function is a short cut for ``Context('cpu', device_id)``. For most operations, when no context is specified, the default context is `cpu()`. Examples ---------- >>> with mx.cpu(): ... cpu_array = mx.nd.ones((2, 3)) >>> cpu_array.context cpu(0) >>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu()) >>> cpu_array.context cpu(0) Parameters ---------- device_id : int, optional The device id of the device. `device_id` is not needed for CPU. This is included to make interface compatible with GPU. Returns ------- context : Context The corresponding CPU context. """ return Context('cpu', device_id) def cpu_pinned(device_id=0): """Returns a CPU pinned memory context. Copying from CPU pinned memory to GPU is faster than from normal CPU memory. 
This function is a short cut for ``Context('cpu_pinned', device_id)``. Examples ---------- >>> with mx.cpu_pinned(): ... cpu_array = mx.nd.ones((2, 3)) >>> cpu_array.context cpu_pinned(0) >>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu_pinned()) >>> cpu_array.context cpu_pinned(0) Parameters ---------- device_id : int, optional The device id of the device. `device_id` is not needed for CPU. This is included to make interface compatible with GPU. Returns ------- context : Context The corresponding CPU pinned memory context. """ return Context('cpu_pinned', device_id) def gpu(device_id=0): """Returns a GPU context. This function is a short cut for Context('gpu', device_id). The K GPUs on a node are typically numbered as 0,...,K-1. Examples ---------- >>> cpu_array = mx.nd.ones((2, 3)) >>> cpu_array.context cpu(0) >>> with mx.gpu(1): ... gpu_array = mx.nd.ones((2, 3)) >>> gpu_array.context gpu(1) >>> gpu_array = mx.nd.ones((2, 3), ctx=mx.gpu(1)) >>> gpu_array.context gpu(1) Parameters ---------- device_id : int, optional The device id of the device, needed for GPU. Returns ------- context : Context The corresponding GPU context. """ return Context('gpu', device_id) def num_gpus(): """Query CUDA for the number of GPUs present. Raises ------ Will raise an exception on any CUDA error. Returns ------- count : int The number of GPUs. """ count = ctypes.c_int() check_call(_LIB.MXGetGPUCount(ctypes.byref(count))) return count.value def gpu_memory_info(device_id=0): """Query CUDA for the free and total bytes of GPU global memory. Parameters ---------- device_id : int, optional The device id of the GPU device. Raises ------ Will raise an exception on any CUDA error. Returns ------- (free, total) : (int, int) """ free = ctypes.c_uint64() total = ctypes.c_uint64() dev_id = ctypes.c_int(device_id) check_call(_LIB.MXGetGPUMemoryInformation64(dev_id, ctypes.byref(free), ctypes.byref(total))) return (free.value, total.value) def current_context(): """Returns the current context. 
By default, `mx.cpu()` is used for all the computations and it can be overridden by using `with mx.Context(x)` statement where x can be cpu(device_id) or gpu(device_id). Examples ------- >>> mx.current_context() cpu(0) >>> with mx.Context('gpu', 1): # Context changed in `with` block. ... mx.current_context() # Computation done here will be on gpu(1). ... gpu(1) >>> mx.current_context() # Back to default context. cpu(0) Returns ------- default_ctx : Context """ if not hasattr(Context._default_ctx, "value"): Context._default_ctx.value = Context('cpu', 0) return Context._default_ctx.value
apache-2.0
jirikuncar/Flask-Collect
flask_collect/storage/base.py
1
1374
""" Abstract Storage. """ from __future__ import print_function from os import path as op, walk class BaseStorage(): """ Base class for storages. """ def __init__(self, collect, verbose=False): self.verbose = verbose self.collect = collect def __iter__(self): """ Seek static files and result full and relative paths. :return generator: Walk files """ for bp in [self.collect.app] + list(self.collect.blueprints.values()): if bp.has_static_folder and op.isdir(bp.static_folder): for root, _, files in walk(bp.static_folder): for f in files: fpath = op.join(root, f) opath = op.relpath(fpath, bp.static_folder.rstrip('/')) if bp.static_url_path and self.collect.static_url and \ bp.static_url_path.startswith( op.join(self.collect.static_url, '')): # noqa opath = op.join( op.relpath( bp.static_url_path, self.collect.static_url), opath) yield bp, fpath, opath def log(self, msg): """ Log message. """ if self.verbose: print(msg)
bsd-3-clause
flazzarini/flask-jwt
docs/conf.py
5
6665
# -*- coding: utf-8 -*- # # flask-jwt documentation build configuration file, created by # sphinx-quickstart on Fri May 28 11:39:14 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) sys.path.append(os.path.abspath('_themes')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Flask-JWT' copyright = u'2014, Matt Wright' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.2.0' # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. default_role = 'obj' # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. #pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'flask_small' #html_theme = 'default' html_theme_options = { 'index_logo': False, 'github_fork': 'mattupstate/flask-jwt' } # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'flask-jwtdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). 
#latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'flask-jwt.tex', u'flask-jwt Documentation', u'Dan Jacob', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True intersphinx_mapping = {'http://docs.python.org/': None, 'http://flask.pocoo.org/docs/': None}
mit
wsmith323/django
django/db/backends/sqlite3/operations.py
106
10799
from __future__ import unicode_literals

import datetime
import uuid

from django.conf import settings
from django.core.exceptions import FieldError, ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import aggregates, fields
from django.utils import six, timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.duration import duration_string

# pytz is optional; it is only required for timezone-aware queries
# (see _require_pytz below).
try:
    import pytz
except ImportError:
    pytz = None


class DatabaseOperations(BaseDatabaseOperations):
    # SQLite-specific SQL generation and value conversion. Because SQLite
    # stores dates/times as text and lacks many SQL functions, several
    # operations delegate to Python user-defined functions (django_date_*,
    # django_datetime_*, ...) that the backend registers in connect().

    def bulk_batch_size(self, fields, objs):
        """
        SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
        999 variables per query.

        If there is just single field to insert, then we can hit another
        limit, SQLITE_MAX_COMPOUND_SELECT which defaults to 500.
        """
        limit = 999 if len(fields) > 1 else 500
        return (limit // len(fields)) if len(fields) > 0 else len(objs)

    def check_expression_support(self, expression):
        # Reject aggregates over date/time columns up front: SQLite keeps
        # date/time as text, so summing/averaging them is meaningless.
        bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
        bad_aggregates = (aggregates.Sum, aggregates.Avg,
                          aggregates.Variance, aggregates.StdDev)
        if isinstance(expression, bad_aggregates):
            for expr in expression.get_source_expressions():
                try:
                    output_field = expr.output_field
                    if isinstance(output_field, bad_fields):
                        raise NotImplementedError(
                            'You cannot use Sum, Avg, StdDev, and Variance '
                            'aggregations on date/time fields in sqlite3 '
                            'since date/time is saved as text.'
                        )
                except FieldError:
                    # Not every subexpression has an output_field which is fine
                    # to ignore.
                    pass

    def date_extract_sql(self, lookup_type, field_name):
        # sqlite doesn't support extract, so we fake it with the user-defined
        # function django_date_extract that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)

    def date_interval_sql(self, timedelta):
        # Returns (sql, params); the interval is passed as a literal string.
        return "'%s'" % duration_string(timedelta), []

    def format_for_duration_arithmetic(self, sql):
        """Do nothing here, we will handle it in the custom function."""
        return sql

    def date_trunc_sql(self, lookup_type, field_name):
        # sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
        # function django_date_trunc that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)

    def _require_pytz(self):
        # Raise a helpful error instead of failing later inside the UDFs.
        if settings.USE_TZ and pytz is None:
            raise ImproperlyConfigured("This query requires pytz, but it isn't installed.")

    def datetime_cast_date_sql(self, field_name, tzname):
        self._require_pytz()
        return "django_datetime_cast_date(%s, %%s)" % field_name, [tzname]

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        # Same comment as in date_extract_sql.
        self._require_pytz()
        return "django_datetime_extract('%s', %s, %%s)" % (
            lookup_type.lower(), field_name), [tzname]

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        # Same comment as in date_trunc_sql.
        self._require_pytz()
        return "django_datetime_trunc('%s', %s, %%s)" % (
            lookup_type.lower(), field_name), [tzname]

    def time_extract_sql(self, lookup_type, field_name):
        # sqlite doesn't support extract, so we fake it with the user-defined
        # function django_time_extract that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)

    def drop_foreignkey_sql(self):
        # SQLite cannot drop foreign keys via ALTER TABLE; nothing to emit.
        return ""

    def pk_default_value(self):
        # Inserting NULL into an INTEGER PRIMARY KEY autogenerates the id.
        return "NULL"

    def _quote_params_for_last_executed_query(self, params):
        """
        Only for last_executed_query! Don't use this to execute SQL queries!
        """
        sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params))
        # Bypass Django's wrappers and use the underlying sqlite3 connection
        # to avoid logging this query - it would trigger infinite recursion.
        cursor = self.connection.connection.cursor()
        # Native sqlite3 cursors cannot be used as context managers.
        try:
            return cursor.execute(sql, params).fetchone()
        finally:
            cursor.close()

    def last_executed_query(self, cursor, sql, params):
        # Python substitutes parameters in Modules/_sqlite/cursor.c with:
        # pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars);
        # Unfortunately there is no way to reach self->statement from Python,
        # so we quote and substitute parameters manually.
        if params:
            if isinstance(params, (list, tuple)):
                params = self._quote_params_for_last_executed_query(params)
            else:
                keys = params.keys()
                values = tuple(params.values())
                values = self._quote_params_for_last_executed_query(values)
                params = dict(zip(keys, values))
            return sql % params
        # For consistency with SQLiteCursorWrapper.execute(), just return sql
        # when there are no parameters. See #13648 and #17158.
        else:
            return sql

    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name

    def no_limit_value(self):
        # SQLite uses LIMIT -1 to mean "no limit".
        return -1

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        # NB: The generated SQL below is specific to SQLite
        # Note: The DELETE FROM... SQL generated below works for SQLite databases
        # because constraints don't exist
        sql = ['%s %s %s;' % (
            style.SQL_KEYWORD('DELETE'),
            style.SQL_KEYWORD('FROM'),
            style.SQL_FIELD(self.quote_name(table))
        ) for table in tables]
        # Note: No requirement for reset of auto-incremented indices (cf. other
        # sql_flush() implementations). Just return SQL at this point
        return sql

    def adapt_datetimefield_value(self, value):
        # Convert a datetime to the text form stored by SQLite.
        if value is None:
            return None

        # SQLite doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")

        return six.text_type(value)

    def adapt_timefield_value(self, value):
        # Convert a time to the text form stored by SQLite.
        if value is None:
            return None

        # SQLite doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            raise ValueError("SQLite backend does not support timezone-aware times.")

        return six.text_type(value)

    def get_db_converters(self, expression):
        # Register read-side converters that turn SQLite's text/plain values
        # back into the rich Python types each field expects.
        converters = super(DatabaseOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == 'DateTimeField':
            converters.append(self.convert_datetimefield_value)
        elif internal_type == 'DateField':
            converters.append(self.convert_datefield_value)
        elif internal_type == 'TimeField':
            converters.append(self.convert_timefield_value)
        elif internal_type == 'DecimalField':
            converters.append(self.convert_decimalfield_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        return converters

    def convert_datetimefield_value(self, value, expression, connection, context):
        if value is not None:
            if not isinstance(value, datetime.datetime):
                value = parse_datetime(value)
            if settings.USE_TZ:
                value = timezone.make_aware(value, self.connection.timezone)
        return value

    def convert_datefield_value(self, value, expression, connection, context):
        if value is not None:
            if not isinstance(value, datetime.date):
                value = parse_date(value)
        return value

    def convert_timefield_value(self, value, expression, connection, context):
        if value is not None:
            if not isinstance(value, datetime.time):
                value = parse_time(value)
        return value

    def convert_decimalfield_value(self, value, expression, connection, context):
        if value is not None:
            value = expression.output_field.format_number(value)
            value = backend_utils.typecast_decimal(value)
        return value

    def convert_uuidfield_value(self, value, expression, connection, context):
        if value is not None:
            value = uuid.UUID(value)
        return value

    def bulk_insert_sql(self, fields, placeholder_rows):
        # Emulate multi-row VALUES with a UNION ALL of SELECTs.
        return " UNION ALL ".join(
            "SELECT %s" % ", ".join(row)
            for row in placeholder_rows
        )

    def combine_expression(self, connector, sub_expressions):
        # SQLite doesn't have a power function, so we fake it with a
        # user-defined function django_power that's registered in connect().
        if connector == '^':
            return 'django_power(%s)' % ','.join(sub_expressions)
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)

    def combine_duration_expression(self, connector, sub_expressions):
        # Duration arithmetic is delegated to the django_format_dtdelta UDF;
        # only simple binary +/- is supported.
        if connector not in ['+', '-']:
            raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector)
        fn_params = ["'%s'" % connector] + sub_expressions
        if len(fn_params) > 3:
            raise ValueError('Too many params for timedelta operations.')
        return "django_format_dtdelta(%s)" % ', '.join(fn_params)

    def integer_field_range(self, internal_type):
        # SQLite doesn't enforce any integer constraints
        return (None, None)
bsd-3-clause
Mazecreator/tensorflow
tensorflow/contrib/learn/python/learn/estimators/composable_model_test.py
77
6392
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ComposableModel classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import composable_model
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test


def _iris_input_fn():
  # Input fn serving the full iris dataset (150 examples) as a single
  # dense float feature plus int32 labels.
  iris = base.load_iris()
  return {
      'feature': constant_op.constant(
          iris.data, dtype=dtypes.float32)
  }, constant_op.constant(
      iris.target, shape=[150, 1], dtype=dtypes.int32)


def _base_model_fn(features, labels, mode, params):
  # Generic model_fn shared by all estimators below: builds logits via the
  # ComposableModel in params and wires the head's training op.
  model = params['model']
  feature_columns = params['feature_columns']
  head = params['head']

  if mode == model_fn_lib.ModeKeys.TRAIN:
    logits = model.build_model(features, feature_columns, is_training=True)
  elif mode == model_fn_lib.ModeKeys.EVAL:
    logits = model.build_model(features, feature_columns, is_training=False)
  else:
    # INFER mode is not exercised by these tests.
    raise NotImplementedError

  def _train_op_fn(loss):
    global_step = contrib_variables.get_global_step()
    assert global_step
    train_step = model.get_train_step(loss)

    # Increment the global step only after the model's train step ran,
    # colocated with the global-step variable.
    with ops.control_dependencies(train_step):
      with ops.get_default_graph().colocate_with(global_step):
        return state_ops.assign_add(global_step, 1).op

  return head.create_model_fn_ops(
      features=features,
      mode=mode,
      labels=labels,
      train_op_fn=_train_op_fn,
      logits=logits)


def _linear_estimator(head, feature_columns):
  # Estimator wrapping a LinearComposableModel.
  return estimator.Estimator(
      model_fn=_base_model_fn,
      params={
          'model':
              composable_model.LinearComposableModel(
                  num_label_columns=head.logits_dimension),
          'feature_columns': feature_columns,
          'head': head
      })


def _joint_linear_estimator(head, feature_columns):
  # Same as _linear_estimator but with a single joint weight variable.
  return estimator.Estimator(
      model_fn=_base_model_fn,
      params={
          'model':
              composable_model.LinearComposableModel(
                  num_label_columns=head.logits_dimension, _joint_weights=True),
          'feature_columns': feature_columns,
          'head': head
      })


def _dnn_estimator(head, feature_columns, hidden_units):
  # Estimator wrapping a DNNComposableModel with the given hidden layers.
  return estimator.Estimator(
      model_fn=_base_model_fn,
      params={
          'model':
              composable_model.DNNComposableModel(
                  num_label_columns=head.logits_dimension,
                  hidden_units=hidden_units),
          'feature_columns': feature_columns,
          'head': head
      })


class ComposableModelTest(test.TestCase):

  def testLinearModel(self):
    """Tests that loss goes down with training."""

    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)
    age = feature_column.real_valued_column('age')

    head = head_lib.multi_class_head(n_classes=2)
    classifier = _linear_estimator(head, feature_columns=[age, language])

    classifier.fit(input_fn=input_fn, steps=1000)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=2000)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    # Loss must decrease with more training and end up near zero.
    self.assertLess(loss2, loss1)
    self.assertLess(loss2, 0.01)

  def testJointLinearModel(self):
    """Tests that loss goes down with training."""

    def input_fn():
      return {
          'age':
              sparse_tensor.SparseTensor(
                  values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)
    age = feature_column.sparse_column_with_hash_bucket('age', 2)

    head = head_lib.multi_class_head(n_classes=2)
    classifier = _joint_linear_estimator(head, feature_columns=[age, language])

    classifier.fit(input_fn=input_fn, steps=1000)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=2000)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    # Loss must decrease with more training and end up near zero.
    self.assertLess(loss2, loss1)
    self.assertLess(loss2, 0.01)

  def testDNNModel(self):
    """Tests multi-class classification using matrix data as input."""
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]

    head = head_lib.multi_class_head(n_classes=3)
    classifier = _dnn_estimator(
        head, feature_columns=cont_features, hidden_units=[3, 3])

    classifier.fit(input_fn=_iris_input_fn, steps=1000)
    classifier.evaluate(input_fn=_iris_input_fn, steps=100)


if __name__ == '__main__':
  test.main()
apache-2.0
dnozay/lettuce
tests/integration/lib/Django-1.3/django/conf/locale/ru/formats.py
232
1323
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j E Y г.' TIME_FORMAT = 'G:i:s' DATETIME_FORMAT = 'j E Y г. G:i:s' YEAR_MONTH_FORMAT = 'F Y г.' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'd.m.Y' SHORT_DATETIME_FORMAT = 'd.m.Y H:i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d.%m.%Y', # '25.10.2006' '%d.%m.%y', # '25.10.06' '%Y-%m-%d', # '2006-10-25' ) TIME_INPUT_FORMATS = ( '%H:%M:%S', # '14:30:59' '%H:%M', # '14:30' ) DATETIME_INPUT_FORMATS = ( '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' '%d.%m.%y %H:%M', # '25.10.06 14:30' '%d.%m.%y', # '25.10.06' '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = ' ' NUMBER_GROUPING = 3
gpl-3.0
derDavidT/sympy
sympy/liealgebras/tests/test_type_F.py
80
1222
from __future__ import division

from sympy.liealgebras.cartan_type import CartanType
from sympy.core.compatibility import range
from sympy.matrices import Matrix


def test_type_F():
    """Check the basic data of the exceptional Lie algebra of Cartan type F4.

    Verifies the Cartan matrix, rank/dimension, selected simple roots,
    root/basis counts, the Dynkin diagram string and the table of
    positive roots.
    """
    c = CartanType("F4")
    # 4x4 Cartan matrix of F4 (note the double bond between nodes 2 and 3).
    m = Matrix(4, 4, [2, -1, 0, 0, -1, 2, -2, 0, 0, -1, 2, -1, 0, 0, -1, 2])
    assert c.cartan_matrix() == m
    assert c.dimension() == 4
    assert c.simple_root(3) == [0, 0, 0, 1]
    # The fourth simple root is a half-integer vector (true division via
    # the __future__ import above).
    assert c.simple_root(4) == [-0.5, -0.5, -0.5, -0.5]
    assert c.roots() == 48
    assert c.basis() == 52
    diag = "0---0=>=0---0\n" + "   ".join(str(i) for i in range(1, 5))
    assert c.dynkin_diagram() == diag
    # All 24 positive roots; entries 17-24 are the half-sum roots.
    assert c.positive_roots() == {1: [1, -1, 0, 0], 2: [1, 1, 0, 0],
            3: [1, 0, -1, 0], 4: [1, 0, 1, 0], 5: [1, 0, 0, -1],
            6: [1, 0, 0, 1], 7: [0, 1, -1, 0], 8: [0, 1, 1, 0],
            9: [0, 1, 0, -1], 10: [0, 1, 0, 1], 11: [0, 0, 1, -1],
            12: [0, 0, 1, 1], 13: [1, 0, 0, 0], 14: [0, 1, 0, 0],
            15: [0, 0, 1, 0], 16: [0, 0, 0, 1], 17: [1/2, 1/2, 1/2, 1/2],
            18: [1/2, -1/2, 1/2, 1/2], 19: [1/2, 1/2, -1/2, 1/2],
            20: [1/2, 1/2, 1/2, -1/2], 21: [1/2, 1/2, -1/2, -1/2],
            22: [1/2, -1/2, 1/2, -1/2], 23: [1/2, -1/2, -1/2, 1/2],
            24: [1/2, -1/2, -1/2, -1/2]}
bsd-3-clause
ReactiveX/RxPY
tests/test_observable/test_filter.py
1
14035
# Marble-style tests for the rx `filter` / `filter_indexed` operators.
# Each test feeds a hot observable with on_next values at virtual times
# 110..630 (the scheduler subscribes at 200, so values before that are
# never delivered) and asserts the filtered message stream, the
# subscription window and how many times the predicate was invoked.
import unittest

from rx.testing import TestScheduler, ReactiveTest, is_prime
from rx.disposable import SerialDisposable
from rx.operators import filter, filter_indexed

on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created


class RxException(Exception):
    pass


# Helper function for raising exceptions within lambdas
def _raise(ex):
    raise RxException(ex)


def test_is_prime():
    # Sanity-check the helper predicate used throughout the suite.
    assert not is_prime(1)
    assert is_prime(2)
    assert is_prime(3)
    assert not is_prime(4)
    assert is_prime(5)
    assert not is_prime(6)


class TestFilter(unittest.TestCase):
    """Behavioural tests for filter and filter_indexed."""

    def test_filter_complete(self):
        scheduler = TestScheduler()
        invoked = [0]
        xs = scheduler.create_hot_observable(
            on_next(110, 1),
            on_next(180, 2),
            on_next(230, 3),
            on_next(270, 4),
            on_next(340, 5),
            on_next(380, 6),
            on_next(390, 7),
            on_next(450, 8),
            on_next(470, 9),
            on_next(560, 10),
            on_next(580, 11),
            on_completed(600),
            on_next(610, 12),
            on_error(620, 'ex'),
            on_completed(630))

        def create():
            def predicate(x):
                invoked[0] += 1
                return is_prime(x)
            return xs.pipe(filter(predicate))
        results = scheduler.start(create)

        # Only primes pass; messages after on_completed(600) are ignored.
        assert results.messages == [on_next(230, 3), on_next(340, 5), on_next(390, 7), on_next(
            580, 11), on_completed(600)]
        assert xs.subscriptions == [subscribe(200, 600)]
        assert invoked[0] == 9

    def test_filter_true(self):
        scheduler = TestScheduler()
        invoked = [0]
        xs = scheduler.create_hot_observable(
            on_next(110, 1),
            on_next(180, 2),
            on_next(230, 3),
            on_next(270, 4),
            on_next(340, 5),
            on_next(380, 6),
            on_next(390, 7),
            on_next(450, 8),
            on_next(470, 9),
            on_next(560, 10),
            on_next(580, 11),
            on_completed(600))

        def create():
            def predicate(x):
                invoked[0] += 1
                return True
            return xs.pipe(filter(predicate))

        results = scheduler.start(create)

        # An always-true predicate forwards every delivered value.
        assert results.messages == [on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(380, 6), on_next(
            390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600)]
        assert xs.subscriptions == [subscribe(200, 600)]
        assert invoked[0] == 9

    def test_filter_false(self):
        scheduler = TestScheduler()
        invoked = [0]
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(
            380, 6), on_next(390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600))

        def create():
            def predicate(x):
                invoked[0] += 1
                return False
            return xs.pipe(filter(predicate))

        results = scheduler.start(create)

        # An always-false predicate suppresses everything but completion.
        assert results.messages == [on_completed(600)]
        assert xs.subscriptions == [subscribe(200, 600)]
        assert invoked[0] == 9

    def test_filter_dispose(self):
        scheduler = TestScheduler()
        invoked = [0]
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(
            380, 6), on_next(390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600))

        def create():
            def predicate(x):
                invoked[0] += 1
                return is_prime(x)
            return xs.pipe(filter(predicate))

        # Dispose at 400: only values up to that point are processed.
        results = scheduler.start(create, disposed=400)
        assert results.messages == [
            on_next(230, 3), on_next(340, 5), on_next(390, 7)]
        assert xs.subscriptions == [subscribe(200, 400)]
        assert invoked[0] == 5

    def test_filter_error(self):
        scheduler = TestScheduler()
        invoked = [0]
        ex = 'ex'
        xs = scheduler.create_hot_observable(
            on_next(110, 1),
            on_next(180, 2),
            on_next(230, 3),
            on_next(270, 4),
            on_next(340, 5),
            on_next(380, 6),
            on_next(390, 7),
            on_next(450, 8),
            on_next(470, 9),
            on_next(560, 10),
            on_next(580, 11),
            on_error(600, ex),
            on_next(610, 12),
            on_error(620, 'ex'),
            on_completed(630))

        def create():
            def predicate(x):
                invoked[0] += 1
                return is_prime(x)
            return xs.pipe(filter(predicate))
        results = scheduler.start(create)

        # Source error at 600 is forwarded unchanged.
        assert results.messages == [on_next(230, 3), on_next(
            340, 5), on_next(390, 7), on_next(580, 11), on_error(600, ex)]
        assert xs.subscriptions == [subscribe(200, 600)]
        assert(invoked[0] == 9)

    def test_filter_on_error(self):
        scheduler = TestScheduler()
        invoked = [0]
        ex = 'ex'
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(380, 6), on_next(
            390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600), on_next(610, 12), on_error(620, 'ex'), on_completed(630))

        def create():
            def predicate(x):
                invoked[0] += 1
                # A throwing predicate must terminate the stream with the error.
                if x > 5:
                    raise Exception(ex)

                return is_prime(x)
            return xs.pipe(filter(predicate))

        results = scheduler.start(create)

        assert results.messages == [
            on_next(230, 3), on_next(340, 5), on_error(380, ex)]
        assert xs.subscriptions == [subscribe(200, 380)]
        assert(invoked[0] == 4)

    def test_filter_dispose_in_predicate(self):
        scheduler = TestScheduler()
        invoked = [0]
        ys = [None]
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(380, 6), on_next(
            390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600), on_next(610, 12), on_error(620, 'ex'), on_completed(630))
        results = scheduler.create_observer()
        d = SerialDisposable()

        def action(scheduler, state):
            def predicate(x):
                invoked[0] += 1
                # Disposing from inside the predicate stops delivery at 450.
                if x == 8:
                    d.dispose()

                return is_prime(x)
            ys[0] = xs.pipe(filter(predicate))
            return ys[0]
        scheduler.schedule_absolute(created, action)

        def action1(scheduler, state):
            d.disposable = ys[0].subscribe(results)
        scheduler.schedule_absolute(subscribed, action1)

        def action2(scheduler, state):
            d.dispose()
        scheduler.schedule_absolute(disposed, action2)

        scheduler.start()
        assert results.messages == [
            on_next(230, 3), on_next(340, 5), on_next(390, 7)]
        assert xs.subscriptions == [subscribe(200, 450)]
        assert(invoked[0] == 6)

    def test_filter_indexed_complete(self):
        scheduler = TestScheduler()
        invoked = [0]
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(380, 6), on_next(
            390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600), on_next(610, 12), on_error(620, 'ex'), on_completed(630))

        def create():
            def predicate(x, index):
                invoked[0] += 1
                # Predicate sees the zero-based index of delivered values.
                return is_prime(x + index * 10)
            return xs.pipe(filter_indexed(predicate))

        results = scheduler.start(create)

        assert results.messages == [
            on_next(230, 3), on_next(390, 7), on_completed(600)]
        assert xs.subscriptions == [subscribe(200, 600)]
        assert(invoked[0] == 9)

    def test_filter_indexed_true(self):
        scheduler = TestScheduler()
        invoked = [0]
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(
            380, 6), on_next(390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600))

        def create():
            def predicate(x, index):
                invoked[0] += 1
                return True
            return xs.pipe(filter_indexed(predicate))

        results = scheduler.start(create)

        assert results.messages == [on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(380, 6), on_next(
            390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600)]
        assert xs.subscriptions == [subscribe(200, 600)]
        assert(invoked[0] == 9)

    def test_filter_indexed_false(self):
        scheduler = TestScheduler()
        invoked = [0]
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(
            380, 6), on_next(390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600))

        def create():
            def predicate(x, index):
                invoked[0] += 1
                return False
            return xs.pipe(filter_indexed(predicate))

        results = scheduler.start(create)

        assert results.messages == [on_completed(600)]
        assert xs.subscriptions == [subscribe(200, 600)]
        assert(invoked[0] == 9)

    def test_filter_indexed_dispose(self):
        scheduler = TestScheduler()
        invoked = [0]
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(
            380, 6), on_next(390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600))

        def create():
            def predicate(x, index):
                invoked[0] += 1
                return is_prime(x + index * 10)
            return xs.pipe(filter_indexed(predicate))

        results = scheduler.start(create, disposed=400)
        assert results.messages == [on_next(230, 3), on_next(390, 7)]
        assert xs.subscriptions == [subscribe(200, 400)]
        assert(invoked[0] == 5)

    def test_filter_indexed_error(self):
        scheduler = TestScheduler()
        invoked = [0]
        ex = 'ex'
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(380, 6), on_next(
            390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_error(600, ex), on_next(610, 12), on_error(620, 'ex'), on_completed(630))

        def create():
            def predicate(x, index):
                invoked[0] += 1
                return is_prime(x + index * 10)
            return xs.pipe(filter_indexed(predicate))

        results = scheduler.start(create)

        assert results.messages == [
            on_next(230, 3), on_next(390, 7), on_error(600, ex)]
        assert xs.subscriptions == [subscribe(200, 600)]
        assert(invoked[0] == 9)

    def test_filter_indexed_on_error(self):
        scheduler = TestScheduler()
        invoked = [0]
        ex = 'ex'
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(380, 6), on_next(
            390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600), on_next(610, 12), on_error(620, 'ex'), on_completed(630))

        def create():
            def predicate(x, index):
                invoked[0] += 1
                if x > 5:
                    raise Exception(ex)

                return is_prime(x + index * 10)
            return xs.pipe(filter_indexed(predicate))

        results = scheduler.start(create)

        assert results.messages == [on_next(230, 3), on_error(380, ex)]
        assert xs.subscriptions == [subscribe(200, 380)]
        assert(invoked[0] == 4)

    def test_filter_indexed_dispose_in_predicate(self):
        scheduler = TestScheduler()
        ys = [None]
        invoked = [0]
        xs = scheduler.create_hot_observable(on_next(110, 1), on_next(180, 2), on_next(230, 3), on_next(270, 4), on_next(340, 5), on_next(380, 6), on_next(
            390, 7), on_next(450, 8), on_next(470, 9), on_next(560, 10), on_next(580, 11), on_completed(600), on_next(610, 12), on_error(620, 'ex'), on_completed(630))
        results = scheduler.create_observer()
        d = SerialDisposable()

        def action1(scheduler, state):
            def predicate(x, index):
                invoked[0] += 1
                if x == 8:
                    d.dispose()

                return is_prime(x + index * 10)
            ys[0] = xs.pipe(filter_indexed(predicate))
        scheduler.schedule_absolute(created, action1)

        def action2(scheduler, state):
            d.disposable = ys[0].subscribe(results)
        scheduler.schedule_absolute(subscribed, action2)

        def action3(scheduler, state):
            d.dispose()
        scheduler.schedule_absolute(disposed, action3)

        scheduler.start()
        assert results.messages == [on_next(230, 3), on_next(390, 7)]
        assert xs.subscriptions == [subscribe(200, 450)]
        assert invoked[0] == 6
mit
oxc/Flexget
flexget/plugins/filter/private_torrents.py
4
1333
from __future__ import unicode_literals, division, absolute_import from builtins import * # pylint: disable=unused-import, redefined-builtin import logging from flexget import plugin from flexget.event import event log = logging.getLogger('priv_torrents') class FilterPrivateTorrents(object): """How to handle private torrents. private_torrents: yes|no Example:: private_torrents: no This would reject all torrent entries with private flag. Example:: private_torrents: yes This would reject all public torrents. Non-torrent content is not interviened. """ schema = {'type': 'boolean'} @plugin.priority(127) def on_task_modify(self, task, config): private_torrents = config for entry in task.accepted: if 'torrent' not in entry: log.debug('`%s` is not a torrent' % entry['title']) continue private = entry['torrent'].private if not private_torrents and private: entry.reject('torrent is marked as private', remember=True) elif private_torrents and not private: entry.reject('public torrent', remember=True) @event('plugin.register') def register_plugin(): plugin.register(FilterPrivateTorrents, 'private_torrents', api_ver=2)
mit
lubkoll/friendly-type-erasure
type_erasure/cpp_file_parser.py
1
16956
import clang_util import cpp import file_parser import re import util import parser_addition from clang.cindex import TypeKind def sequence_from_text(text): return [cpp.SimpleToken(entry) for entry in text.split(' ')] def get_alias_from_text(name, text): return cpp.Alias(name, [cpp.SimpleToken(spelling) for spelling in text.split(' ')]) def get_function_from_text(classname, functionname, return_str, text, function_type='function'): return cpp.Function(classname, functionname, return_str, [cpp.SimpleToken(spelling) for spelling in text.split(' ')], function_type) def returns_this(function): remaining_tokens = function.tokens[cpp.get_declaration_end_index(function.name, function.tokens):] return cpp.contains_sequence(remaining_tokens, sequence_from_text('return * this ;')) def get_table_return_type(function): index, offset = cpp.find_function_name(function.name, function.tokens) return_type = util.concat(function.tokens[:index], ' ') if return_type in ['const ' + function.classname + ' & ', function.classname + ' & ']: return 'void ' if return_type == function.classname + ' ': return if returns_this(function): return 'void ' return util.concat(function.tokens[:index], ' ') def replace_in_tokens(old_spelling, new_spelling, tokens): for token in tokens: if token.spelling == old_spelling: token.spelling = new_spelling def const_specifier(function): return 'const ' if cpp.is_const(function) else '' def same_tokens(tokens, other_tokens): if len(tokens) != len(other_tokens): return False for i in range(len(tokens)): if tokens[i].spelling != other_tokens[i].spelling: return False return True def contains(name, tokens): for token in tokens: if token.spelling == name: return True return False def get_function_name_for_tokens(name): if 'operator' in name: return 'operator ' + name[8:] return name def get_function_name_for_type_erasure(function): arg_extension = '' args = cpp.get_function_arguments(function) for arg in args: arg_extension += '_' + arg.type() arg_extension = 
arg_extension.replace('&', '_ref').replace('*', '_ptr') arg_extension = re.sub(' |::|\(|\)', '_', arg_extension) arg_extension = re.sub(r'<|>|\[|\]\(|\)\{\}', '', arg_extension) arg_extension = re.sub('_+', '_', arg_extension) arg_extension = arg_extension[:-1] if arg_extension.endswith('_') else arg_extension if function.name == 'operator()': return 'call' + arg_extension elif function.name == 'operator=': return 'assignment' + arg_extension elif function.name == 'operator+=': return 'add' + arg_extension elif function.name == 'operator*=': return 'multiply' + arg_extension elif function.name == 'operator-=': return 'subtract' + arg_extension elif function.name == 'operator-': return 'negate' + arg_extension elif function.name == 'operator/=': return 'divide' + arg_extension elif function.name == 'operator==': return 'compare' + arg_extension return function.name + arg_extension class CppFileParser(file_parser.FileProcessor): def __init__(self): self.scope = cpp.Namespace('global') def process_inclusion_directive(self, data, cursor): self.scope.add(cpp.InclusionDirective(clang_util.parse_inclusion_directive(data, cursor).replace('#include ', '').replace('\n', ''))) def process_open_include_guard(self, filename): self.scope.add(cpp.ScopeEntry('include_guard', parser_addition.extract_include_guard(filename))) def process_headers(self, headers): self.scope.add(cpp.ScopeEntry('headers', headers)) def process_open_namespace(self, data, cursor): self.scope.add(cpp.Namespace(cursor.spelling, clang_util.get_tokens(data.tu, cursor))) def process_close_namespace(self): self.scope.close() def process_open_class(self, data, cursor): if clang_util.get_tokens(data.tu, cursor)[2].spelling == clang_util.semicolon: self.scope.add(cpp.Class(data.current_struct.spelling, clang_util.get_tokens(data.tu, cursor)[:3])) # self.scope.add(ForwardDeclaration(util.concat(clang_util.get_tokens(data.tu, cursor)[:3], ' '))) else: self.scope.add(cpp.Class(data.current_struct.spelling, 
clang_util.get_tokens(data.tu, cursor))) def process_open_struct(self, data, cursor): if clang_util.get_tokens(data.tu, cursor)[2].spelling == clang_util.semicolon: self.scope.add(cpp.Struct(data.current_struct.spelling, clang_util.get_tokens(data.tu, cursor)[:3])) # self.scope.add(ForwardDeclaration(util.concat(clang_util.get_tokens(data.tu, cursor)[:3], ' '))) else: self.scope.add(cpp.Struct(data.current_struct.spelling, clang_util.get_tokens(data.tu, cursor))) def process_close_class(self): if self.scope.get_open_scope().get_type() == cpp.NAMESPACE: return self.scope.close() def process_function(self, data, cursor): classname = '' if data.current_struct.spelling: classname = data.current_struct.spelling current_scope = self.scope.get_open_scope() tokens = clang_util.get_tokens(data.tu, cursor) tokens = tokens[:cpp.get_body_end_index(cursor.spelling, tokens)] if current_scope.get_tokens() and not tokens[-1].spelling == clang_util.semicolon: tokens = clang_util.get_all_tokens(tokens, current_scope.get_tokens()) function_type = cpp.FUNCTION if clang_util.is_function_template(cursor.kind): function_type = cpp.FUNCTION_TEMPLATE self.scope.add(cpp.Function(classname, cursor.spelling, cursor.result_type.kind != TypeKind.VOID and 'return ' or '', tokens, function_type)) def process_function_template(self, data, cursor): self.process_function(data, cursor) def process_constructor(self, data, cursor): classname = '' if data.current_struct.spelling: classname = data.current_struct.spelling current_scope = self.scope.get_open_scope() tokens = clang_util.get_tokens(data.tu, cursor) tokens = tokens[:cpp.get_body_end_index(cursor.spelling, tokens)] if current_scope.get_tokens() and not tokens[-1].spelling == clang_util.semicolon: tokens = clang_util.get_all_tokens(tokens, current_scope.get_tokens()) self.scope.add(cpp.Function(classname, cursor.spelling, cursor.result_type.kind != TypeKind.VOID and 'return ' or '', tokens, cpp.CONSTRUCTOR)) def process_destructor(self, data, 
cursor): self.process_function(data, cursor) def process_type_alias(self,data,cursor): self.scope.add(cpp.Alias(cursor.spelling, clang_util.get_tokens(data.tu, cursor))) def process_variable_declaration(self,data,cursor): tokens = clang_util.get_tokens(data.tu, cursor) if tokens[-1].spelling != clang_util.semicolon and self.scope.get_open_scope().get_tokens(): tokens = clang_util.get_all_variable_tokens(tokens, self.scope.get_open_scope().get_tokens()) variable_declaration = util.concat(tokens, ' ') # in case that an underlying type is specified, # clang interprets enums at variables. # filter out these cases: if 'enum ' in variable_declaration: #TODO try to find a workaround for this return if clang_util.get_tokens(data.tu, cursor)[0].spelling == 'static': self.scope.add(cpp.StaticVariable(variable_declaration)) else: self.scope.add(cpp.Variable(variable_declaration)) def process_member_variable_declaration(self,data,cursor): self.process_variable_declaration(data, cursor) def process_forward_declaration(self,data,cursor): pass def process_enum(self,data,cursor): self.scope.add(cpp.ScopeEntry('enum', clang_util.get_enum_definition(data.tu, cursor))) def process_access_specifier(self, data, cursor): self.scope.add(cpp.AccessSpecifier(clang_util.get_tokens(data.tu, cursor)[0].spelling)) class Visitor(object): def visit(self,visited): pass def visit_function(self,function): return self.visit(function) def visit_template_function(self,function): return self.visit_function(function) def visit_constructor(self,constructor): return self.visit_function(constructor) def visit_destructor(self,destructor): return self.visit_function(destructor) def visit_operator(self,operator): return self.visit_function(operator) def visit_class(self,class_): return self.visit(class_) def visit_forward_declaration(self,forward_declaration): return self.visit(forward_declaration) def visit_template_class(self,template_class): return self.visit(template_class) def 
visit_namespace(self,namespace): return self.visit(namespace) def visit_inclusion_directive(self,inclusion_directive): return self.visit(inclusion_directive) def visit_access_specifier(self,access_specifier): return self.visit(access_specifier) def visit_variable(self,variable): return self.visit(variable) def visit_static_variable(self,variable): return self.visit(variable) def visit_alias(self,alias): return self.visit(alias) def visit_comment(self,comment): return self.visit(comment) class RecursionVisitor(Visitor): def visit_class(self,class_): for entry in class_.content: entry.visit(self) def visit_template_class(self,template_class): for entry in template_class.content: entry.visit(self) def visit_namespace(self,namespace): for entry in namespace.content: entry.visit(self) class ExtractPublicProtectedPrivateSections(RecursionVisitor): def __init__(self): self.private_section = [] self.protected_section = [] self.public_section = [] self.access_specifier = cpp.PRIVATE def visit_access_specifier(self,access_specifier): self.access_specifier = access_specifier.value def visit(self,entry): if self.access_specifier == cpp.PRIVATE: self.private_section.append(entry) elif self.access_specifier == cpp.PROTECTED: self.protected_section.append(entry) else: self.public_section.append(entry) def append_comment(comment,group): if comment: group.append(comment) return None class ExtractTypes(Visitor): def __init__(self): self.aliases = [] self.static_variables = [] self.constructors = [] self.destructor = [] self.operators = [] self.functions = [] self.forward_declarations = [] self.variables = [] self.comment = None def visit_comment(self,comment): self.comment = comment def visit_function(self,function): if function.type in [cpp.ASSIGNMENT_OPERATOR]: self.comment = append_comment(self.comment, self.operators) self.operators.append(function) if function.type in [cpp.FUNCTION, cpp.FUNCTION_TEMPLATE]: if function.name.startswith('operator'): self.comment = 
append_comment(self.comment, self.operators) self.operators.append(function) else: self.comment = append_comment(self.comment, self.functions) self.functions.append(function) elif function.type in [cpp.CONSTRUCTOR, cpp.CONSTRUCTOR_TEMPLATE]: self.comment = append_comment(self.comment, self.constructors) self.constructors.append(function) elif function.type == cpp.DESTRUCTOR: self.comment = append_comment(self.comment, self.destructor) self.destructor.append(function) def visit_variable(self,variable): self.comment = append_comment(self.comment,self.variables) self.variables.append(variable) def visit_static_variable(self,variable): self.comment = append_comment(self.comment,self.static_variables) self.static_variables.append(variable) def visit_alias(self,alias): self.comment = append_comment(self.comment,self.aliases) self.aliases.append(alias) def visit_forward_declaration(self,forward_declaration): self.forward_declarations.append(forward_declaration) def extend_section(new_section, section_part, with_separator=True): if new_section and section_part and with_separator: new_section.append(cpp.Separator()) new_section.extend(section_part) def sort_section(section): type_extractor = ExtractTypes() for entry in section: entry.visit(type_extractor) new_section = [] new_section.extend(type_extractor.aliases) extend_section(new_section, type_extractor.static_variables) extend_section(new_section, type_extractor.constructors) extend_section(new_section, type_extractor.destructor) extend_section(new_section, type_extractor.operators) extend_section(new_section, type_extractor.functions) extend_section(new_section, type_extractor.forward_declarations) extend_section(new_section, type_extractor.variables, with_separator=False) return new_section class SortClass(RecursionVisitor): def visit_class(self,class_): section_extractor = ExtractPublicProtectedPrivateSections() class_.visit(section_extractor) section_extractor.public_section = 
sort_section(section_extractor.public_section) section_extractor.protected_section = sort_section(section_extractor.protected_section) section_extractor.private_section = sort_section(section_extractor.private_section) class_.content = [] if section_extractor.public_section: class_.content.append(cpp.public_access) class_.content.extend(section_extractor.public_section) if section_extractor.protected_section: class_.content.append(cpp.protected_access) class_.content.extend(section_extractor.protected_section) if section_extractor.private_section: class_.content.append(cpp.private_access) class_.content.extend(section_extractor.private_section) def remove_inclusion_directives(main_scope): new_scope = [] for entry in main_scope.content: if not cpp.is_inclusion_directive(entry): new_scope.append(entry) main_scope.content = new_scope def remove_duplicate_inclusion_directives(main_scope): new_scope = [] for entry in main_scope.content: if cpp.is_inclusion_directive(entry): in_new_scope = False for new_entry in new_scope: if cpp.is_inclusion_directive(new_entry) and new_entry.value == entry.value: in_new_scope = True break if not in_new_scope: new_scope.append(entry) else: new_scope.append(entry) main_scope.content = new_scope def prepend_inclusion_directives(main_scope, inclusion_directives): for inclusion_directive in reversed(inclusion_directives): main_scope.content.insert(0, inclusion_directive) def append_inclusion_directive(main_scope, inclusion_directive): for i in range(len(main_scope.content)): if not cpp.is_inclusion_directive(main_scope.content[i]): main_scope.content.insert(i, inclusion_directive) return def append_inclusion_directives(main_scope, inclusion_directives): for inclusion_directive in inclusion_directives: append_inclusion_directive(main_scope, inclusion_directive) def add_comment(new_content, entry, comments): comment = util.get_comment(comments, entry) if comment: new_content.append(cpp.Comment(comment)) def add_comments(scope, comments): 
new_content = [] for entry in scope.content: if cpp.is_namespace(entry): add_comment(new_content, 'namespace ' + entry.name, comments) add_comments(entry, comments) elif cpp.is_class(entry) or cpp.is_struct(entry): add_comment(new_content, entry.type + ' ' + entry.name, comments) add_comments(entry, comments) elif entry.type in [cpp.FUNCTION, cpp.CONSTRUCTOR, cpp.DESTRUCTOR, cpp.FUNCTION_TEMPLATE, cpp.ASSIGNMENT_OPERATOR]: add_comment(new_content, entry.get_declaration(), comments) elif entry.type == cpp.ALIAS: add_comment(new_content, util.concat(entry.tokens, ' '), comments) new_content.append(entry) scope.content = new_content
mit