text
stringlengths
81
112k
Predicate for whether the open token is in an exception context :arg exceptions: list of strings or None :arg before_token: the text of the function up to the token delimiter :arg after_token: the text of the function after the token delimiter :arg token: the token (only if we're looking at a close delimiter :returns: bool def _is_exception(exceptions, before_token, after_token, token): """Predicate for whether the open token is in an exception context :arg exceptions: list of strings or None :arg before_token: the text of the function up to the token delimiter :arg after_token: the text of the function after the token delimiter :arg token: the token (only if we're looking at a close delimiter :returns: bool """ if not exceptions: return False for s in exceptions: if before_token.endswith(s): return True if s in token: return True return False
def collapse(
    function,
    open_string,
    close_string,
    replacement='',
    exceptions=None,
):
    """Collapses the text between two delimiters in a frame function value

    This collapses the text between two delimiters and either removes the
    text altogether or replaces it with a replacement string.

    There are certain contexts in which we might not want to collapse the
    text between two delimiters. These are denoted as "exceptions" and
    collapse will check for those exception strings occurring before the
    token to be replaced or inside the token to be replaced.

    Before::

        IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)
                        ^                    ^^^^^^^
                        open token           exception string occurring before open token

    Inside::

        <rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute
        ^                               ^^^^
        open token                      exception string inside token

    :arg function: the function value from a frame to collapse tokens in
    :arg open_string: the open delimiter; e.g. ``(``
    :arg close_string: the close delimiter; e.g. ``)``
    :arg replacement: what to replace the token with; e.g. ``<T>``
    :arg exceptions: list of strings denoting exceptions where we don't want
        to collapse the token

    :returns: new function string with tokens collapsed

    """
    # characters emitted to the output so far
    collapsed = []
    # nesting depth of open delimiters currently unclosed
    open_count = 0
    # characters of the token currently being swallowed (delimiters included)
    open_token = []

    for i, char in enumerate(function):
        if not open_count:
            # outside any token: only start swallowing when this open
            # delimiter is NOT in an exception context
            if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''):  # noqa
                open_count += 1
                open_token = [char]
            else:
                collapsed.append(char)

        else:
            if char == open_string:
                # nested open delimiter inside the token
                open_count += 1
                open_token.append(char)

            elif char == close_string:
                open_count -= 1
                open_token.append(char)
                if open_count == 0:
                    # token fully closed: re-check exceptions with the
                    # complete token text before deciding to collapse it
                    token = ''.join(open_token)
                    if _is_exception(exceptions, function[:i], function[i + 1:], token):
                        collapsed.append(''.join(open_token))
                    else:
                        collapsed.append(replacement)
                    open_token = []

            else:
                open_token.append(char)

    if open_count:
        # unterminated token at end of string; `i` is the last index from the
        # loop above, so function[i + 1:] is '' here
        token = ''.join(open_token)
        if _is_exception(exceptions, function[:i], function[i + 1:], token):
            collapsed.append(''.join(open_token))
        else:
            collapsed.append(replacement)

    return ''.join(collapsed)
def drop_prefix_and_return_type(function):
    """Takes the function value from a frame and drops prefix and return type

    For example::

        static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
        ^      ^^^^^^ return type
        prefix

    This gets changed to this::

        Allocator<MozJemallocBase>::malloc(unsigned __int64)

    This tokenizes on space, but takes into account types, generics, traits,
    function arguments, and other parts of the function signature delimited by
    things like `', <>, {}, [], and () for both C/C++ and Rust.

    After tokenizing, this returns the last token since that's comprised of
    the function name and its arguments.

    :arg function: the function value in a frame to drop bits from

    :returns: adjusted function value

    """
    pairs = {
        '(': ')',
        '{': '}',
        '[': ']',
        '<': '>',
        '`': "'",
    }
    openers = tuple(pairs)
    closers = tuple(pairs.values())

    # tokens accumulated so far
    tokens = []
    # stack of open delimiters awaiting their close
    stack = []
    # characters of the token being built
    buf = []

    for ch in function:
        if ch in openers:
            stack.append(ch)
            buf.append(ch)
        elif ch in closers:
            # only pop when this close matches the innermost open;
            # an unmatched close is kept verbatim
            if stack and pairs[stack[-1]] == ch:
                stack.pop()
            buf.append(ch)
        elif stack:
            # inside delimiters, spaces are part of the token
            buf.append(ch)
        elif ch == ' ':
            tokens.append(''.join(buf))
            buf = []
        else:
            buf.append(ch)

    if buf:
        tokens.append(''.join(buf))

    while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):
        # It's possible for the function signature to have a space between
        # the function name and the parenthesized arguments or [clone ...]
        # thing. If that's the case, we join the last two tokens. We keep
        # doing that until the last token is nice.
        #
        # Example:
        #
        #     somefunc (int arg1, int arg2)
        #              ^
        #     somefunc(int arg1, int arg2) [clone .cold.111]
        #                                  ^
        tokens = tokens[:-2] + [' '.join(tokens[-2:])]

    return tokens[-1]
def wait_connected(self, conns=None, timeout=None):
    '''Wait for connections to be made and their handshakes to finish

    :param conns:
        a single or list of (host, port) tuples with the connections that
        must be finished before the method will return. defaults to all the
        peers the :class:`Hub` was instantiated with.
    :param timeout:
        maximum time to wait in seconds. with None, there is no timeout.
    :type timeout: float or None

    :returns:
        ``True`` if all connections were made, ``False`` one or more
        failed.
    '''
    if timeout:
        deadline = time.time() + timeout

    conns = conns or self._started_peers.keys()

    # Normalize a single (host, port) address to a one-element list.
    # NOTE: the old `hasattr(conns, "__iter__")` check alone never fired
    # for a tuple (tuples are iterable), so a single-address argument used
    # to be iterated element-wise and raised KeyError.
    if not hasattr(conns, "__iter__"):
        conns = [conns]
    elif isinstance(conns, tuple) and not any(
            isinstance(item, tuple) for item in conns):
        conns = [conns]

    for peer_addr in conns:
        # shrink the per-peer wait by the time already spent
        remaining = max(0, deadline - time.time()) if timeout else None
        if not self._started_peers[peer_addr].wait_connected(remaining):
            if timeout:
                # Logger.warn is a deprecated alias of Logger.warning
                log.warning(
                    "connect wait timed out after %.2f seconds" % timeout)
            return False
    return True
def shutdown(self):
    'Close all peer connections and stop listening for new ones'
    log.info("shutting down")

    # bring every peer connection down without scheduling reconnects
    for peer in self._dispatcher.peers.values():
        peer.go_down(reconnect=False)

    # bail out of the TCP and UDP listener coroutines, if running
    for coro in (self._listener_coro, self._udp_listener_coro):
        if coro:
            backend.schedule_exception(
                errors._BailOutOfListener(), coro)
def accept_publish(
        self, service, mask, value, method, handler=None, schedule=False):
    '''Set a handler for incoming publish messages

    :param service: the incoming message must have this service
    :type service: anything hash-able
    :param mask: value bitwise-and'ed against the incoming routing id
    :type mask: int
    :param value: required result of ``routing_id & mask`` to trigger the
        handler
    :type value: int
    :param method: the method name
    :type method: string
    :param handler: the function called on incoming matching messages
    :type handler: callable
    :param schedule: whether to schedule a separate greenlet running
        ``handler`` for each matching message. default ``False``.
    :type schedule: bool

    :raises:
        - :class:`ImpossibleSubscription
          <junction.errors.ImpossibleSubscription>` if there is no routing
          ID which could possibly match the mask/value pair
        - :class:`OverlappingSubscription
          <junction.errors.OverlappingSubscription>` if a prior publish
          registration overlaps with this one
    '''
    if handler is None:
        # support @hub.accept_publish(serv, mask, val, meth) decorator usage
        return lambda h: self.accept_publish(
            service, mask, value, method, h, schedule)

    log.info("accepting publishes%s %r" % (
        " scheduled" if schedule else "",
        (service, (mask, value), method),))

    self._dispatcher.add_local_subscription(
        const.MSG_TYPE_PUBLISH,
        service, mask, value, method, handler, schedule)

    return handler
def unsubscribe_publish(self, service, mask, value):
    '''Remove a publish subscription

    :param service: the service of the subscription to remove
    :type service: anything hash-able
    :param mask: the mask of the subscription to remove
    :type mask: int
    :param value: the value in the subscription to remove
    :type value: int

    :returns: a boolean indicating whether the subscription was there
        (True) and removed, or not (False)
    '''
    log.info("unsubscribing from publish %r" % (
        (service, (mask, value)),))

    removed = self._dispatcher.remove_local_subscription(
        const.MSG_TYPE_PUBLISH, service, mask, value)
    return removed
def publish(self, service, routing_id, method, args=None, kwargs=None,
            broadcast=False, udp=False):
    '''Send a 1-way message

    :param service: the service name (the routing top level)
    :type service: anything hash-able
    :param int routing_id: the id used for routing within the registered
        handlers of the service
    :param string method: the method name to call
    :param tuple args: The positional arguments to send along with the
        request. If the first positional argument is a generator object,
        the publish will be sent in chunks :ref:`(more info)
        <chunked-messages>`.
    :param dict kwargs: keyword arguments to send along with the request
    :param bool broadcast: if ``True``, send to every peer with a matching
        subscription.
    :param bool udp: deliver the message over UDP instead of the usual TCP

    :returns: None. use 'rpc' methods for requests with responses.

    :raises: :class:`Unroutable <junction.errors.Unroutable>` if no peers
        are registered to receive the message
    '''
    # pick the transport-specific sender
    sender = (self._dispatcher.send_publish_udp
              if udp
              else self._dispatcher.send_publish)

    delivered = sender(
        None, service, routing_id, method,
        args or (), kwargs or {}, singular=not broadcast)

    if not delivered:
        raise errors.Unroutable()
def publish_receiver_count(self, service, routing_id):
    '''Get the number of peers that would handle a particular publish

    :param service: the service name
    :type service: anything hash-able
    :param routing_id: the id used for limiting the service handlers
    :type routing_id: int
    '''
    routes = self._dispatcher.find_peer_routes(
        const.MSG_TYPE_PUBLISH, service, routing_id)
    count = sum(1 for _ in routes)

    # a local subscription counts as one more receiver
    if self._dispatcher.locally_handles(
            const.MSG_TYPE_PUBLISH, service, routing_id):
        count += 1
    return count
def accept_rpc(self, service, mask, value, method, handler=None,
               schedule=True):
    '''Set a handler for incoming RPCs

    :param service: the incoming RPC must have this service
    :type service: anything hash-able
    :param mask: value bitwise-and'ed against the incoming routing id
    :type mask: int
    :param value: required result of ``routing_id & mask`` to trigger the
        handler
    :type value: int
    :param method: the method name to trigger handler
    :type method: string
    :param handler: the function called on incoming matching RPC requests
    :type handler: callable
    :param schedule: whether to schedule a separate greenlet running
        ``handler`` for each matching message. default ``True``.
    :type schedule: bool

    :raises:
        - :class:`ImpossibleSubscription
          <junction.errors.ImpossibleSubscription>` if there is no routing
          ID which could possibly match the mask/value pair
        - :class:`OverlappingSubscription
          <junction.errors.OverlappingSubscription>` if a prior rpc
          registration overlaps with this one
    '''
    if handler is None:
        # support @hub.accept_rpc(serv, mask, val, meth) decorator usage
        return lambda h: self.accept_rpc(
            service, mask, value, method, h, schedule)

    log.info("accepting RPCs%s %r" % (
        " scheduled" if schedule else "",
        (service, (mask, value), method),))

    self._dispatcher.add_local_subscription(
        const.MSG_TYPE_RPC_REQUEST,
        service, mask, value, method, handler, schedule)

    return handler
def unsubscribe_rpc(self, service, mask, value):
    '''Remove a rpc subscription

    :param service: the service of the subscription to remove
    :type service: anything hash-able
    :param mask: the mask of the subscription to remove
    :type mask: int
    :param value: the value in the subscription to remove
    :type value: int

    :returns: a boolean indicating whether the subscription was there
        (True) and removed, or not (False)
    '''
    log.info("unsubscribing from RPC %r" % ((service, (mask, value)),))

    removed = self._dispatcher.remove_local_subscription(
        const.MSG_TYPE_RPC_REQUEST, service, mask, value)
    return removed
def send_rpc(self, service, routing_id, method, args=None, kwargs=None,
             broadcast=False):
    '''Send out an RPC request

    :param service: the service name (the routing top level)
    :type service: anything hash-able
    :param routing_id: The id used for routing within the registered
        handlers of the service.
    :type routing_id: int
    :param method: the method name to call
    :type method: string
    :param args: The positional arguments to send along with the request.
        If the first argument is a generator, the request will be sent in
        chunks :ref:`(more info) <chunked-messages>`.
    :type args: tuple
    :param kwargs: keyword arguments to send along with the request
    :type kwargs: dict
    :param broadcast: if ``True``, send to every peer with a matching
        subscription
    :type broadcast: bool

    :returns: a :class:`RPC <junction.futures.RPC>` object representing
        the RPC and its future response.

    :raises: :class:`Unroutable <junction.errors.Unroutable>` if no peers
        are registered to receive the message
    '''
    request = self._dispatcher.send_rpc(
        service, routing_id, method,
        args or (), kwargs or {}, not broadcast)

    # a falsy result means nobody could receive the request
    if not request:
        raise errors.Unroutable()

    return request
def rpc(self, service, routing_id, method, args=None, kwargs=None,
        timeout=None, broadcast=False):
    '''Send an RPC request and return the corresponding response

    This will block waiting until the response has been received.

    :param service: the service name (the routing top level)
    :type service: anything hash-able
    :param routing_id: The id used for routing within the registered
        handlers of the service.
    :type routing_id: int
    :param method: the method name to call
    :type method: string
    :param args: The positional arguments to send along with the request.
        If the first argument is a generator, the request will be sent in
        chunks :ref:`(more info) <chunked-messages>`.
    :type args: tuple
    :param kwargs: keyword arguments to send along with the request
    :type kwargs: dict
    :param timeout: maximum time to wait for a response in seconds. with
        None, there is no timeout.
    :type timeout: float or None
    :param broadcast: if ``True``, send to every peer with a matching
        subscription
    :type broadcast: bool

    :returns: a list of the objects returned by the RPC's targets. these
        could be of any serializable type.

    :raises:
        - :class:`Unroutable <junction.errors.Unroutable>` if no peers are
          registered to receive the message
        - :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout
          was provided and it expires
    '''
    # send, then block on the future until the response(s) arrive
    pending = self.send_rpc(
        service, routing_id, method, args or (), kwargs or {}, broadcast)
    return pending.get(timeout)
def rpc_receiver_count(self, service, routing_id):
    '''Get the number of peers that would handle a particular RPC

    :param service: the service name
    :type service: anything hash-able
    :param routing_id: the id used for narrowing within the service
        handlers
    :type routing_id: int

    :returns: the integer number of peers that would receive the described
        RPC
    '''
    routes = self._dispatcher.find_peer_routes(
        const.MSG_TYPE_RPC_REQUEST, service, routing_id)
    count = sum(1 for _ in routes)

    # a local subscription counts as one more receiver
    if self._dispatcher.locally_handles(
            const.MSG_TYPE_RPC_REQUEST, service, routing_id):
        count += 1
    return count
def start(self):
    "Start up the hub's server, and have it start initiating connections"
    log.info("starting")

    # spin up the TCP and UDP listener coroutines
    self._listener_coro = backend.greenlet(self._listener)
    self._udp_listener_coro = backend.greenlet(self._udp_listener)
    for coro in (self._listener_coro, self._udp_listener_coro):
        backend.schedule(coro)

    # begin connecting out to all configured peers
    for addr in self._peers:
        self.add_peer(addr)
def add_peer(self, peer_addr):
    "Build a connection to the Hub at a given ``(host, port)`` address"
    sock = backend.Socket()
    peer = connection.Peer(
        self._ident, self._dispatcher, peer_addr, sock)
    peer.start()

    # remember the peer so wait_connected() can find it by address
    self._started_peers[peer_addr] = peer
def peers(self):
    "list of the (host, port) pairs of all connected peer Hubs"
    connected = []
    for addr, peer in self._dispatcher.peers.items():
        if peer.up:
            connected.append(addr)
    return connected
def main(argv=None):
    """Takes crash data via args and generates a Socorro signature

    :arg argv: optional list of command line arguments; when None,
        argparse reads from ``sys.argv``

    :returns: process exit code (0 on success, 1 on fatal API errors)

    """
    parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
    parser.add_argument(
        '-v', '--verbose', help='increase output verbosity', action='store_true'
    )
    parser.add_argument(
        '--format', help='specify output format: csv, text (default)'
    )
    parser.add_argument(
        '--different-only', dest='different', action='store_true',
        help='limit output to just the signatures that changed',
    )
    parser.add_argument(
        'crashids', metavar='crashid', nargs='*',
        help='crash id to generate signatures for'
    )
    if argv is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(argv)

    # pick the output formatter; anything other than csv falls back to text
    if args.format == 'csv':
        outputter = CSVOutput
    else:
        outputter = TextOutput

    api_token = os.environ.get('SOCORRO_API_TOKEN', '')

    generator = SignatureGenerator()

    if args.crashids:
        crashids_iterable = args.crashids
    elif not sys.stdin.isatty():
        # If a script is piping to this script, then isatty() returns False. If
        # there is no script piping to this script, then isatty() returns True
        # and if we do list(sys.stdin), it'll block waiting for input.
        crashids_iterable = list(sys.stdin)
    else:
        crashids_iterable = []

    if not crashids_iterable:
        parser.print_help()
        return 0

    with outputter() as out:
        for crash_id in crashids_iterable:
            crash_id = crash_id.strip()

            # fetch the raw crash; skip missing ids and server errors, but
            # abort entirely on rate limiting
            resp = fetch('/RawCrash/', crash_id, api_token)
            if resp.status_code == 404:
                out.warning('%s: does not exist.' % crash_id)
                continue
            if resp.status_code == 429:
                out.warning('API rate limit reached. %s' % resp.content)
                # FIXME(willkg): Maybe there's something better we could do
                # here. Like maybe wait a few minutes.
                return 1
            if resp.status_code == 500:
                out.warning('HTTP 500: %s' % resp.content)
                continue

            raw_crash = resp.json()

            # If there's an error in the raw crash, then something is
            # wrong--probably with the API token. So print that out and exit.
            if 'error' in raw_crash:
                out.warning('Error fetching raw crash: %s' % raw_crash['error'])
                return 1

            # fetch the processed crash with the same error policy
            resp = fetch('/ProcessedCrash/', crash_id, api_token)
            if resp.status_code == 404:
                out.warning('%s: does not have processed crash.' % crash_id)
                continue
            if resp.status_code == 429:
                out.warning('API rate limit reached. %s' % resp.content)
                # FIXME(willkg): Maybe there's something better we could do
                # here. Like maybe wait a few minutes.
                return 1
            if resp.status_code == 500:
                out.warning('HTTP 500: %s' % resp.content)
                continue

            processed_crash = resp.json()

            # If there's an error in the processed crash, then something is
            # wrong--probably with the API token. So print that out and exit.
            if 'error' in processed_crash:
                out.warning('Error fetching processed crash: %s' % processed_crash['error'])
                return 1

            old_signature = processed_crash['signature']
            crash_data = convert_to_crash_data(raw_crash, processed_crash)
            result = generator.generate(crash_data)

            # emit unless --different-only suppressed unchanged signatures
            if not args.different or old_signature != result.signature:
                out.data(crash_id, old_signature, result, args.verbose)
def send_result(self, return_code, output, service_description='',
                time_stamp=0, specific_servers=None):
    '''Send result to the Skinken WS

    :param return_code: check return code to report
    :param output: check output text
    :param service_description: service the result applies to
    :param time_stamp: epoch seconds of the result; 0 means "now"
    :param specific_servers: iterable of server names to restrict the send
        to; None means all configured servers
    '''
    if time_stamp == 0:
        time_stamp = int(time.time())

    # `is None` instead of `== None`; only known servers are kept
    if specific_servers is None:
        specific_servers = self.servers
    else:
        specific_servers = set(self.servers).intersection(specific_servers)

    for server in specific_servers:
        post_data = {
            'time_stamp': time_stamp,
            'host_name': self.servers[server]['custom_fqdn'],
            'service_description': service_description,
            'return_code': return_code,
            'output': output,
        }

        if self.servers[server]['availability']:
            url = '%s://%s:%s%s' % (self.servers[server]['protocol'],
                                    self.servers[server]['host'],
                                    self.servers[server]['port'],
                                    self.servers[server]['uri'])
            auth = (self.servers[server]['username'],
                    self.servers[server]['password'])
            try:
                response = requests.post(
                    url, auth=auth, headers=self.http_headers,
                    verify=self.servers[server]['verify'],
                    timeout=self.servers[server]['timeout'],
                    data=post_data)
                if response.status_code == 400:
                    LOG.error("[ws_shinken][%s]: HTTP status: %s - The content of the WebService call is incorrect", server, response.status_code)
                elif response.status_code == 401:
                    LOG.error("[ws_shinken][%s]: HTTP status: %s - You must provide an username and password", server, response.status_code)
                elif response.status_code == 403:
                    LOG.error("[ws_shinken][%s]: HTTP status: %s - The username or password is wrong", server, response.status_code)
                elif response.status_code != 200:
                    LOG.error("[ws_shinken][%s]: HTTP status: %s", server, response.status_code)
            # `except (...) as error` replaces the Python-2-only
            # `except (...), error` syntax (works on 2.6+ and 3.x)
            except (requests.ConnectionError, requests.Timeout) as error:
                # mark the server down so the result gets cached below
                self.servers[server]['availability'] = False
                LOG.error(error)
        else:
            LOG.error("[ws_shinken][%s]: Data not sent, server is unavailable", server)

        # truthiness instead of `== False` / `== True` comparisons
        if (not self.servers[server]['availability']
                and self.servers[server]['cache']):
            self.servers[server]['csv'].writerow(post_data)
            LOG.info("[ws_shinken][%s]: Data cached", server)
def close_cache(self):
    '''Close cache of WS Shinken

    Closes the cache file of every server configured with caching enabled.
    '''
    # Close all WS_Shinken cache files; truthiness instead of `== True`
    for server in self.servers:
        if self.servers[server]['cache']:
            self.servers[server]['file'].close()
def prepare_dir(app, directory, delete=False):
    """Create apidoc dir, delete contents if delete is True.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param directory: the apidoc directory. you can use relative paths here
    :type directory: str
    :param delete: if True, deletes the contents of apidoc. This acts like
        an override switch.
    :type delete: bool
    :returns: None
    :rtype: None
    :raises: None
    """
    logger.info("Preparing output directories for jinjaapidoc.")

    if not os.path.exists(directory):
        logger.debug("Creating %s", directory)
        os.mkdir(directory)
        return

    # directory exists: only wipe and recreate it when asked to
    if delete:
        logger.debug("Deleting dir %s", directory)
        shutil.rmtree(directory)
        logger.debug("Creating dir %s", directory)
        os.mkdir(directory)
def makename(package, module):
    """Join package and module with a dot.

    Package or Module can be empty.

    :param package: the package name
    :type package: :class:`str`
    :param module: the module name
    :type module: :class:`str`
    :returns: the joined name
    :rtype: :class:`str`
    :raises: :class:`AssertionError`, if both package and module are empty
    """
    # Both package and module can be None/empty, but not at the same time.
    assert package or module, "Specify either package or module"

    if package and module:
        return '.'.join((package, module))
    return package or module
def write_file(app, name, text, dest, suffix, dryrun, force):
    """Write the rendered output file for module/package <name>.

    Also registers the new document with the sphinx build environment so
    it is picked up in the same build.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param name: the file name without file extension
    :type name: :class:`str`
    :param text: the content of the file
    :type text: :class:`str`
    :param dest: the output directory
    :type dest: :class:`str`
    :param suffix: the file extension
    :type suffix: :class:`str`
    :param dryrun: If True, do not create any files, just log the
                   potential location.
    :type dryrun: :class:`bool`
    :param force: Overwrite existing files
    :type force: :class:`bool`
    :returns: None
    :raises: None
    """
    fname = os.path.join(dest, '%s.%s' % (name, suffix))
    if dryrun:
        logger.info('Would create file %s.' % fname)
        return
    if not force and os.path.isfile(fname):
        logger.info('File %s already exists, skipping.' % fname)
        return
    logger.info('Creating file %s.' % fname)
    with open(fname, 'w') as f:
        f.write(text)
        # register the doc with sphinx, using a srcdir-relative doc name
        relpath = os.path.relpath(fname, start=app.env.srcdir)
        abspath = os.sep + relpath
        docpath = app.env.relfn2path(abspath)[0]
        docpath = docpath.rsplit(os.path.extsep, 1)[0]
        logger.debug('Adding document %s' % docpath)
        app.env.found_docs.add(docpath)
def import_name(app, name):
    """Import the given name and return the imported object.

    :param app: the sphinx app (unused; kept for interface consistency
        with the other helpers)
    :type app: :class:`sphinx.application.Sphinx`
    :param name: name to import
    :type name: str
    :returns: the imported object or None if the import failed
    :rtype: object | None
    :raises: None
    """
    try:
        logger.debug('Importing %r', name)
        name, obj = autosummary.import_by_name(name)[:2]
        logger.debug('Imported %s', obj)
        return obj
    except ImportError as e:
        # ``Logger.warn`` is a deprecated alias of ``warning``.
        logger.warning("Jinjapidoc failed to import %r: %s", name, e)
        return None
def get_members(app, mod, typ, include_public=None):
    """Return the members of mod of the given type

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param mod: the module with members
    :type mod: module
    :param typ: the typ, ``'class'``, ``'function'``, ``'exception'``,
                ``'data'``, ``'members'``
    :type typ: str
    :param include_public: list of private members to include to publics
    :type include_public: list | None
    :returns: tuple of public member names and all member names
    :rtype: tuple
    :raises: None
    """
    def include_here(x):
        """Return true if the member should be included in mod.

        A member will be included if it is declared in this module or
        package.  If the `jinjaapidoc_include_from_all` option is `True`
        then the member can also be included if it is listed in
        `__all__`.

        :param x: The member
        :type x: A class, exception, or function.
        :returns: True if the member should be included in mod.
                  False otherwise.
        :rtype: bool
        """
        return (x.__module__ == mod.__name__ or
                (include_from_all and x.__name__ in all_list))

    all_list = getattr(mod, '__all__', [])
    include_from_all = app.config.jinjaapi_include_from_all
    include_public = include_public or []
    # predicate per member type; unknown types match nothing
    tests = {'class': lambda x: inspect.isclass(x) and
             not issubclass(x, BaseException) and include_here(x),
             'function': lambda x: inspect.isfunction(x) and include_here(x),
             'exception': lambda x: inspect.isclass(x) and
             issubclass(x, BaseException) and include_here(x),
             'data': lambda x: not inspect.ismodule(x) and
             not inspect.isclass(x) and not inspect.isfunction(x),
             'members': lambda x: True}
    items = []
    for name in dir(mod):
        i = getattr(mod, name)
        # NOTE: a stray ``inspect.ismodule(i)`` call whose result was
        # discarded has been removed here (dead statement).
        if tests.get(typ, lambda x: False)(i):
            items.append(name)
    public = [x for x in items
              if x in include_public or not x.startswith('_')]
    logger.debug('Got members of %s of type %s: public %s and %s',
                 mod, typ, public, items)
    return public, items
def _get_submodules(app, module):
    """Get all submodules for the given module/package

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param module: the module to query or module path
    :type module: module | str
    :returns: list of (module name, ispkg) tuples
    :rtype: list
    :raises: TypeError
    """
    if inspect.ismodule(module):
        if hasattr(module, '__path__'):
            p = module.__path__
        else:
            # a plain module (no __path__) cannot contain submodules
            return []
    elif isinstance(module, str):
        # BUG FIX: pkgutil.iter_modules expects an iterable of path
        # entries; a bare string would be iterated character by
        # character, treating each character as a directory.
        p = [module]
    else:
        raise TypeError("Only Module or String accepted. %s given." %
                        type(module))
    logger.debug('Getting submodules of %s', p)
    submodules = [(name, ispkg)
                  for loader, name, ispkg in pkgutil.iter_modules(p)]
    logger.debug('Found submodules of %s: %s', module, submodules)
    return submodules
def get_submodules(app, module):
    """Get all non-package submodules of the given module/package.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param module: the module to query or module path
    :type module: module | str
    :returns: list of module names excluding packages
    :rtype: list
    :raises: TypeError
    """
    return [name for name, ispkg in _get_submodules(app, module)
            if not ispkg]
def get_subpackages(app, module):
    """Get all subpackages of the given module/package.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param module: the module to query or module path
    :type module: module | str
    :returns: list of package names
    :rtype: list
    :raises: TypeError
    """
    return [name for name, ispkg in _get_submodules(app, module)
            if ispkg]
def get_context(app, package, module, fullname):
    """Return a dict for template rendering

    Variables:

      * :package: The top package
      * :module: the module
      * :fullname: package.module
      * :subpkgs: packages beneath module
      * :submods: modules beneath module
      * :classes: public classes in module
      * :allclasses: public and private classes in module
      * :exceptions: public exceptions in module
      * :allexceptions: public and private exceptions in module
      * :functions: public functions in module
      * :allfunctions: public and private functions in module
      * :data: public data in module
      * :alldata: public and private data in module
      * :members: dir(module)

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param package: the parent package name
    :type package: str
    :param module: the module name
    :type module: str
    :param fullname: package.module
    :type fullname: str
    :returns: a dict with variables for template rendering
    :rtype: :class:`dict`
    :raises: None
    """
    var = {'package': package, 'module': module, 'fullname': fullname}
    logger.debug('Creating context for: package %s, module %s, fullname %s',
                 package, module, fullname)
    obj = import_name(app, fullname)
    if not obj:
        # BUG FIX: the last key was misspelled 'memebers', leaving
        # 'members' undefined in the context when the import failed.
        for k in ('subpkgs', 'submods', 'classes', 'allclasses',
                  'exceptions', 'allexceptions', 'functions',
                  'allfunctions', 'data', 'alldata', 'members'):
            var[k] = []
        return var
    var['subpkgs'] = get_subpackages(app, obj)
    var['submods'] = get_submodules(app, obj)
    var['classes'], var['allclasses'] = get_members(app, obj, 'class')
    var['exceptions'], var['allexceptions'] = get_members(app, obj,
                                                          'exception')
    var['functions'], var['allfunctions'] = get_members(app, obj, 'function')
    var['data'], var['alldata'] = get_members(app, obj, 'data')
    # NOTE(review): unlike the other keys, 'members' keeps the
    # (public, all) tuple returned by get_members — confirm the
    # templates expect this.
    var['members'] = get_members(app, obj, 'members')
    logger.debug('Created context: %s', var)
    return var
def create_module_file(app, env, package, module, dest, suffix, dryrun,
                       force):
    """Render the module template and write the resulting file.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param env: the jinja environment for the templates
    :type env: :class:`jinja2.Environment`
    :param package: the package name
    :type package: :class:`str`
    :param module: the module name
    :type module: :class:`str`
    :param dest: the output directory
    :type dest: :class:`str`
    :param suffix: the file extension
    :type suffix: :class:`str`
    :param dryrun: If True, do not create any files, just log the
                   potential location.
    :type dryrun: :class:`bool`
    :param force: Overwrite existing files
    :type force: :class:`bool`
    :returns: None
    :raises: None
    """
    logger.debug('Create module file: package %s, module %s',
                 package, module)
    template = env.get_template(MODULE_TEMPLATE_NAME)
    fullname = makename(package, module)
    context = get_context(app, package, module, fullname)
    context['ispkg'] = False
    rendered = template.render(context)
    write_file(app, fullname, rendered, dest, suffix, dryrun, force)
def create_package_file(app, env, root_package, sub_package, private, dest,
                        suffix, dryrun, force):
    """Render the package template and write the file, plus module files.

    Recursively creates a module file for every non-skipped submodule.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param env: the jinja environment for the templates
    :type env: :class:`jinja2.Environment`
    :param root_package: the parent package
    :type root_package: :class:`str`
    :param sub_package: the package name without root
    :type sub_package: :class:`str`
    :param private: Include \"_private\" modules
    :type private: :class:`bool`
    :param dest: the output directory
    :type dest: :class:`str`
    :param suffix: the file extension
    :type suffix: :class:`str`
    :param dryrun: If True, do not create any files, just log the
                   potential location.
    :type dryrun: :class:`bool`
    :param force: Overwrite existing files
    :type force: :class:`bool`
    :returns: None
    :raises: None
    """
    logger.debug('Create package file: rootpackage %s, sub_package %s',
                 root_package, sub_package)
    template = env.get_template(PACKAGE_TEMPLATE_NAME)
    fullname = makename(root_package, sub_package)
    context = get_context(app, root_package, sub_package, fullname)
    context['ispkg'] = True
    for submod in context['submods']:
        if not shall_skip(app, submod, private):
            create_module_file(app, env, fullname, submod, dest, suffix,
                               dryrun, force)
    rendered = template.render(context)
    write_file(app, fullname, rendered, dest, suffix, dryrun, force)
def shall_skip(app, module, private):
    """Check if we want to skip this module.

    A module is skipped when it has a "private" name (leading
    underscore) and private modules were not requested.
    ``__init__.py`` is never skipped.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param module: the module file name or path
    :type module: :class:`str`
    :param private: True, if privates are allowed
    :type private: :class:`bool`
    """
    logger.debug('Testing if %s should be skipped.', module)
    # BUG FIX: callers pass full paths (e.g. ``pkg/_mod.py``), so the
    # leading-underscore test must run on the basename; testing the
    # whole path inspected the directory part and privates were never
    # skipped for path arguments.
    filename = os.path.basename(module)
    # skip if it has a "private" name and this is selected
    if filename != '__init__.py' and filename.startswith('_') and \
            not private:
        logger.debug('Skip %s because its either private or __init__.',
                     module)
        return True
    logger.debug('Do not skip %s', module)
    return False
def recurse_tree(app, env, src, dest, excludes, followlinks, force, dryrun,
                 private, suffix):
    """Look for every file in the directory tree and create the
    corresponding ReST files.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param env: the jinja environment
    :type env: :class:`jinja2.Environment`
    :param src: the path to the python source files
    :type src: :class:`str`
    :param dest: the output directory
    :type dest: :class:`str`
    :param excludes: the paths to exclude
    :type excludes: :class:`list`
    :param followlinks: follow symbolic links
    :type followlinks: :class:`bool`
    :param force: overwrite existing files
    :type force: :class:`bool`
    :param dryrun: do not generate files
    :type dryrun: :class:`bool`
    :param private: include "_private" modules
    :type private: :class:`bool`
    :param suffix: the file extension
    :type suffix: :class:`str`
    :returns: list of top-level module/package names that were documented
    """
    # check if the base directory is a package and get its name
    if INITPY in os.listdir(src):
        root_package = src.split(os.path.sep)[-1]
    else:
        # otherwise, the base is a directory with packages
        root_package = None
    toplevels = []
    for root, subs, files in walk(src, followlinks=followlinks):
        # document only Python module files (that aren't excluded)
        py_files = sorted(f for f in files
                          if os.path.splitext(f)[1] in PY_SUFFIXES and  # noqa: W504
                          not is_excluded(os.path.join(root, f), excludes))
        is_pkg = INITPY in py_files
        if is_pkg:
            # move __init__.py to the front so it is processed first
            py_files.remove(INITPY)
            py_files.insert(0, INITPY)
        elif root != src:
            # only accept non-package at toplevel
            del subs[:]
            continue
        # remove hidden ('.') and private ('_') directories, as well as
        # excluded dirs; mutating subs in place prunes the walk
        if private:
            exclude_prefixes = ('.',)
        else:
            exclude_prefixes = ('.', '_')
        subs[:] = sorted(sub for sub in subs
                         if not sub.startswith(exclude_prefixes) and
                         not is_excluded(os.path.join(root, sub), excludes))
        if is_pkg:
            # we are in a package with something to document
            if subs or len(py_files) > 1 or not \
                    shall_skip(app, os.path.join(root, INITPY), private):
                # dotted package path relative to src
                subpackage = root[len(src):].lstrip(os.path.sep).\
                    replace(os.path.sep, '.')
                create_package_file(app, env, root_package, subpackage,
                                    private, dest, suffix, dryrun, force)
                toplevels.append(makename(root_package, subpackage))
        else:
            # if we are at the root level, we don't require it to be a package
            assert root == src and root_package is None
            for py_file in py_files:
                if not shall_skip(app, os.path.join(src, py_file), private):
                    module = os.path.splitext(py_file)[0]
                    create_module_file(app, env, root_package, module, dest,
                                       suffix, dryrun, force)
                    toplevels.append(module)
    return toplevels
def normalize_excludes(excludes):
    """Return the exclude list as normalized absolute paths."""
    normalized = []
    for exclude in excludes:
        normalized.append(os.path.normpath(os.path.abspath(exclude)))
    return normalized
def is_excluded(root, excludes):
    """Check if the directory is in the exclude list.

    The path is normalized before the exact comparison, so only full
    path matches count (an exclude "foo" does not accidentally exclude
    "foobar").
    """
    return os.path.normpath(root) in excludes
def generate(app, src, dest, exclude=None, followlinks=False,
             force=False, dryrun=False, private=False, suffix='rst',
             template_dirs=None):
    """Generate the rst files

    Raises an :class:`OSError` if the source path is not a directory.

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :param src: path to python source files
    :type src: :class:`str`
    :param dest: output directory
    :type dest: :class:`str`
    :param exclude: list of paths to exclude (None means no excludes)
    :type exclude: :class:`list` | None
    :param followlinks: follow symbolic links
    :type followlinks: :class:`bool`
    :param force: overwrite existing files
    :type force: :class:`bool`
    :param dryrun: do not create any files
    :type dryrun: :class:`bool`
    :param private: include \"_private\" modules
    :type private: :class:`bool`
    :param suffix: file suffix
    :type suffix: :class:`str`
    :param template_dirs: directories to search for user templates
    :type template_dirs: None | :class:`list`
    :returns: None
    :rtype: None
    :raises: OSError
    """
    # BUG FIX: the default was the mutable ``exclude=[]``; use None as
    # the sentinel to avoid sharing one list across calls.
    exclude = [] if exclude is None else exclude
    suffix = suffix.strip('.')
    if not os.path.isdir(src):
        raise OSError("%s is not a directory" % src)
    if not os.path.isdir(dest) and not dryrun:
        os.makedirs(dest)
    src = os.path.normpath(os.path.abspath(src))
    exclude = normalize_excludes(exclude)
    loader = make_loader(template_dirs)
    env = make_environment(loader)
    recurse_tree(app, env, src, dest, exclude, followlinks, force, dryrun,
                 private, suffix)
def main(app):
    """Parse the config of the app and initiate the generation process

    :param app: the sphinx app
    :type app: :class:`sphinx.application.Sphinx`
    :returns: None
    :rtype: None
    :raises: None
    """
    conf = app.config
    src = conf.jinjaapi_srcdir
    if not src:
        # nothing configured; jinjaapidoc is a no-op for this project
        return
    out = conf.jinjaapi_outputdir or app.env.srcdir
    if conf.jinjaapi_addsummarytemplate:
        conf.templates_path.append(
            pkg_resources.resource_filename(__package__,
                                            AUTOSUMMARYTEMPLATE_DIR))
    conf.templates_path.append(
        pkg_resources.resource_filename(__package__, TEMPLATE_DIR))
    prepare_dir(app, out, not conf.jinjaapi_nodelete)
    generate(app, src, out,
             exclude=conf.jinjaapi_exclude_paths,
             force=conf.jinjaapi_force,
             followlinks=conf.jinjaapi_followlinks,
             dryrun=conf.jinjaapi_dryrun,
             private=conf.jinjaapi_includeprivate,
             suffix="rst",
             template_dirs=conf.templates_path)
Return floating point equality. def _isclose(obja, objb, rtol=1e-05, atol=1e-08): """Return floating point equality.""" return abs(obja - objb) <= (atol + rtol * abs(objb))
Determine if an object is a real number. Both Python standard data types and Numpy data types are supported. :param obj: Object :type obj: any :rtype: boolean def _isreal(obj): """ Determine if an object is a real number. Both Python standard data types and Numpy data types are supported. :param obj: Object :type obj: any :rtype: boolean """ # pylint: disable=W0702 if (obj is None) or isinstance(obj, bool): return False try: cond = (int(obj) == obj) or (float(obj) == obj) except: return False return cond
def _no_exp(number):
    r"""
    Convert a number to a string without using scientific notation.

    :param number: Number to convert
    :type  number: integer or float
    :rtype: string
    :raises: RuntimeError (Argument \`number\` is not valid)
    """
    if isinstance(number, bool) or (not isinstance(number, (int, float))):
        raise RuntimeError("Argument `number` is not valid")
    mant, exp = _to_scientific_tuple(number)
    if not exp:
        # exponent 0: the plain representation is already exact
        return str(number)
    floating_mant = "." in mant
    mant = mant.replace(".", "")
    if exp < 0:
        # |number| < 1: pad with leading zeros after "0."
        return "0." + "0" * (-exp - 1) + mant
    if not floating_mant:
        # single-digit mantissa: append zeros (and ".0" for floats)
        return mant + "0" * exp + (".0" if isinstance(number, float) else "")
    lfpart = len(mant) - 1
    if lfpart < exp:
        # mantissa shorter than the exponent: pad with trailing zeros
        return (mant + "0" * (exp - lfpart)).rstrip(".")
    # BUG FIX: the original returned ``mant`` here, dropping the decimal
    # point entirely (e.g. 12.5 -> "125").  Re-insert the point ``exp``
    # digits after the first digit; rstrip covers integral values whose
    # mantissa ends exactly at the point (e.g. 123.0 -> "123").
    return (mant[:exp + 1] + "." + mant[exp + 1:]).rstrip(".")
r""" Return mantissa and exponent of a number expressed in scientific notation. Full precision is maintained if the number is represented as a string. :param number: Number :type number: integer, float or string :rtype: Tuple whose first item is the mantissa (*string*) and the second item is the exponent (*integer*) of the number when expressed in scientific notation :raises: RuntimeError (Argument \`number\` is not valid) def _to_scientific_tuple(number): r""" Return mantissa and exponent of a number expressed in scientific notation. Full precision is maintained if the number is represented as a string. :param number: Number :type number: integer, float or string :rtype: Tuple whose first item is the mantissa (*string*) and the second item is the exponent (*integer*) of the number when expressed in scientific notation :raises: RuntimeError (Argument \`number\` is not valid) """ # pylint: disable=W0632 if isinstance(number, bool) or (not isinstance(number, (int, float, str))): raise RuntimeError("Argument `number` is not valid") convert = not isinstance(number, str) # Detect zero and return, simplifies subsequent algorithm if (convert and (not number)) or ( (not convert) and (not number.strip("0").strip(".")) ): return ("0", 0) # Break down number into its components, use Decimal type to # preserve resolution: # sign : 0 -> +, 1 -> - # digits: tuple with digits of number # exp : exponent that gives null fractional part sign, digits, exp = Decimal(str(number) if convert else number).as_tuple() mant = ( "{sign}{itg}.{frac}".format( sign="-" if sign else "", itg=digits[0], frac="".join(str(item) for item in digits[1:]), ) .rstrip("0") .rstrip(".") ) exp += len(digits) - 1 return (mant, exp)
def gcd(vector):
    """
    Calculate the greatest common divisor (GCD) of a sequence of numbers.

    The sequence can be a list of numbers or a Numpy vector of numbers.
    The computations are carried out with a precision of 1E-12 if the
    objects are not `fractions
    <https://docs.python.org/3/library/fractions.html>`_. When possible
    it is best to use the `fractions
    <https://docs.python.org/3/library/fractions.html>`_ data type with
    the numerator and denominator arguments when computing the GCD of
    floating point numbers.

    :param vector: Vector of numbers
    :type  vector: list of numbers or Numpy vector of numbers
    """
    # pylint: disable=C1801
    if not len(vector):
        return None
    # Fold pgcd over the sequence; a one-element vector never calls pgcd.
    result = vector[0]
    for item in vector[1:]:
        result = pgcd(result, item)
    return result
def normalize(value, series, offset=0):
    r"""
    Scale a value to the range defined by a series.

    :param value: Value to normalize
    :type  value: number
    :param series: List of numbers that defines the normalization range
    :type  series: list
    :param offset: Normalization offset, i.e. the returned value will be in
                   the range [**offset**, 1.0]
    :type  offset: number
    :rtype: number
    :raises:
     * RuntimeError (Argument \`offset\` is not valid)
     * RuntimeError (Argument \`series\` is not valid)
     * RuntimeError (Argument \`value\` is not valid)
     * ValueError (Argument \`offset\` has to be in the [0.0, 1.0] range)
     * ValueError (Argument \`value\` has to be within the bounds of the
       argument \`series\`)

    For example::

        >>> import pmisc
        >>> pmisc.normalize(15, [10, 20])
        0.5
        >>> pmisc.normalize(15, [10, 20], 0.5)
        0.75
    """
    if not _isreal(value):
        raise RuntimeError("Argument `value` is not valid")
    if not _isreal(offset):
        raise RuntimeError("Argument `offset` is not valid")
    try:
        smin = float(min(series))
        smax = float(max(series))
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; only genuine errors should map
        # to the RuntimeError below.
        raise RuntimeError("Argument `series` is not valid")
    value = float(value)
    offset = float(offset)
    if not 0 <= offset <= 1:
        raise ValueError("Argument `offset` has to be in the [0.0, 1.0] range")
    if not smin <= value <= smax:
        raise ValueError(
            "Argument `value` has to be within the bounds of argument `series`"
        )
    # linear interpolation into [offset, 1.0]
    return offset + ((1.0 - offset) * (value - smin) / (smax - smin))
def per(arga, argb, prec=10):
    r"""
    Calculate percentage difference between numbers.

    If only two numbers are given, the percentage difference between them
    is computed. If two sequences of numbers are given (either two lists
    of numbers or Numpy vectors), the element-wise percentage difference
    is computed.  If any of the numbers in the arguments is zero the value
    returned is the maximum floating-point number supported by the Python
    interpreter.

    :param arga: First number, list of numbers or Numpy vector
    :type  arga: float, integer, list of floats or integers, or Numpy
                 vector of floats or integers
    :param argb: Second number, list of numbers or or Numpy vector
    :type  argb: float, integer, list of floats or integers, or Numpy
                 vector of floats or integers
    :param prec: Maximum length of the fractional part of the result
    :type  prec: integer
    :rtype: Float, list of floats or Numpy vector, depending on the
            arguments type
    :raises:
     * RuntimeError (Argument \`arga\` is not valid)
     * RuntimeError (Argument \`argb\` is not valid)
     * RuntimeError (Argument \`prec\` is not valid)
     * TypeError (Arguments are not of the same type)
    """
    # pylint: disable=C0103,C0200,E1101,R0204
    if not isinstance(prec, int):
        raise RuntimeError("Argument `prec` is not valid")
    # Encode each argument's kind: 1 -> scalar real, 2 -> iterable
    # (strings are excluded from the iterable case), 0 -> invalid.
    a_type = 1 * _isreal(arga) + 2 * (isiterable(arga) and
                                      not isinstance(arga, str))
    b_type = 1 * _isreal(argb) + 2 * (isiterable(argb) and
                                      not isinstance(argb, str))
    if not a_type:
        raise RuntimeError("Argument `arga` is not valid")
    if not b_type:
        raise RuntimeError("Argument `argb` is not valid")
    if a_type != b_type:
        raise TypeError("Arguments are not of the same type")
    if a_type == 1:
        # Scalar path: ratio of max to min, 0 when "equal", float max
        # when the smaller operand is (close to) zero.
        arga, argb = float(arga), float(argb)
        num_min, num_max = min(arga, argb), max(arga, argb)
        return (
            0
            if _isclose(arga, argb)
            else (
                sys.float_info.max
                if _isclose(num_min, 0.0)
                else round((num_max / num_min) - 1, prec)
            )
        )
    # Contortions to handle lists and Numpy arrays without explicitly
    # having to import numpy: copy.copy preserves the container type so
    # element assignment below works for both.
    ret = copy.copy(arga)
    for num, (x, y) in enumerate(zip(arga, argb)):
        if not _isreal(x):
            raise RuntimeError("Argument `arga` is not valid")
        if not _isreal(y):
            raise RuntimeError("Argument `argb` is not valid")
        x, y = float(x), float(y)
        ret[num] = (
            0
            if _isclose(x, y)
            else (
                sys.float_info.max
                if _isclose(x, 0.0) or _isclose(y, 0)
                else (round((max(x, y) / min(x, y)) - 1, prec))
            )
        )
    return ret
def pgcd(numa, numb):
    """
    Calculate the greatest common divisor (GCD) of two numbers.

    Works for integers, floats and ``fractions.Fraction`` instances
    (including Numpy scalar types, which are detected without importing
    Numpy). Floats are first converted to limited-denominator fractions.

    :param numa: First number
    :param numb: Second number
    :rtype: number (int, float or Fraction, matching the inputs)
    """
    # Detect integers by value comparison so Numpy integer scalars also
    # qualify without importing Numpy.
    is_int = (int(numa) == numa) and (int(numb) == numb)
    is_frac = isinstance(numa, Fraction) and isinstance(numb, Fraction)
    # Normalize Numpy scalars to native Python types.
    if is_int:
        numa, numb = int(numa), int(numb)
    elif not is_frac:
        numa, numb = float(numa), float(numb)
    # Limit floating numbers to a "sane" fractional part resolution.
    if (not is_int) and (not is_frac):
        numa = Fraction(_no_exp(numa)).limit_denominator()
        numb = Fraction(_no_exp(numb)).limit_denominator()
    # Euclid's algorithm; fractions get re-limited every step.
    while numb:
        if is_int:
            numa, numb = numb, numa % numb
        else:
            numa, numb = numb, (numa % numb).limit_denominator()
    if is_int:
        return int(numa)
    return numa if is_frac else float(numa)
def connect_ws(self, post_connect_callback, channels, reconnect=False):
    """
    Connect to a websocket.

    :param post_connect_callback: callable invoked once authentication
        succeeds (stored, called later from the auth ack handler)
    :param channels: list of SockChannel instances to subscribe to
    :param reconnect: whether the socketcluster client should reconnect
        automatically on disconnect
    """
    self.post_conn_cb = post_connect_callback
    self.channels = channels
    # Endpoint comes from the injected configuration context.
    self.wsendpoint = self.context["conf"]["endpoints"].get("websocket")

    # Skip connecting if we don't have any channels to listen to
    if not channels:
        return

    # Create socket, connect, setting callbacks along the way
    self.sock = Socketcluster.socket(self.wsendpoint)
    self.sock.setBasicListener(self._on_connect, self._on_connect_close, self._on_connect_error)
    self.sock.setAuthenticationListener(self._on_set_auth, self._on_auth)
    self.sock.setreconnection(reconnect)
    self.sock.connect()
def wscall(self, method, query=None, callback=None):
    """Submit a request on the websocket, with an ack when a callback is given."""
    if callback is not None:
        # Caller wants a response: use the acknowledged emit variant.
        self.sock.emitack(method, query, callback)
    else:
        self.sock.emit(method, query)
def connect_channels(self, channels):
    """Subscribe each provided SockChannel to the active socket."""
    self.log.info("Connecting to channels...")
    for channel in channels:
        channel.connect(self.sock)
        self.log.info(f"\t{channel.channel}")
def _on_set_auth(self, sock, token):
    """Handle the websocket's set-auth request by storing the token on the socket."""
    message = f"Token received: {token}"
    self.log.info(message)
    sock.setAuthtoken(token)
def _on_auth(self, sock, authenticated):  # pylint: disable=unused-argument
    """Handle the authentication message received from the websocket.

    Emits an "auth" request carrying the stored credentials; on a
    successful ack, subscribes the configured channels and fires the
    post-connect callback registered in connect_ws().
    """
    def ack(eventname, error, data):  # pylint: disable=unused-argument
        """Ack handler: log auth failures, otherwise finish connection setup."""
        if error:
            self.log.error(f"""OnAuth: {error}""")
        else:
            self.connect_channels(self.channels)
            self.post_conn_cb()

    sock.emitack("auth", self.creds, ack)
def _on_connect_error(self, sock, err):  # pylint: disable=unused-argument
    """Log an error received from the websocket; SystemExit means shutdown."""
    if not isinstance(err, SystemExit):
        self.log.error(f"Websocket error: {err}")
    else:
        self.log.error("Shutting down websocket connection")
def connect(self, sock):
    """Bind this channel to *sock* and route its messages through our callback."""
    self.sock = sock

    def relay(*args, **kwargs):
        """Forward channel events, prefixing the configured response_type."""
        self.callback(self.response_type, *args, **kwargs)

    self.sock.subscribe(self.channel)
    self.sock.onchannel(self.channel, relay)
def run_cmd(cmd, input=None, timeout=30, max_try=3, num_try=1):
    '''Run command `cmd`. It's like that, and that's the way it is.

    :param cmd: command as a whitespace-separated string or an argv list
    :param input: data passed to the process's stdin
    :param timeout: seconds to wait before terminating the process
    :param max_try: total attempts before giving up
    :param num_try: current attempt number (used internally for retries)
    :returns: CmdResult(exitcode, stdout, stderr, cmd, input); exitcode is
        None when the command timed out
    '''
    if type(cmd) == str:
        cmd = cmd.split()
    process = subprocess.Popen(cmd,
                               stdin=open('/dev/null', 'r'),
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    # Python 3's communicate() accepts a timeout argument; Python 2's
    # does not, so detect which world we're in.
    communicate_has_timeout = func_has_arg(func=process.communicate, arg='timeout')
    exception = Exception
    if communicate_has_timeout:
        exception = subprocess.TimeoutExpired  # python 3.x
    stdout = stderr = b''
    exitcode = None
    try:
        if communicate_has_timeout:
            # python 3.x
            stdout, stderr = process.communicate(input, timeout)
            exitcode = process.wait()
        else:
            # python 2.x
            if timeout is None:
                stdout, stderr = process.communicate(input)
                exitcode = process.wait()
            else:
                # thread-recipe: https://stackoverflow.com/a/4825933
                def target():
                    # closure-recipe: https://stackoverflow.com/a/23558809
                    target.out, target.err = process.communicate(input)
                import threading
                thread = threading.Thread(target=target)
                thread.start()
                thread.join(timeout)
                if thread.is_alive():
                    # Timed out: kill the process; join() then finishes
                    # quickly because communicate() returns after terminate.
                    process.terminate()
                    thread.join()
                    exitcode = None
                else:
                    exitcode = process.wait()
                stdout = target.out
                stderr = target.err
    except exception:
        # Timed out (py3) or failed: retry until max_try is exhausted.
        if num_try < max_try:
            return run_cmd(cmd, input, timeout, max_try, num_try+1)
        else:
            return CmdResult(exitcode, stdout, stderr, cmd, input)
    return CmdResult(exitcode, stdout, stderr, cmd, input)
def pop_first_arg(argv):
    """Find the first positional arg (one not starting with ``-``),
    remove it from the list, and return it separately.

    :returns: tuple ``(arg, argv)``; ``arg`` is None when no positional
        argument exists.
    """
    for position, candidate in enumerate(argv):
        if candidate.startswith('-'):
            continue
        del argv[position]
        return (candidate, argv)
    return (None, argv)
def check_options(options, parser):
    """Check option requirements; print usage on failure and return an exit value."""
    if options.get('release_environment', None):
        return 0
    print("release environment is required")
    parser.print_help()
    return os.EX_USAGE
def write(self):
    """Write all needed state info to the filesystem.

    Serializes the internal state via the fax codec into ``self.state_file``.
    """
    # Bug fix: the file handle was previously opened inline and never
    # closed (and the codec's return value was bound to an unused name).
    with open(self.state_file, 'w') as handle:
        self._fax.codec.dump(self.__state, handle)
def package_config(path, template='__config__.ini.TEMPLATE', config_name='__config__.ini', **params):
    """configure the module at the given path with a config template and file.

    path = the filesystem path to the given module
    template = the config template filename within that path
    config_name = the config filename within that path
    params = a dict containing config params, which are found in the
        template using %(key)s.

    :returns: list of filenames of the config files that were written
    """
    config_fns = []
    # Find every matching template under `path`, recursively.
    template_fns = rglob(path, template)
    for template_fn in template_fns:
        config_template = ConfigTemplate(fn=template_fn)
        # Render next to the template; prompt interactively for any
        # parameter not supplied in **params.
        config = config_template.render(
            fn=os.path.join(os.path.dirname(template_fn), config_name),
            prompt=True,
            path=path,
            **params)
        config.write()
        config_fns.append(config.fn)
        log.info('wrote %r' % config)
    return config_fns
def write(self, fn=None, sorted=False, wait=0):
    """Write the contents of this config to fn or its __filename__.

    :param fn: target filename; defaults to ``self.__filename__``
    :param sorted: if True, write sections and keys in sorted order
    :param wait: seconds to wait for an existing ``.LOCK`` file to clear
    :raises FileExistsError: if the lock is still held after ``wait`` seconds
    """
    config = ConfigParser(interpolation=None)
    # Bug fix: the original referenced an undefined `keys` name when
    # sorted=True (NameError), and called .sort() on dict key views,
    # which fails on Python 3. Note the `sorted` parameter shadows the
    # builtin, so we sort via list.sort() rather than sorted().
    section_keys = list(self.__dict__.get('ordered_keys') or self.keys())
    if sorted == True:
        section_keys.sort()
    for key in section_keys:
        config[key] = {}
        ks = list(self[key].keys())
        if sorted == True:
            ks.sort()
        for k in ks:
            # Lists are joined back into a single delimited string when a
            # join delimiter is configured; empty entries are dropped.
            if type(self[key][k]) == list and self.__join_list__ is not None:
                config[key][k] = self.__join_list__.join([v for v in self[key][k] if v != ''])
            else:
                config[key][k] = str(self[key][k])
    fn = fn or self.__dict__.get('__filename__')
    # use advisory locking on this file
    i = 0
    while os.path.exists(fn + '.LOCK') and i < wait:
        i += 1
        time.sleep(1)
    if os.path.exists(fn + '.LOCK'):
        raise FileExistsError(fn + ' is locked for writing')
    with open(fn + '.LOCK', 'w') as lf:
        lf.write(time.strftime("%Y-%m-%d %H:%M:%S %Z"))
    # Bug fix: remove the lock even if writing the config raises.
    try:
        with open(fn, 'w') as f:
            config.write(f)
    finally:
        os.remove(fn + '.LOCK')
def expected_param_keys(self):
    """Return the list of ``%(key)s`` parameter names this template expects,
    in first-seen order, without duplicates."""
    pattern = re.compile('%\(([^\)]+)\)s')
    found = []
    for section in self.keys():
        for option in self[section].keys():
            value = self[section][option]
            # Only string values can contain interpolation placeholders.
            if type(value) != str:
                continue
            for match in pattern.finditer(value):
                key = match.group(1)
                if key not in found:
                    found.append(key)
    return found
def render(self, fn=None, prompt=False, **params):
    """return a Config with the given params formatted via ``%``-interpolation.

    fn=None     : If given, will assign this filename to the rendered Config.
    prompt=False: If True, will prompt for any param that is None.
    """
    from getpass import getpass
    expected_keys = self.expected_param_keys()
    compiled_params = Dict(**params)
    for key in expected_keys:
        if key not in compiled_params.keys():
            if prompt == True:
                # Keys literally named 'password' are read without echo.
                if key == 'password':
                    compiled_params[key] = getpass("%s: " % key)
                else:
                    compiled_params[key] = input("%s: " % key)
                # Strip backslashes from path-like answers (e.g. escaped
                # spaces pasted from a shell).
                if 'path' in key:
                    compiled_params[key] = compiled_params[key].replace('\\', '')
            else:
                # Leave the placeholder intact so a later render can fill it.
                compiled_params[key] = "%%(%s)s" % key
    config = ConfigTemplate(fn=fn, **self)
    config.__dict__['ordered_keys'] = self.__dict__.get('ordered_keys')
    for block in config.keys():
        for key in config[block].keys():
            if type(config[block][key]) == str:
                # %-interpolate each string value against the compiled params.
                config[block][key] = config[block][key] % compiled_params
    return config
def main():
    """Takes a crash id, pulls down data from Socorro, generates signature data.

    Fetches the raw and processed crash for the given id, converts them to
    crash data, and prints the result as JSON on stdout. Returns 1 on any
    fetch/auth error (errors go to stderr).
    """
    parser = argparse.ArgumentParser(
        formatter_class=WrappedTextHelpFormatter,
        description=DESCRIPTION
    )
    parser.add_argument(
        '-v', '--verbose', help='increase output verbosity', action='store_true'
    )
    parser.add_argument(
        'crashid', help='crash id to generate signatures for'
    )
    args = parser.parse_args()

    # Optional auth token; unauthenticated requests are rate-limited harder.
    api_token = os.environ.get('SOCORRO_API_TOKEN', '')

    crash_id = args.crashid.strip()

    resp = fetch('/RawCrash/', crash_id, api_token)
    if resp.status_code == 404:
        printerr('%s: does not exist.' % crash_id)
        return 1
    if resp.status_code == 429:
        printerr('API rate limit reached. %s' % resp.content)
        # FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
        # few minutes.
        return 1
    if resp.status_code == 500:
        printerr('HTTP 500: %s' % resp.content)
        return 1

    raw_crash = resp.json()

    # If there's an error in the raw crash, then something is wrong--probably with the API
    # token. So print that out and exit.
    if 'error' in raw_crash:
        print('Error fetching raw crash: %s' % raw_crash['error'], file=sys.stderr)
        return 1

    resp = fetch('/ProcessedCrash/', crash_id, api_token)
    if resp.status_code == 404:
        printerr('%s: does not have processed crash.' % crash_id)
        return 1
    if resp.status_code == 429:
        printerr('API rate limit reached. %s' % resp.content)
        # FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
        # few minutes.
        return 1
    if resp.status_code == 500:
        printerr('HTTP 500: %s' % resp.content)
        return 1

    processed_crash = resp.json()

    # If there's an error in the processed crash, then something is wrong--probably with the
    # API token. So print that out and exit.
    if 'error' in processed_crash:
        printerr('Error fetching processed crash: %s' % processed_crash['error'])
        return 1

    crash_data = convert_to_crash_data(raw_crash, processed_crash)
    print(json.dumps(crash_data, indent=2))
def _fill_text(self, text, width, indent):
    """Fill paragraphs like HelpFormatter, but keep bulleted lists line-by-line
    instead of squashing them into one wrapped block."""
    filled = []
    for paragraph in text.split('\n\n'):
        if paragraph.startswith('* '):
            # Bulleted list: wrap each line independently to preserve bullets.
            wrapped_lines = [
                super(WrappedTextHelpFormatter, self)._fill_text(line, width, indent)
                for line in paragraph.split('\n')
            ]
            filled.append('\n'.join(wrapped_lines))
        else:
            filled.append(
                super(WrappedTextHelpFormatter, self)._fill_text(paragraph, width, indent)
            )
    return '\n\n'.join(filled)
def get_api_services_by_name(self):
    """Build (once) and return the mapping of service name -> service config."""
    if not self.services_by_name:
        services = self.conf.get("api").get("services")
        self.services_by_name = {svc.get('name'): svc for svc in services}
    return self.services_by_name
def get_api_endpoints(self, apiname):
    """Return a copy of the named API service's endpoint list."""
    try:
        service = self.services_by_name.get(apiname)
        return service.get("endpoints").copy()
    except AttributeError:
        raise Exception("Couldn't find the API endpoints")
def get_ws_subscriptions(self, apiname):
    """Return a copy of the named API service's websocket subscriptions."""
    try:
        service = self.services_by_name.get(apiname)
        return service.get("subscriptions").copy()
    except AttributeError:
        raise Exception("Couldn't find the websocket subscriptions")
def get_api(self, name=None):
    """Return a copy of the top-level API configuration.

    NOTE(review): only the ``name is None`` path is implemented; calling
    with a non-None ``name`` falls through and returns None — confirm
    whether a named lookup was intended here.
    """
    if name is None:
        try:
            return self.conf.get("api").copy()
        except:  # NOQA
            raise Exception(f"Couldn't find the API configuration")
def get_api_service(self, name=None):
    """Return the config definition for the named service.

    Any lookup failure (including a missing name) surfaces as a generic
    Exception, matching the module's error-handling convention.
    """
    try:
        service = self.services_by_name.get(name, None)
        if service is None:
            raise ValueError(f"Couldn't find the API service configuration")
        return service
    except:  # NOQA
        raise Exception(f"Failed to retrieve the API service configuration")
Return a string corresponding to the exception type. def _ex_type_str(exobj): """Return a string corresponding to the exception type.""" regexp = re.compile(r"<(?:\bclass\b|\btype\b)\s+'?([\w|\.]+)'?>") exc_type = str(exobj) if regexp.match(exc_type): exc_type = regexp.match(exc_type).groups()[0] exc_type = exc_type[11:] if exc_type.startswith("exceptions.") else exc_type if "." in exc_type: exc_type = exc_type.split(".")[-1] return exc_type
def _unicode_to_ascii(obj):  # pragma: no cover
    """Recursively encode unicode values to UTF-8 byte strings.

    NOTE(review): ``unicode`` is a Python 2 builtin; on Python 3 this
    function raises NameError for string inputs (hence the no-cover
    pragma and the E0602 pylint suppression).
    """
    # pylint: disable=E0602,R1717
    if isinstance(obj, dict):
        # Convert both keys and values, depth-first.
        return dict(
            [
                (_unicode_to_ascii(key), _unicode_to_ascii(value))
                for key, value in obj.items()
            ]
        )
    if isinstance(obj, list):
        return [_unicode_to_ascii(element) for element in obj]
    if isinstance(obj, unicode):
        return obj.encode("utf-8")
    # Non-text leaf values pass through unchanged.
    return obj
def send_result(self, return_code, output, service_description='', specific_servers=None):
    """Send check results to the configured NSCA servers.

    :param return_code: numeric service state to report
    :param output: plugin output text
    :param service_description: service the result applies to
    :param specific_servers: optional subset of server names; defaults to
        all configured servers
    """
    # Bug fix: the original used the Python-2-only `except ..., error`
    # syntax (a SyntaxError on Python 3) and `error[1]` indexing, which
    # Python 3 exceptions do not support.
    if specific_servers is None:
        specific_servers = self.servers
    else:
        specific_servers = set(self.servers).intersection(specific_servers)
    for server in specific_servers:
        if not self.servers[server]['availability']:
            LOG.error("[nsca][%s][%s]: Data not sent, server is unavailable",
                      service_description,
                      self.servers[server]['host'])
            continue
        try:
            self.servers[server]['notifier'].svc_result(
                self.servers[server]['custom_fqdn'],
                service_description,
                int(return_code),
                str(output))
            LOG.info("[nsca][%s][%s]: Data sent",
                     service_description,
                     self.servers[server]['host'])
        except (socket.gaierror, socket.error) as error:
            # Mark the server down so later sends skip it quickly.
            self.servers[server]['availability'] = False
            LOG.error("[nsca][%s][%s]: %s",
                      service_description,
                      self.servers[server]['host'],
                      error)
def get_remote_executors(hub_ip, port=4444):
    '''Scrape remote executor URLs from the Selenium Grid Hub console.

    @param hub_ip: hub ip of selenium grid hub
    @param port: hub port of selenium grid hub
    @return: list of "<remoteHost>/wd/hub" URLs (empty on non-200 response)
    '''
    console_url = "http://%s:%s/grid/console" % (hub_ip, port)
    resp = requests.get(console_url)
    remote_hosts = ()
    if resp.status_code == 200:
        remote_hosts = re.findall(r"remoteHost: ([\w/\.:]+)", resp.text)
    return [host + "/wd/hub" for host in remote_hosts]
def gen_remote_driver(executor, capabilities):
    '''Create a remote webdriver with the given capabilities and executor.

    @param executor: command executor URL for the selenium remote driver
    @param capabilities: dict of capabilities to request for the session
    @return: remote driver
    '''
    # Selenium 3 requires the Firefox profile to be passed separately,
    # not inside the capabilities dict.
    profile = capabilities.pop("firefox_profile", None)
    return webdriver.Remote(executor,
                            desired_capabilities=capabilities,
                            browser_profile=profile)
def gen_local_driver(browser, capabilities):
    '''Create a localhost webdriver with the given capabilities.

    @param browser: "firefox" or "chrome"
    @param capabilities: dict of capabilities to request for the session
    @return: localhost driver
    @raise TypeError: for any other browser name
    '''
    if browser == "firefox":
        # The profile must be passed separately from the capabilities.
        profile = capabilities.pop("firefox_profile", None)
        return webdriver.Firefox(desired_capabilities=capabilities,
                                 firefox_profile=profile)
    if browser == "chrome":
        return webdriver.Chrome(desired_capabilities=capabilities)
    # Bug fix: error-message typo ("Unsupport" -> "Unsupported").
    raise TypeError("Unsupported browser {}".format(browser))
Calculate total energy production. Not rounded def _production(self): """Calculate total energy production. Not rounded""" return self._nuclear + self._diesel + self._gas + self._wind + self._combined + self._vapor + self._solar + self._hydraulic + self._carbon + self._waste + self._other
Calculate total energy production. Not Rounded def _links(self): """Calculate total energy production. Not Rounded""" total = 0.0 for value in self.link.values(): total += value return total
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer as a bool.

    "question" is the string presented to the user. "default" is the
    presumed answer on a bare <Enter>: "yes" (default), "no", or None to
    force an explicit answer. Returns True for yes, False for no.
    """
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]

    while True:
        print(question + prompt)
        choice = input().lower()
        if choice == '' and default is not None:
            return valid[default]
        if choice in valid:
            return valid[choice]
        print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def wall_of_name(self):
    '''Collect identifiers from the different databases (such as Entrez ids)
    and return them as one space-separated string. Uses the CrossRef model.
    '''
    names = [n for n in (self.standard_name, self.systematic_name) if n]
    names.extend(xref.xrid for xref in self.crossref_set.all())
    # Strip non-alphanumeric characters from every identifier.
    cleaned = [re.sub(nonalpha, '', n) for n in names]
    names_string = ' '.join(cleaned)
    if self.standard_name:
        # Also append the standard name with digits removed.
        names_string += ' ' + re.sub(num, '', self.standard_name)
    return names_string
def save(self, *args, **kwargs):
    """Override save() to reject a Gene whose standard_name and
    systematic_name are both null/empty or whitespace-only."""
    def _blank(value):
        # Empty/None or made up only of whitespace (space, tab, newline...).
        return not value or value.isspace()

    if _blank(self.standard_name) and _blank(self.systematic_name):
        raise ValueError(
            "Both standard_name and systematic_name are empty")
    super(Gene, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
    """Extend save() to reject a CrossRefDB with a blank name.

    'blank=False' is only enforced at form-validation time, so a direct
    save with an empty string must be caught here.
    """
    if self.name != '':
        return super(CrossRefDB, self).save(*args, **kwargs)
    raise FieldError
def makeProducer(self, request, fileForReading):
    """
    Make a L{StaticProducer} that will produce the body of this response.

    This method will also set the response code and Content-* headers.

    @param request: The L{Request} object.
    @param fileForReading: The file object containing the resource.
    @return: A L{StaticProducer}.  Calling C{.start()} on this will begin
        producing the response.
    """
    byteRange = request.getHeader(b'range')
    if byteRange is None or not self.getFileSize():
        # No Range header (or empty file): serve the whole resource.
        self._setContentHeaders(request)
        request.setResponseCode(http.OK)
        return NoRangeStaticProducer(request, fileForReading)
    try:
        parsedRanges = self._parseRangeHeader(byteRange)
    except ValueError:
        # Malformed Range headers are ignored per RFC 7233 and the full
        # resource is served instead.
        logger.warning("Ignoring malformed Range header %r" % (byteRange,))
        self._setContentHeaders(request)
        request.setResponseCode(http.OK)
        return NoRangeStaticProducer(request, fileForReading)

    if len(parsedRanges) == 1:
        # Single range: 206 Partial Content with a Content-Range header.
        offset, size = self._doSingleRangeRequest(
            request, parsedRanges[0])
        self._setContentHeaders(request, size)
        return SingleRangeStaticProducer(
            request, fileForReading, offset, size)
    else:
        # Multiple ranges: multipart/byteranges response.
        rangeInfo = self._doMultipleRangeRequest(request, parsedRanges)
        return MultipleRangeStaticProducer(
            request, fileForReading, rangeInfo)
def render_GET(self, request):
    """
    Begin sending the contents of this L{File} (or a subset of the
    contents, based on the 'range' header) to the given request.
    """
    request.setHeader(b'accept-ranges', b'bytes')

    producer = self.makeProducer(request, self.fileObject)

    if request.method == b'HEAD':
        # NOTE(review): the producer created above is never started or
        # stopped on the HEAD path — confirm no resource is left open.
        return b''

    def done(ign):
        """Stop producing once the request finishes, whether it succeeded
        or the connection was lost."""
        producer.stopProducing()
    request.notifyFinish().addCallbacks(done, done)

    producer.start()
    # and make sure the connection doesn't get closed
    return server.NOT_DONE_YET
def interface(self, context):
    """Store the runtime context on the adapter and extract its callback."""
    self.context = context
    self.callback = context.get("callback")
def shutdown(self):
    """Signal stop, shut the API down if it supports it, and join workers."""
    self.stopped.set()
    if hasattr(self.api, "shutdown"):
        self.api.shutdown()
    for worker in self.thread.values():
        worker.join()
def SwitchToAlert():
    '''Switch to and return the active JS alert, e.g. one raised by:
    <input value="Test" type="button" onClick="alert('OK')" >

    Waits up to 10 seconds; returns the alert object, or False on timeout.
    '''
    try:
        alert = WebDriverWait(Web.driver, 10).until(lambda driver: driver.switch_to_alert())
        return alert
    except Exception:
        # Bug fix: the message contained an unfilled %d placeholder and a
        # "Waring" typo; it now states the actual timeout.
        print("Warning: Timeout at 10 seconds. Alert was not found.")
        return False
def _element(cls):
    ''' Locate and return the single element described by cls.__control.

    Validates the selector, waits up to the configured timeout for
    matches, checks the configured index against the number of matches,
    and resets the index to 0 after use.
    '''
    if not cls.__is_selector():
        raise Exception("Invalid selector[%s]." % cls.__control["by"])
    driver = Web.driver
    try:
        # Poll until find_elements returns a truthy (non-empty) list.
        elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver, "find_elements")(cls.__control["by"], cls.__control["value"]))
    except:
        raise Exception("Timeout at %d seconds.Element(%s) not found." % (cls.__control["timeout"], cls.__control["by"]))

    if len(elements) < cls.__control["index"] + 1:
        raise Exception("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" % (cls.__name__, len(elements), cls.__control["index"]))

    if len(elements) > 1:
        print("Element [%s]: There are [%d] elements, choosed index=%d" % (cls.__name__, len(elements), cls.__control["index"]))

    elm = elements[cls.__control["index"]]
    # Reset the index so the next lookup defaults to the first match.
    cls.__control["index"] = 0
    return elm
def _elements(cls):
    ''' Locate and return all elements matching cls.__control.

    Validates the selector and waits up to the configured timeout for a
    non-empty match list.
    '''
    if not cls.__is_selector():
        raise Exception("Invalid selector[%s]." % cls.__control["by"])
    driver = Web.driver
    try:
        # Poll until find_elements returns a truthy (non-empty) list.
        elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver, "find_elements")(cls.__control["by"], cls.__control["value"]))
    except:
        raise Exception("Timeout at %d seconds.Element(%s) not found." % (cls.__control["timeout"], cls.__control["by"]))

    return elements
def DyStrData(cls, name, regx, index=0):
    '''Set a dynamic global value from the page source via a compiled regex.

    @param name: glob parameter name
    @param regx: a compiled regular expression, e.g. DyStrData("a", re.compile('123'))
    @param index: which match to store (default: first); empty string if
        there are not enough matches
    '''
    text = Web.PageSource()
    if not text:
        return
    # Bug fix: re._pattern_type was removed in Python 3.7; derive the
    # compiled-pattern type portably instead.
    pattern_type = getattr(re, "Pattern", None) or type(re.compile(""))
    if not isinstance(regx, pattern_type):
        raise Exception("DyStrData need the arg which have compiled the regular expression.")
    values = regx.findall(text)
    result = ""
    if len(values) > index:
        result = values[index]
    cls.glob.update({name: result})
def DyJsonData(cls, name, sequence):
    '''Set a dynamic global value from JSON data rendered in the page body.

    Reads the <body> element's innerHTML as JSON, e.g.
    <html><body>{ "code": 1, "desc": "ok"}</body></html>

    @param name: glob parameter name
    @param sequence: dot-separated path into the JSON, e.g. for
        result={"a":1,
                "b":[1,2,3,4],
                "c":{"d":5,"e":6},
                "f":{"g":[7,8,9]},
                "h":[{"i":10,"j":11},{"k":12}]
               }
        sequence1 = "a"      # -> 1
        sequence2 = "b.3"    # -> 4
        sequence3 = "f.g.2"  # -> 9
        sequence4 = "h.0.j"  # -> 11
        Stores None under `name` if the path cannot be resolved.
    '''
    cls.SetControl(by="tag name", value="body")
    json_body = cls._element().get_attribute('innerHTML')
    if not json_body:
        return
    resp = json.loads(json_body)
    # Numeric path components become int list indices, others dict keys.
    sequence = [_parse_string_value(i) for i in sequence.split('.')]
    for i in sequence:
        try:
            if isinstance(i, int):
                resp = resp[i]
            else:
                resp = resp.get(i)
        except:
            # Any lookup failure records None and stops traversal.
            cls.glob.update({name: None})
            return
    cls.glob.update({name: resp})
def VerifyURL(cls, url):
    """Return True if the browser's current URL equals *url*; otherwise
    print the actual URL and return False."""
    current = Web.driver.current_url
    if current == url:
        return True
    print("VerifyURL: %s" % current)
    return False
def SelectByIndex(cls, index):
    '''Select a dropdown option by its index.

    @param index: index of the option within the dropdown
    '''
    try:
        target = int(index)
        Select(cls._element()).select_by_index(target)
    except:
        return False
def DeSelectByIndex(cls, index):
    '''Deselect a dropdown option by its index.

    @param index: index of the option within the dropdown
    '''
    try:
        target = int(index)
        Select(cls._element()).deselect_by_index(target)
    except:
        return False
def MouseOver(cls):
    '''Hover the mouse pointer over the located element.'''
    ActionChains(Web.driver).move_to_element(cls._element()).perform()
    # Give the hover effect a moment to render.
    time.sleep(1)
def Click(cls):
    '''Single left-click on the located element.'''
    ActionChains(Web.driver).click(cls._element()).perform()
def DoubleClick(cls):
    '''Double left-click on the located element.'''
    ActionChains(Web.driver).double_click(cls._element()).perform()
def EnhancedClick(cls):
    '''
    Description: Sometimes, one click on the element doesn't work. So wait
        more time, then click again and again.
    Risk: It may operate more than one click operations.

    NOTE(review): despite the name, this loop only moves the pointer to
    the element three times and never calls .click() — confirm whether
    move_to_element alone is the intended behavior here.
    '''
    element = cls._element()
    for _ in range(3):
        action = ActionChains(Web.driver)
        action.move_to_element(element)
        action.perform()
        time.sleep(0.5)
def RightClick(cls):
    '''Single right-click (context click) on the located element.'''
    ActionChains(Web.driver).context_click(cls._element()).perform()
def ClickAndHold(cls):
    '''Press and hold the left mouse button on the located element.'''
    ActionChains(Web.driver).click_and_hold(cls._element()).perform()
def ReleaseClick(cls):
    '''Release a held mouse button on the located element.'''
    ActionChains(Web.driver).release(cls._element()).perform()
def Enter(cls):
    '''Send the ENTER key to the located input element.'''
    ActionChains(Web.driver).send_keys_to_element(cls._element(), Keys.ENTER).perform()
def Ctrl(cls, key):
    """Send a CTRL+<key> combination to the located element, e.g. key='X'."""
    target = cls._element()
    target.send_keys(Keys.CONTROL, key)