def _requires_submission(self):
    """
    Return True if the time since the last submission is greater than the
    submission interval. If no submission has ever been made, check whether
    the time since the database was last modified exceeds the submission
    interval instead.
    """
    if self.dbcon_part is None:
        return False

    tables = get_table_list(self.dbcon_part)
    nrows = 0
    for table in tables:
        if table == '__submissions__':
            continue
        nrows += get_number_of_rows(self.dbcon_part, table)
    if nrows:
        logger.debug('%d new statistics were added since the last submission.' % nrows)
    else:
        logger.debug('No new statistics were added since the last submission.')

    t0 = datetime.datetime.now()
    s = self['__submissions__']
    last_submission = s.get_last(1)
    if last_submission:
        logger.debug('Last submission was %s' % last_submission[0]['Time'])
        t_ref = datetime.datetime.strptime(last_submission[0]['Time'], Table.time_fmt)
    else:
        t_ref = datetime.datetime.fromtimestamp(os.path.getmtime(self.filepath))

    submission_interval_passed = (t0 - t_ref).total_seconds() > self.submit_interval_s
    submission_required = bool(submission_interval_passed and nrows)
    if submission_required:
        logger.debug('A submission is overdue.')
    else:
        logger.debug('No submission required.')
    return submission_required
def name(self):
    """Return the localized name for the first of our locales that appears
    in ``self.names`` (a dict mapping locale codes to localized names)."""
    # pylint:disable=E1101
    return next((self.names.get(x) for x in self._locales if x in self.names), None)
def recv(self, stream, crc_mode=1, retry=16, timeout=60, delay=1, quiet=0):
    '''
    Receive a stream via the XMODEM protocol.

        >>> stream = open('/etc/issue', 'wb')
        >>> print(modem.recv(stream))
        2342

    Returns the number of bytes received on success or ``None`` in case
    of failure.
    '''
    # initiate protocol
    error_count = 0
    char = 0
    cancel = 0
    while True:
        # first try CRC mode; if this fails, fall back to checksum mode
        if error_count >= retry:
            self.abort(timeout=timeout)
            return None
        elif crc_mode and error_count < (retry / 2):
            if not self.putc(CRC):
                time.sleep(delay)
                error_count += 1
        else:
            crc_mode = 0
            if not self.putc(NAK):
                time.sleep(delay)
                error_count += 1

        char = self.getc(1, timeout)
        if not char:
            error_count += 1
            continue
        elif char == SOH:
            break
        elif char == STX:
            break
        elif char == CAN:
            if cancel:
                return None
            else:
                cancel = 1
        else:
            error_count += 1

    # read data
    error_count = 0
    income_size = 0
    packet_size = 128
    sequence = 1
    cancel = 0
    while True:
        while True:
            if char == SOH:
                packet_size = 128
                break
            elif char == STX:
                packet_size = 1024
                break
            elif char == EOT:
                # We received an EOT, so send an ACK and return the
                # received data length.
                self.putc(ACK)
                return income_size
            elif char == CAN:
                # cancel at two consecutive cancels
                if cancel:
                    return None
                else:
                    cancel = 1
            else:
                if not quiet:
                    print('recv ERROR expected SOH/EOT, got', ord(char),
                          file=sys.stderr)
                error_count += 1
                if error_count >= retry:
                    self.abort()
                    return None
        # read sequence
        error_count = 0
        cancel = 0
        seq1 = ord(self.getc(1))
        seq2 = 0xff - ord(self.getc(1))
        if seq1 == sequence and seq2 == sequence:
            # sequence is ok, read packet (packet_size + checksum)
            data = self.getc(packet_size + 1 + crc_mode, timeout)
            if crc_mode:
                csum = (ord(data[-2]) << 8) + ord(data[-1])
                data = data[:-2]
                log.debug('CRC (%04x <> %04x)' % (csum, self.calc_crc(data)))
                valid = csum == self.calc_crc(data)
            else:
                csum = data[-1]
                data = data[:-1]
                log.debug('checksum (%02x <> %02x)' %
                          (ord(csum), self.calc_checksum(data)))
                valid = ord(csum) == self.calc_checksum(data)
            # valid data, append chunk
            if valid:
                income_size += len(data)
                stream.write(data)
                self.putc(ACK)
                sequence = (sequence + 1) % 0x100
                char = self.getc(1, timeout)
                continue
        else:
            # consume data
            self.getc(packet_size + 1 + crc_mode)
            log.debug('expecting sequence %d, got %d/%d' %
                      (sequence, seq1, seq2))
        # something went wrong, request retransmission
        self.putc(NAK)
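A minimal sketch of wiring recv() to a serial line. The XMODEM(getc, putc) callback convention matches the common xmodem library, but the pyserial device name and baud rate here are placeholders, not values from the original source.

import serial
from xmodem import XMODEM

ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)  # assumed device/baud

def getc(size, timeout=1):
    data = ser.read(size)           # read raw bytes from the line
    return data or None             # the protocol expects None on timeout

def putc(data, timeout=1):
    return ser.write(data) or None  # bytes written, or None on failure

modem = XMODEM(getc, putc)
with open('received.bin', 'wb') as stream:
    nbytes = modem.recv(stream)     # byte count on success, None on failure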
def from_enum(gtype, enum_value):
    """Turn an int back into an enum string."""
    pointer = vips_lib.vips_enum_nick(gtype, enum_value)
    if pointer == ffi.NULL:
        raise Error('value not in enum')
    return _to_string(pointer)
def colorscale(mag, cmin, cmax):
    """
    Return a tuple of floats between 0 and 1 for R, G, and B.
    From Python Cookbook (9.11?)
    """
    # Normalize to 0-1
    try:
        x = float(mag - cmin) / (cmax - cmin)
    except ZeroDivisionError:
        x = 0.5  # cmax == cmin
    blue = min((max((4 * (0.75 - x), 0.)), 1.))
    red = min((max((4 * (x - 0.25), 0.)), 1.))
    green = min((max((4 * abs(x - 0.5) - 1., 0.)), 1.))
    return red, green, blue
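For a quick sanity check of the mapping, here is what the formulas above yield at the extremes and midpoint of the range (worked out by hand from the clamped expressions):

colorscale(0, 0, 10)   # x = 0.0 -> (0.0, 1.0, 1.0), cyan at the low end
colorscale(5, 0, 10)   # x = 0.5 -> (1.0, 0.0, 1.0), magenta at the midpoint
colorscale(10, 0, 10)  # x = 1.0 -> (1.0, 1.0, 0.0), yellow at the high end
colorscale(3, 7, 7)    # cmin == cmax -> x falls back to 0.5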
def update_variables(func):
    """
    Use this decorator on Step.action implementations. Your action method
    should always return variables, or both variables and output; this
    decorator will update the variables with the output.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        if isinstance(result, tuple):
            return self.process_register(result[0], result[1])
        else:
            return self.process_register(result)
    return wrapper
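A hypothetical Step subclass illustrating the two supported return shapes. The Step base class and the semantics of process_register are assumptions inferred from the docstring, not the original framework code.

class EchoStep(Step):  # Step is assumed from the docstring above
    @update_variables
    def action(self, variables):
        output = {'echoed': variables.get('message', '')}
        # Returning (variables, output) makes the decorator pass both to
        # process_register, which merges the output into the variables;
        # returning variables alone skips the merge.
        return variables, output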
def info(dev):
    '''
    Extract all info delivered by udevadm

    CLI Example:

    .. code-block:: bash

        salt '*' udev.info /dev/sda
        salt '*' udev.info /sys/class/net/eth0
    '''
    if 'sys' in dev:
        qtype = 'path'
    else:
        qtype = 'name'

    cmd = 'udevadm info --export --query=all --{0}={1}'.format(qtype, dev)
    udev_result = __salt__['cmd.run_all'](cmd, output_loglevel='quiet')

    if udev_result['retcode'] != 0:
        raise CommandExecutionError(udev_result['stderr'])

    return _parse_udevadm_info(udev_result['stdout'])[0]
def neighbor_add(self, address, remote_as,
                 remote_port=DEFAULT_BGP_PORT,
                 enable_ipv4=DEFAULT_CAP_MBGP_IPV4,
                 enable_ipv6=DEFAULT_CAP_MBGP_IPV6,
                 enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4,
                 enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6,
                 enable_evpn=DEFAULT_CAP_MBGP_EVPN,
                 enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS,
                 enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS,
                 enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS,
                 enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS,
                 enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS,
                 enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH,
                 enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER,
                 next_hop=None, password=None, multi_exit_disc=None,
                 site_of_origins=None,
                 is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT,
                 is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT,
                 is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF,
                 local_address=None, local_port=None, local_as=None,
                 connect_mode=DEFAULT_CONNECT_MODE):
    """ This method registers a new neighbor. The BGP speaker tries to
    establish a bgp session with the peer (accepts a connection
    from the peer and also tries to connect to it).

    ``address`` specifies the IP address of the peer. It must be
    the string representation of an IP address. Only IPv4 is
    supported now.

    ``remote_as`` specifies the AS number of the peer. It must be
    an integer between 1 and 65535.

    ``remote_port`` specifies the TCP port number of the peer.

    ``enable_ipv4`` enables IPv4 address family for this
    neighbor.

    ``enable_ipv6`` enables IPv6 address family for this
    neighbor.

    ``enable_vpnv4`` enables VPNv4 address family for this
    neighbor.

    ``enable_vpnv6`` enables VPNv6 address family for this
    neighbor.

    ``enable_evpn`` enables Ethernet VPN address family for this
    neighbor.

    ``enable_ipv4fs`` enables IPv4 Flow Specification address family
    for this neighbor.

    ``enable_ipv6fs`` enables IPv6 Flow Specification address family
    for this neighbor.

    ``enable_vpnv4fs`` enables VPNv4 Flow Specification address family
    for this neighbor.

    ``enable_vpnv6fs`` enables VPNv6 Flow Specification address family
    for this neighbor.

    ``enable_l2vpnfs`` enables L2VPN Flow Specification address family
    for this neighbor.

    ``enable_enhanced_refresh`` enables Enhanced Route Refresh for this
    neighbor.

    ``enable_four_octet_as_number`` enables Four-Octet AS Number
    capability for this neighbor.

    ``next_hop`` specifies the next hop IP address. If not
    specified, the host's IP address used to reach the peer is used.

    ``password`` is used for the MD5 authentication if it's
    specified. By default, the MD5 authentication is disabled.

    ``multi_exit_disc`` specifies multi exit discriminator (MED) value
    as an int type value.
    If omitted, MED is not sent to the neighbor.

    ``site_of_origins`` specifies site_of_origin values.
    This parameter must be a list of strings.

    ``is_route_server_client`` specifies whether this neighbor is a
    route server's client or not.

    ``is_route_reflector_client`` specifies whether this neighbor is a
    route reflector's client or not.

    ``is_next_hop_self`` specifies whether the BGP speaker announces
    its own ip address to iBGP neighbor or not as path's next_hop address.

    ``local_address`` specifies Loopback interface address for
    iBGP peering.

    ``local_port`` specifies source TCP port for iBGP peering.

    ``local_as`` specifies local AS number per-peer.
    If omitted, the AS number of BGPSpeaker instance is used.

    ``connect_mode`` specifies how to connect to this neighbor.
    This parameter must be one of the following.

    - CONNECT_MODE_ACTIVE         = 'active'
    - CONNECT_MODE_PASSIVE        = 'passive'
    - CONNECT_MODE_BOTH (default) = 'both'
    """
    bgp_neighbor = {
        neighbors.IP_ADDRESS: address,
        neighbors.REMOTE_AS: remote_as,
        REMOTE_PORT: remote_port,
        PEER_NEXT_HOP: next_hop,
        PASSWORD: password,
        IS_ROUTE_SERVER_CLIENT: is_route_server_client,
        IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client,
        IS_NEXT_HOP_SELF: is_next_hop_self,
        CONNECT_MODE: connect_mode,
        CAP_ENHANCED_REFRESH: enable_enhanced_refresh,
        CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number,
        CAP_MBGP_IPV4: enable_ipv4,
        CAP_MBGP_IPV6: enable_ipv6,
        CAP_MBGP_VPNV4: enable_vpnv4,
        CAP_MBGP_VPNV6: enable_vpnv6,
        CAP_MBGP_EVPN: enable_evpn,
        CAP_MBGP_IPV4FS: enable_ipv4fs,
        CAP_MBGP_IPV6FS: enable_ipv6fs,
        CAP_MBGP_VPNV4FS: enable_vpnv4fs,
        CAP_MBGP_VPNV6FS: enable_vpnv6fs,
        CAP_MBGP_L2VPNFS: enable_l2vpnfs,
    }

    if multi_exit_disc:
        bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc
    if site_of_origins:
        bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins
    if local_address:
        bgp_neighbor[LOCAL_ADDRESS] = local_address
    if local_port:
        bgp_neighbor[LOCAL_PORT] = local_port
    if local_as:
        bgp_neighbor[LOCAL_AS] = local_as

    call('neighbor.create', **bgp_neighbor)
def set_attributes(self, **kwargs):
    """
    Set a group of attributes (parameters and members).  Calls
    `setp` directly, so kwargs can include more than just the
    parameter value (e.g., bounds, free, etc.).
    """
    kwargs = dict(kwargs)
    for name, value in kwargs.items():
        # Raise AttributeError if param not found
        self.__getattr__(name)
        # Set attributes
        try:
            self.setp(name, **value)
        except TypeError:
            try:
                self.setp(name, *value)
            except (TypeError, KeyError):
                self.__setattr__(name, value)
def get_groups_of_account_apikey(self, account_id, api_key, **kwargs):  # noqa: E501
    """Get groups of the API key.  # noqa: E501

    An endpoint for retrieving groups of the API key.
    **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey}/groups -H 'Authorization: Bearer API_KEY'`  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_groups_of_account_apikey(account_id, api_key, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str api_key: The ID of the API key whose details are retrieved. (required)
    :param int limit: The number of results to return (2-1000); default is 50.
    :param str after: The entity ID to fetch after the given one.
    :param str order: The order of the records based on creation time, ASC or DESC; default is ASC.
    :param str include: Comma-separated additional data to return. Currently supported: total_count
    :return: GroupSummaryList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.get_groups_of_account_apikey_with_http_info(account_id, api_key, **kwargs)  # noqa: E501
    else:
        (data) = self.get_groups_of_account_apikey_with_http_info(account_id, api_key, **kwargs)  # noqa: E501
        return data
def __parse_aliases_line(self, raw_alias, raw_username):
    """Parse an aliases line."""
    alias = self.__encode(raw_alias)
    username = self.__encode(raw_username)
    return alias, username
def calc_piece_size(size, min_piece_size=20, max_piece_size=29,
                    max_piece_count=1000):
    """
    Calculate a good piece size for a given total size
    """
    logger.debug('Calculating piece size for %i' % size)
    for i in range(min_piece_size, max_piece_size):  # 2**20 = 1MB
        if size / (2 ** i) < max_piece_count:
            break
    return 2 ** i
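A worked example of the loop above for a 1 GiB payload, traced from the formulas:

# i = 20: 2**30 / 2**20 = 1024 pieces -> still >= max_piece_count
# i = 21: 2**30 / 2**21 = 512 pieces  -> under the limit, loop breaks
calc_piece_size(2 ** 30)  # returns 2**21, i.e. 2 MiB pieces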
def _lscmp(a, b):
    '''
    Compares two strings in a cryptographically safe way:
    Runtime is not affected by the length of the common prefix.
    '''
    return not sum(0 if x == y else 1 for x, y in zip(a, b)) and len(a) == len(b)
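The expression scans every character pair before combining the results, so a mismatch near the start costs the same as one near the end. In modern code the standard library offers the same guarantee out of the box:

import hmac

# Constant-time comparison from the standard library, preferred over a
# hand-rolled version when the inputs are bytes or ASCII str.
hmac.compare_digest(b'secret-token', b'secret-token')   # True
hmac.compare_digest(b'secret-token', b'forged-token!')  # False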
def _sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file):
    """Mark duplicates on aligner output and convert to grouped UMIs by position.

    Works with either a separate umi_file or UMI embedded in the read names.
    """
    tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
    jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tmp_file), 1)
    cores, mem = _get_cores_memory(data)
    bamsormadup = config_utils.get_program("bamsormadup", data)
    cmd = ("{bamsormadup} tmpfile={tmp_file}-markdup inputformat=sam threads={cores} outputformat=bam "
           "level=0 SO=coordinate | ")
    # UMIs in a separate file
    if os.path.exists(umi_consensus) and os.path.isfile(umi_consensus):
        cmd += "fgbio {jvm_opts} AnnotateBamWithUmis -i /dev/stdin -f {umi_consensus} -o {tx_out_file}"
    # UMIs embedded in read name
    else:
        cmd += ("%s %s bamtag - | samtools view -b > {tx_out_file}"
                % (utils.get_program_python("umis"),
                   config_utils.get_program("umis", data["config"])))
    return cmd.format(**locals())
def do_reparse(self, arg):
    """Reparse the currently active unit test so that the latest test
    results are loaded into the console.
    """
    # We just get the full path of the currently active test and hit reparse.
    full = arg == "full"
    from os import path
    fullpath = path.abspath(self.tests[self.active].stagedir)
    self.tests[self.active] = Analysis(fullpath, full)
def quote(key, value):
    """Certain options support string values. We want clients to be able to
    pass Python strings in, but we need them to be quoted in the output.
    Unfortunately some of those options also allow numbers, so we type-check
    the value before wrapping it in quotes.
    """
    if key in quoted_options and isinstance(value, string_types):
        return "'%s'" % value
    if key in quoted_bool_options and isinstance(value, bool):
        return {True: 'true', False: 'false'}[value]
    return value
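A usage sketch, assuming `quoted_options` and `quoted_bool_options` are module-level sets of option names (they are referenced above but not defined in this snippet, so these contents are invented for illustration):

quoted_options = {'font', 'title'}   # assumed contents
quoted_bool_options = {'legend'}     # assumed contents

quote('font', 'Helvetica')  # "'Helvetica'" (string option gets quoted)
quote('font', 12)           # 12 (numbers pass through unquoted)
quote('legend', True)       # 'true'
quote('width', 300)         # 300 (not a quoted option)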
def com_google_fonts_check_smart_dropout(ttFont):
    """Font enables smart dropout control in "prep" table instructions?

    B8 01 FF    PUSHW 0x01FF
    85          SCANCTRL (unconditionally turn on dropout control mode)
    B0 04       PUSHB 0x04
    8D          SCANTYPE (enable smart dropout control)

    Smart dropout control means activating rules 1, 2 and 5:

    Rule 1: If a pixel's center falls within the glyph outline,
            that pixel is turned on.
    Rule 2: If a contour falls exactly on a pixel's center,
            that pixel is turned on.
    Rule 5: If a scan line between two adjacent pixel centers
            (either vertical or horizontal) is intersected by both an
            on-Transition contour and an off-Transition contour and
            neither of the pixels was already turned on by rules 1 and 2,
            turn on the pixel which is closer to the midpoint between
            the on-Transition contour and off-Transition contour.
            This is "Smart" dropout control.
    """
    INSTRUCTIONS = b"\xb8\x01\xff\x85\xb0\x04\x8d"

    if ("prep" in ttFont and
            INSTRUCTIONS in ttFont["prep"].program.getBytecode()):
        yield PASS, ("'prep' table contains instructions"
                     " enabling smart dropout control.")
    else:
        yield FAIL, ("'prep' table does not contain TrueType"
                     " instructions enabling smart dropout control."
                     " To fix, export the font with autohinting enabled,"
                     " or run ttfautohint on the font, or run the"
                     " `gftools fix-nonhinting` script.")
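A sketch of driving the check directly with fontTools; the font path is a placeholder, and the PASS/FAIL constants the generator yields are assumed to come from the surrounding fontbakery module.

from fontTools.ttLib import TTFont

font = TTFont('MyFont-Regular.ttf')  # placeholder path
for status, message in com_google_fonts_check_smart_dropout(font):
    print(status, message)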
def login(self, username, password, mode="demo"):
    """Log in to the trading212 web interface."""
    url = "https://trading212.com/it/login"
    try:
        logger.debug("visiting %s" % url)
        self.browser.visit(url)
        logger.debug("connected to %s" % url)
    except selenium.common.exceptions.WebDriverException:
        logger.critical("connection timed out")
        raise
    try:
        self.search_name("login[username]").fill(username)
        self.search_name("login[password]").fill(password)
        self.css1(path['log']).click()
        # define a timeout for logging in
        timeout = time.time() + 30
        while not self.elCss(path['logo']):
            if time.time() > timeout:
                logger.critical("login failed")
                raise CredentialsException(username)
            time.sleep(1)
        logger.info(f"logged in as {username}")
        # check if it's a weekend
        if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
            timeout = time.time() + 10
            while not self.elCss(path['alert-box']):
                if time.time() > timeout:
                    logger.warning("weekend trading alert-box not closed")
                    break
            if self.elCss(path['alert-box']):
                self.css1(path['alert-box']).click()
                logger.debug("weekend trading alert-box closed")
    except Exception as e:
        logger.critical("login failed")
        raise exceptions.BaseExc(e)
    return True
def maxdiff_dtu_configurations(list_of_objects):
    """Return DtuConfiguration instance with maximum differences.

    Parameters
    ----------
    list_of_objects : python list
        List of DtuConfiguration instances to be compared.

    Returns
    -------
    result : DtuConfiguration instance
        Object holding, for each member, the maximum difference
        (max - min) across the input instances.
    """
    result = DtuConfiguration()
    if len(list_of_objects) == 0:
        return result

    list_of_members = result.__dict__.keys()
    # compute maximum difference for each member
    for member in list_of_members:
        tmp_array = np.array(
            [tmp_dtu.__dict__[member] for tmp_dtu in list_of_objects]
        )
        minval = tmp_array.min()
        maxval = tmp_array.max()
        result.__dict__[member] = maxval - minval

    return result
def _generate_sdss_object_name(self):
    """Generate SDSS object names for the results."""
    self.log.info('starting the ``_generate_sdss_object_name`` method')

    converter = unit_conversion(
        log=self.log
    )

    # Names should be of the format `SDSS JHHMMSS.ss+DDMMSS.s`
    # where the coordinates are truncated, not rounded.
    for row in self.results:
        raSex = converter.ra_decimal_to_sexegesimal(
            ra=row["ra"],
            delimiter=":"
        )
        decSex = converter.dec_decimal_to_sexegesimal(
            dec=row["dec"],
            delimiter=":"
        )
        raSex = raSex.replace(":", "")[:9]
        decSex = decSex.replace(":", "")[:9]
        sdssName = "SDSS J%(raSex)s%(decSex)s" % locals()
        row["sdss_name"] = sdssName

        # map the numeric type code onto its word form
        wordType = ["unknown", "cosmic_ray", "defect", "galaxy", "ghost",
                    "knownobj", "star", "trail", "sky", "notatype"]
        row["type"] = wordType[row["type"]]

    self.log.info('completed the ``_generate_sdss_object_name`` method')
    return None
def remove(self, observableElement):
    """Remove an observable element.

    :param str observableElement: the name of the observable element
    """
    if observableElement in self._observables:
        self._observables.remove(observableElement)
def location_from_dictionary(d):
    """
    Builds a *Location* object out of a data dictionary. Only certain
    properties of the dictionary are used: if these properties are not
    found or cannot be read, an error is issued.

    :param d: a data dictionary
    :type d: dict
    :returns: a *Location* instance
    :raises: *KeyError* if it is impossible to find or read the data
        needed to build the instance
    """
    country = None
    if 'sys' in d and 'country' in d['sys']:
        country = d['sys']['country']
    if 'city' in d:
        data = d['city']
    else:
        data = d
    if 'name' in data:
        name = data['name']
    else:
        name = None
    if 'id' in data:
        ID = int(data['id'])
    else:
        ID = None
    if 'coord' in data:
        lon = data['coord'].get('lon', 0.0)
        lat = data['coord'].get('lat', 0.0)
    elif 'coord' in data['station']:
        if 'lon' in data['station']['coord']:
            lon = data['station']['coord'].get('lon', 0.0)
        elif 'lng' in data['station']['coord']:
            lon = data['station']['coord'].get('lng', 0.0)
        else:
            lon = 0.0
        lat = data['station']['coord'].get('lat', 0.0)
    else:
        raise KeyError("Impossible to read geographical coordinates from JSON")
    if 'country' in data:
        country = data['country']
    return Location(name, lon, lat, ID, country)
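An example input in the shape the parser expects, exercising the plain-dict branch; the field layout is inferred from the lookups above and the concrete values are invented:

weather_payload = {
    'name': 'London',
    'id': 2643743,
    'coord': {'lon': -0.12574, 'lat': 51.50853},
    'sys': {'country': 'GB'},
}
loc = location_from_dictionary(weather_payload)
# -> Location('London', -0.12574, 51.50853, 2643743, 'GB')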
def from_csv(cls, filename: str):
    """
    Imports PDEntries from a csv.

    Args:
        filename: Filename to import from.

    Returns:
        List of Elements, List of PDEntries
    """
    with open(filename, "r", encoding="utf-8") as f:
        reader = csv.reader(f, delimiter=unicode2str(","),
                            quotechar=unicode2str("\""),
                            quoting=csv.QUOTE_MINIMAL)
        entries = list()
        header_read = False
        elements = None
        for row in reader:
            if not header_read:
                elements = row[1:(len(row) - 1)]
                header_read = True
            else:
                name = row[0]
                energy = float(row[-1])
                comp = dict()
                for ind in range(1, len(row) - 1):
                    if float(row[ind]) > 0:
                        comp[Element(elements[ind - 1])] = float(row[ind])
                entries.append(PDEntry(Composition(comp), energy, name))
    return cls(entries)
def connect(self, ip_address, tsap_snap7, tsap_logo, tcpport=102):
    """
    Connect to a Siemens LOGO server.

    For how to set up the LOGO communication configuration, see:
    http://snap7.sourceforge.net/logo.html

    :param ip_address: IP address of the server
    :param tsap_snap7: TSAP SNAP7 Client (e.g. 10.00 = 0x1000)
    :param tsap_logo: TSAP Logo Server (e.g. 20.00 = 0x2000)
    """
    logger.info("connecting to %s:%s tsap_snap7 %s tsap_logo %s"
                % (ip_address, tcpport, tsap_snap7, tsap_logo))
    # special handling for Siemens Logo:
    # 1st set connection params
    # 2nd connect without any parameters
    self.set_param(snap7.snap7types.RemotePort, tcpport)
    self.set_connection_params(ip_address, tsap_snap7, tsap_logo)
    result = self.library.Cli_Connect(self.pointer)
    check_error(result, context="client")
    return result
def http_responder_factory(proto):
    """
    The default factory function, which creates a GrowlerHTTPResponder with
    this object as the parent protocol and the application's req/res factory
    functions. To change the default responder, overload this method with
    one of the same signature that returns your own responder.

    Params
    ------
    proto : GrowlerHTTPProtocol
        Explicitly passed protocol object (actually it's what would
        be 'self'!)

    Note
    ----
    This method is decorated with @staticmethod, as the connection_made
    method of GrowlerProtocol explicitly passes `self` as a parameter,
    instead of treating this as a bound method.
    """
    return GrowlerHTTPResponder(
        proto,
        request_factory=proto.http_application._request_class,
        response_factory=proto.http_application._response_class,
    )
def segments_distance(segment1, segment2):
    """Calculate the distance between two line segments in the plane.

    >>> a = LineSegment(Point(1, 0), Point(2, 0))
    >>> b = LineSegment(Point(0, 1), Point(0, 2))
    >>> "%0.2f" % segments_distance(a, b)
    '1.41'
    >>> c = LineSegment(Point(0, 0), Point(5, 5))
    >>> d = LineSegment(Point(2, 2), Point(4, 4))
    >>> e = LineSegment(Point(2, 2), Point(7, 7))
    >>> "%0.2f" % segments_distance(c, d)
    '0.00'
    >>> "%0.2f" % segments_distance(c, e)
    '0.00'
    """
    assert isinstance(segment1, LineSegment), \
        "segment1 is not a LineSegment, but a %s" % type(segment1)
    assert isinstance(segment2, LineSegment), \
        "segment2 is not a LineSegment, but a %s" % type(segment2)
    if len(get_segments_intersections(segment1, segment2)) >= 1:
        return 0
    # try each of the 4 vertices with the other segment
    distances = []
    distances.append(point_segment_distance(segment1.p1, segment2))
    distances.append(point_segment_distance(segment1.p2, segment2))
    distances.append(point_segment_distance(segment2.p1, segment1))
    distances.append(point_segment_distance(segment2.p2, segment1))
    return min(distances)
def ip_to_array(ipaddress):
    """Convert a string representing an IPv4 address to 4 bytes."""
    res = []
    for i in ipaddress.split("."):
        res.append(int(i))
    assert len(res) == 4
    return res
def remove_internal_names(self):
    """
    Set the name of all non-leaf nodes in the subtree to None.
    """
    self.visit(lambda n: setattr(n, 'name', None), lambda n: not n.is_leaf)
def _convert_to(maybe_device, convert_to):
    '''
    Convert a device name, UUID or LABEL to a device name, UUID or
    LABEL.

    Return the fs_spec required for fstab.
    '''
    # Fast path. If we already have the information required, we can
    # save one blkid call
    if not convert_to or \
       (convert_to == 'device' and maybe_device.startswith('/')) or \
       maybe_device.startswith('{}='.format(convert_to.upper())):
        return maybe_device

    # Get the device information
    if maybe_device.startswith('/'):
        blkid = __salt__['disk.blkid'](maybe_device)
    else:
        blkid = __salt__['disk.blkid'](token=maybe_device)

    result = None
    if len(blkid) == 1:
        if convert_to == 'device':
            result = list(blkid.keys())[0]
        else:
            key = convert_to.upper()
            result = '{}={}'.format(key, list(blkid.values())[0][key])

    return result
def pull_log_dump(self, project_name, logstore_name, from_time, to_time,
                  file_path, batch_size=None, compress=None, encodings=None,
                  shard_list=None, no_escape=None):
    """Dump all logs, one per line, into the files at file_path. The time
    parameters are the log received time on the server side.

    :type project_name: string
    :param project_name: the Project name

    :type logstore_name: string
    :param logstore_name: the logstore name

    :type from_time: string/int
    :param from_time: cursor value, could be begin, timestamp or readable
        time like "%Y-%m-%d %H:%M:%S<time_zone>"
        e.g. "2018-01-02 12:12:10+8:00", also supports human readable
        strings, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to
        https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :type to_time: string/int
    :param to_time: cursor value, could be begin, timestamp or readable
        time like "%Y-%m-%d %H:%M:%S<time_zone>"
        e.g. "2018-01-02 12:12:10+8:00", also supports human readable
        strings, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to
        https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :type file_path: string
    :param file_path: file path with {} for shard id.
        e.g. "/data/dump_{}.data", {} will be replaced with each partition.

    :type batch_size: int
    :param batch_size: batch size to fetch the data in each iteration.
        by default it's 500

    :type compress: bool
    :param compress: if use compression, by default it's True

    :type encodings: string list
    :param encodings: encodings like ["utf8", "latin1"] etc. used to dump
        the logs in json format to file. default is ["utf8"]

    :type shard_list: string
    :param shard_list: shard number list. could be a comma-separated list
        or ranges: 1,20,31-40

    :type no_escape: bool
    :param no_escape: if True, do not escape non-ANSI characters;
        the default is to escape them.

    :return: LogResponse {"total_count": 30, "files": {'file_path_1': 10, "file_path_2": 20} })

    :raise: LogException
    """
    file_path = file_path.replace("{}", "{0}")
    if "{0}" not in file_path:
        file_path += "{0}"

    return pull_log_dump(self, project_name, logstore_name, from_time,
                         to_time, file_path, batch_size=batch_size,
                         compress=compress, encodings=encodings,
                         shard_list=shard_list, no_escape=no_escape)
def get_sensor_code_by_number(si, mtype, sensor_number, quiet=False):
    """
    Given a sensor number, get the full sensor code (e.g. ACCX-UB1-L2C-M)

    :param si: dict, sensor index json dictionary
    :param mtype: str, sensor type
    :param sensor_number: int, number of sensor
    :param quiet: bool, if true then return None if not found
    :return: str or None, sensor_code: a sensor code (e.g. ACCX-UB1-L2C-M)
    """
    try:
        if 'Orientation' in si[mtype][sensor_number]:
            orientation = si[mtype][sensor_number]['Orientation']
        else:
            orientation = ""
        return "%s%s-%s-%s-%s" % (mtype, orientation,
                                  si[mtype][sensor_number]['X-CODE'],
                                  si[mtype][sensor_number]['Y-CODE'],
                                  si[mtype][sensor_number]['Z-CODE'])
    except KeyError:
        if quiet:
            return None
        raise
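A sketch of the sensor-index shape the function expects, inferred from the key lookups above; the concrete values are invented:

si = {
    'ACC': {
        1: {'Orientation': 'X',
            'X-CODE': 'UB1', 'Y-CODE': 'L2C', 'Z-CODE': 'M'},
    }
}
get_sensor_code_by_number(si, 'ACC', 1)              # 'ACCX-UB1-L2C-M'
get_sensor_code_by_number(si, 'ACC', 9, quiet=True)  # None (not found)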
def item_enclosure_length(self, item):
    """
    Try to obtain the size of the enclosure if it's present on the
    filesystem, otherwise return a hardcoded value.
    Note: this method is only called if item_enclosure_url
    has returned something.
    """
    try:
        return str(item.image.size)
    except (AttributeError, ValueError, os.error):
        pass
    return '100000'
def find_users(session, *usernames):
    """Find multiple users by name."""
    user_string = ','.join(usernames)
    return _make_request(session, FIND_USERS_URL, user_string)
def sos_get_command_output(command, timeout=300, stderr=False,
                           chroot=None, chdir=None, env=None,
                           binary=False, sizelimit=None, poller=None):
    """Execute a command and return a dictionary of status and output,
    optionally changing root or current working directory before
    executing command.
    """
    # Change root or cwd for child only. Exceptions in the prexec_fn
    # closure are caught in the parent (chroot and chdir are bound from
    # the enclosing scope).
    def _child_prep_fn():
        if chroot:
            os.chroot(chroot)
        if chdir:
            os.chdir(chdir)

    cmd_env = os.environ.copy()
    # ensure consistent locale for collected command output
    cmd_env['LC_ALL'] = 'C'
    # optionally add an environment change for the command
    if env:
        for key, value in env.items():
            if value:
                cmd_env[key] = value
            else:
                cmd_env.pop(key, None)
    # use /usr/bin/timeout to implement a timeout
    if timeout and is_executable("timeout"):
        command = "timeout %ds %s" % (timeout, command)

    # shlex.split() reacts badly to unicode on older python runtimes.
    if not six.PY3:
        command = command.encode('utf-8', 'ignore')
    args = shlex.split(command)
    # Expand arguments that are wildcard paths.
    expanded_args = []
    for arg in args:
        expanded_arg = glob.glob(arg)
        if expanded_arg:
            expanded_args.extend(expanded_arg)
        else:
            expanded_args.append(arg)
    try:
        p = Popen(expanded_args, shell=False, stdout=PIPE,
                  stderr=STDOUT if stderr else PIPE,
                  bufsize=-1, env=cmd_env, close_fds=True,
                  preexec_fn=_child_prep_fn)

        reader = AsyncReader(p.stdout, sizelimit, binary)
        if poller:
            while reader.running:
                if poller():
                    p.terminate()
                    raise SoSTimeoutError
        stdout = reader.get_contents()
        while p.poll() is None:
            pass
    except OSError as e:
        if e.errno == errno.ENOENT:
            return {'status': 127, 'output': ""}
        else:
            raise e

    if p.returncode == 126 or p.returncode == 127:
        stdout = six.binary_type(b"")

    return {
        'status': p.returncode,
        'output': stdout
    }
def create_volume(self, volume, size, **kwargs):
    r"""Create a volume and return a dictionary describing it.

    :param volume: Name of the volume to be created.
    :type volume: str
    :param size: Size in bytes, or string representing the size of the
                 volume to be created.
    :type size: int or str
    :param \*\*kwargs: See the REST API Guide on your array for the
                       documentation on the request:
                       **POST volume/:volume**
    :type \*\*kwargs: optional

    :returns: A dictionary describing the created volume.
    :rtype: ResponseDict

    .. note:: The maximum volume size supported is 4 petabytes (4 * 2^50).

    .. note:: If size is an int, it must be a multiple of 512.

    .. note:: If size is a string, it must consist of an integer followed
              by a valid suffix.

    Accepted Suffixes

    ====== ======== ======
    Suffix Size     Bytes
    ====== ======== ======
    S      Sector   (2^9)
    K      Kilobyte (2^10)
    M      Megabyte (2^20)
    G      Gigabyte (2^30)
    T      Terabyte (2^40)
    P      Petabyte (2^50)
    ====== ======== ======
    """
    data = {"size": size}
    data.update(kwargs)
    return self._request("POST", "volume/{0}".format(volume), data)
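Usage examples for the two accepted size forms, assuming `array` is an already-connected client instance exposing this method:

array.create_volume('vol1', 1024 * 1024)  # int size: must be a multiple of 512
array.create_volume('vol2', '5G')         # string size: integer + suffix
# Extra REST parameters pass straight through as kwargs; consult the
# array's REST API Guide for which keys are valid.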
def _send_resource(self, environ, start_response, is_head_method):
    """
    If-Range
        If the entity is unchanged, send me the part(s) that I am
        missing; otherwise, send me the entire new entity
        If-Range: "737060cd8c284d8af7ad3082f209582d"

    @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27
    """
    path = environ["PATH_INFO"]
    res = self._davProvider.get_resource_inst(path, environ)

    if util.get_content_length(environ) != 0:
        self._fail(
            HTTP_MEDIATYPE_NOT_SUPPORTED,
            "The server does not handle any body content.",
        )
    elif environ.setdefault("HTTP_DEPTH", "0") != "0":
        self._fail(HTTP_BAD_REQUEST, "Only Depth: 0 supported.")
    elif res is None:
        self._fail(HTTP_NOT_FOUND)
    elif res.is_collection:
        self._fail(
            HTTP_FORBIDDEN,
            "Directory browsing is not enabled. "
            "(To enable it, put WsgiDavDirBrowser into the middleware_stack "
            "option and set dir_browser -> enabled = True.)",
        )

    self._evaluate_if_headers(res, environ)

    filesize = res.get_content_length()
    if filesize is None:
        filesize = -1  # flag logic to read until EOF

    last_modified = res.get_last_modified()
    if last_modified is None:
        last_modified = -1

    entitytag = res.get_etag()
    if entitytag is None:
        entitytag = "[]"

    # Ranges
    doignoreranges = (
        not res.support_content_length()
        or not res.support_ranges()
        or filesize == 0
    )
    if (
        "HTTP_RANGE" in environ
        and "HTTP_IF_RANGE" in environ
        and not doignoreranges
    ):
        ifrange = environ["HTTP_IF_RANGE"]
        # Try as http-date first (Return None, if invalid date string)
        secstime = util.parse_time_string(ifrange)
        if secstime:
            if last_modified != secstime:
                doignoreranges = True
        else:
            # Use as entity tag
            ifrange = ifrange.strip('" ')
            if entitytag is None or ifrange != entitytag:
                doignoreranges = True

    ispartialranges = False
    if "HTTP_RANGE" in environ and not doignoreranges:
        ispartialranges = True
        list_ranges, _totallength = util.obtain_content_ranges(
            environ["HTTP_RANGE"], filesize
        )
        if len(list_ranges) == 0:
            # No valid ranges present
            self._fail(HTTP_RANGE_NOT_SATISFIABLE)
        # More than one range present -> take only the first range, since
        # multiple range returns require multipart, which is not supported
        # obtain_content_ranges supports more than one range in case the
        # above behaviour changes in future
        (range_start, range_end, range_length) = list_ranges[0]
    else:
        (range_start, range_end, range_length) = (0, filesize - 1, filesize)

    # Content Processing
    mimetype = res.get_content_type()  # provider.get_content_type(path)

    response_headers = []
    if res.support_content_length():
        # Content-length must be of type string
        response_headers.append(("Content-Length", str(range_length)))
    if res.support_modified():
        response_headers.append(
            ("Last-Modified", util.get_rfc1123_time(last_modified))
        )
    response_headers.append(("Content-Type", mimetype))
    response_headers.append(("Date", util.get_rfc1123_time()))
    if res.support_etag():
        response_headers.append(("ETag", '"{}"'.format(entitytag)))

    if "response_headers" in environ["wsgidav.config"]:
        customHeaders = environ["wsgidav.config"]["response_headers"]
        for header, value in customHeaders:
            response_headers.append((header, value))

    res.finalize_headers(environ, response_headers)

    if ispartialranges:
        # response_headers.append(("Content-Ranges", "bytes " + str(range_start) + "-" +
        #     str(range_end) + "/" + str(range_length)))
        response_headers.append(
            (
                "Content-Range",
                "bytes {}-{}/{}".format(range_start, range_end, filesize),
            )
        )
        start_response("206 Partial Content", response_headers)
    else:
        start_response("200 OK", response_headers)

    # Return empty body for HEAD requests
    if is_head_method:
        yield b""
        return

    fileobj = res.get_content()

    if not doignoreranges:
        fileobj.seek(range_start)

    contentlengthremaining = range_length
    while 1:
        if contentlengthremaining < 0 or contentlengthremaining > self.block_size:
            readbuffer = fileobj.read(self.block_size)
        else:
            readbuffer = fileobj.read(contentlengthremaining)
        assert compat.is_bytes(readbuffer)
        yield readbuffer
        contentlengthremaining -= len(readbuffer)
        if len(readbuffer) == 0 or contentlengthremaining == 0:
            break
    fileobj.close()
    return
def can_rename(self):
    # type: (LocalSourcePaths) -> bool
    """Check if the source can be renamed
    :param LocalSourcePaths self: this
    :rtype: bool
    :return: if rename is possible
    """
    return len(self._paths) == 1 and (
        self._paths[0].is_file() or
        blobxfer.models.upload.LocalSourcePath.is_stdin(
            str(self._paths[0]))
    )
def set_frame_parameters(self, profile_index: int, frame_parameters) -> None:
    """Set the frame parameters with the settings index and fire the frame
    parameters changed event.

    If the settings index matches the current settings index, call set
    current frame parameters.

    If the settings index matches the record settings index, call set
    record frame parameters.
    """
    self.frame_parameters_changed_event.fire(profile_index, frame_parameters)
def _cleanup_workflow(config, task_id, args, **kwargs):
    """Clean up the results of a workflow when it has finished.

    Connects to the postrun signal of Celery. If the signal was sent by a
    workflow, remove the result from the result backend.

    Args:
        task_id (str): The id of the task.
        args (tuple): The arguments the task was started with.
        **kwargs: Keyword arguments from the hook.
    """
    from lightflow.models import Workflow
    if isinstance(args[0], Workflow):
        if config.celery['result_expires'] == 0:
            AsyncResult(task_id).forget()
def to_(self, data_pts):
    """Reverse of :meth:`from_`."""
    data_pts = np.asarray(data_pts, dtype=float)
    has_z = (data_pts.shape[-1] > 2)
    if self.use_center:
        data_pts = data_pts - self.viewer.data_off

    # subtract data indexes at center reference pixel
    ref_pt = [self.viewer._org_x, self.viewer._org_y]
    if has_z:
        ref_pt.append(self.viewer._org_z)
    off_pts = np.subtract(data_pts, ref_pt)
    return off_pts
def info(self, exp_path=False, project_path=False, global_path=False,
         config_path=False, complete=False, no_fix=False, on_projects=False,
         on_globals=False, projectname=None, return_dict=False,
         insert_id=True, only_keys=False, archives=False, **kwargs):
    """
    Print information on the experiments

    Parameters
    ----------
    exp_path: bool
        If True/set, print the filename of the experiment configuration
    project_path: bool
        If True/set, print the filename of the project configuration
    global_path: bool
        If True/set, print the filename of the global configuration
    config_path: bool
        If True/set, print the path to the configuration directory
    complete: bool
        If True/set, the information on all experiments is printed
    no_fix: bool
        If set, paths are given relative to the root directory of the
        project
    on_projects: bool
        If set, show information on the projects rather than the experiment
    on_globals: bool
        If set, show the global configuration settings
    projectname: str
        The name of the project that shall be used. If provided and
        `on_projects` is not True, the information on all experiments for
        this project will be shown
    return_dict: bool
        If True, the dictionary is returned instead of printed
    insert_id: bool
        If True and neither `on_projects`, nor `on_globals`, nor
        `projectname` is given, the experiment id is inserted in the
        dictionary
    only_keys: bool
        If True, only the keys of the given dictionary are printed
    archives: bool
        If True, print the archives and the corresponding experiments for
        the specified project
    """
    self.app_main(**kwargs)

    def get_archives(project):
        ret = OrderedDict()
        for exp, a in self.config.experiments.items():
            if self.is_archived(exp) and a.project == project:
                ret.setdefault(str(a), []).append(exp)
        return ret

    paths = OrderedDict([
        ('conf_dir', config_path),
        ('_globals_file', global_path)])
    if any(paths.values()):
        for key, val in paths.items():
            if val:
                return (self.print_ or six.print_)(getattr(self.config, key))
        return

    if archives:
        base = OrderedDict()
        current = projectname or self.projectname
        if complete:
            for project in self.config.projects.keys():
                d = get_archives(project)
                if d:
                    base[project] = d
        else:
            base[current] = get_archives(current)
    elif exp_path:
        current = self.experiment
        base = self.config.experiments.exp_files
    elif project_path:
        current = self.projectname
        base = OrderedDict(
            (key, osp.join(val, '.project', '.project.yml'))
            for key, val in self.config.projects.project_paths.items())
    elif on_globals:
        complete = True
        no_fix = True
        base = self.config.global_config
    elif on_projects:
        base = OrderedDict(self.config.projects)
        current = projectname or self.projectname
    else:
        current = self.experiment
        if projectname is None:
            if insert_id:
                base = copy.deepcopy(self.config.experiments)
                if not complete:
                    base[current]['id'] = current
                    if six.PY3:
                        base[current].move_to_end('id', last=False)
            else:
                base = self.config.experiments
            if not only_keys:
                # make sure the experiments are loaded
                if complete:
                    base.load()
                else:
                    base[current]
            # convert to an OrderedDict
            base = base.as_ordereddict()
        else:
            base = OrderedDict(
                (exp, self.config.experiments[exp])
                for exp in self.config.experiments.project_map[projectname]
            )
            complete = True

    if no_fix and not (archives or on_globals):
        for key, val in base.items():
            if isinstance(val, dict):
                base[key] = self.rel_paths(copy.deepcopy(val))
    if not complete:
        base = base[current]
    if only_keys:
        base = list(base.keys())
    if not return_dict:
        if isinstance(base, six.string_types):
            ret = base
        else:
            ret = ordered_yaml_dump(base, default_flow_style=False)
        return (self.print_ or six.print_)(ret.rstrip())
    else:
        return base
def squared_toroidal_dist(p1, p2, world_size=(60, 60)):
    """Separated out because sqrt has a lot of overhead"""
    halfx = world_size[0] / 2.0
    if world_size[0] == world_size[1]:
        halfy = halfx
    else:
        halfy = world_size[1] / 2.0

    deltax = p1[0] - p2[0]
    if deltax < -halfx:
        deltax += world_size[0]
    elif deltax > halfx:
        deltax -= world_size[0]

    deltay = p1[1] - p2[1]
    if deltay < -halfy:
        deltay += world_size[1]
    elif deltay > halfy:
        deltay -= world_size[1]

    return deltax * deltax + deltay * deltay
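A worked example of the wraparound: on the default 60x60 torus, points near opposite edges are close through the boundary.

# Without wrapping, (1, 30) -> (59, 30) would be 58 units apart on x.
# deltax = 1 - 59 = -58 < -30, so it wraps to -58 + 60 = 2.
squared_toroidal_dist((1, 30), (59, 30))   # 4  (distance 2, squared)
squared_toroidal_dist((10, 10), (13, 14))  # 25 (ordinary 3-4-5 triangle)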
def _make_sprite_image(images, save_path):
    """Given an NDArray as a batch of images, make a sprite image out of it
    following the rule defined in
    https://www.tensorflow.org/programmers_guide/embedding
    and save it in sprite.png under the path provided by the user."""
    if isinstance(images, np.ndarray):
        images = nd.array(images, dtype=images.dtype, ctx=current_context())
    elif not isinstance(images, (NDArray, np.ndarray)):
        raise TypeError('images must be an MXNet NDArray or numpy.ndarray,'
                        ' while received type {}'.format(str(type(images))))
    assert isinstance(images, NDArray)
    shape = images.shape
    nrow = int(np.ceil(np.sqrt(shape[0])))
    _save_image(
        images, os.path.join(save_path, 'sprite.png'),
        nrow=nrow, padding=0, square_image=True)
def phase_fraction(im, normed=True):
    r"""
    Calculates the number (or fraction) of each phase in an image

    Parameters
    ----------
    im : ND-array
        An ND-array containing integer values

    normed : Boolean
        If ``True`` (default) the returned values are normalized by the
        total number of voxels in the image, otherwise the voxel count of
        each phase is returned.

    Returns
    -------
    result : 1D-array
        An array of length max(im) with each element containing the number
        of voxels found with the corresponding label.

    See Also
    --------
    porosity
    """
    if im.dtype == bool:
        im = im.astype(int)
    elif im.dtype != int:
        raise Exception('Image must contain integer values for each phase')
    labels = sp.arange(0, sp.amax(im) + 1)
    results = sp.zeros_like(labels)
    for i in labels:
        results[i] = sp.sum(im == i)
    if normed:
        results = results / im.size
    return results
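A small worked example; `sp` above is the scipy namespace used as a NumPy-compatible array library in this codebase, so plain numpy arrays work as input:

import numpy as np

im = np.array([[0, 0, 1],
               [1, 2, 2],
               [2, 2, 2]])
phase_fraction(im, normed=False)  # array([2, 2, 5]): voxel count per label
phase_fraction(im)                # array([0.222, 0.222, 0.556]) approx., counts / 9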
def calc_route_info(self, real_time=True, stop_at_bounds=False, time_delta=0): """Calculate best route info.""" route = self.get_route(1, time_delta) results = route['results'] route_time, route_distance = self._add_up_route(results, real_time=real_time, stop_at_bounds=stop_at_bounds) self.log.info('Time %.2f minutes, distance %.2f km.', route_time, route_distance) return route_time, route_distance
Calculate best route info.
def listdir(self, path='.'):
    """
    Gets a list of the contents of path in (s)FTP
    """
    self._connect()
    if self.sftp:
        contents = self._sftp_listdir(path)
    else:
        contents = self._ftp_listdir(path)
    self._close()
    return contents
Gets a list of the contents of path in (s)FTP
def exclude(self, *args, **kwargs): """ Works just like the default Manager's :func:`exclude` method, but you can pass an additional keyword argument named ``path`` specifying the full **path of the folder whose immediate child objects** you want to exclude, e.g. ``"path/to/folder"``. """ if 'path' in kwargs: kwargs = self.get_filter_args_with_path(False, **kwargs) return super(FileNodeManager, self).exclude(*args, **kwargs)
Works just like the default Manager's :func:`exclude` method, but you can pass an additional keyword argument named ``path`` specifying the full **path of the folder whose immediate child objects** you want to exclude, e.g. ``"path/to/folder"``.
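A hedged usage sketch (the model and field names here are illustrative assumptions, not from the source):

# Hypothetical: FileNode uses FileNodeManager as its default manager.
docs = FileNode.objects.exclude(path='path/to/folder', name__startswith='tmp')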
def get_kafka_brokers():
    """
    Parses the KAFKA_URL and returns a list of hostname:port pairs in the format
    that kafka-python expects.
    """
    # NOTE: The Kafka environment variables need to be present. If using
    # Apache Kafka on Heroku, they will be available in your app configuration.
    if not os.environ.get('KAFKA_URL'):
        raise RuntimeError('The KAFKA_URL config variable is not set.')

    return ['{}:{}'.format(parsedUrl.hostname, parsedUrl.port) for parsedUrl in
            [urlparse(url) for url in os.environ.get('KAFKA_URL').split(',')]]
Parses the KAFKA_URL and returns a list of hostname:port pairs in the format that kafka-python expects.
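The parsing itself can be exercised without a broker; this stand-alone sketch mirrors the comprehension above (the URL values are made up):

import os
from urllib.parse import urlparse

os.environ['KAFKA_URL'] = 'kafka+ssl://host1:9096,kafka+ssl://host2:9096'
brokers = ['{}:{}'.format(u.hostname, u.port)
           for u in (urlparse(url) for url in os.environ['KAFKA_URL'].split(','))]
print(brokers)  # ['host1:9096', 'host2:9096']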
def create_from_fits(cls, fitsfile, norm_type='eflux', hdu_scan="SCANDATA", hdu_energies="EBOUNDS", irow=None): """Create a CastroData object from a tscube FITS file. Parameters ---------- fitsfile : str Name of the fits file norm_type : str Type of normalization to use. Valid options are: * norm : Normalization w.r.t. to test source * flux : Flux of the test source ( ph cm^-2 s^-1 ) * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 ) * npred: Number of predicted photons (Not implemented) * dnde : Differential flux of the test source ( ph cm^-2 s^-1 MeV^-1 ) hdu_scan : str Name of the FITS HDU with the scan data hdu_energies : str Name of the FITS HDU with the energy binning and normalization data irow : int or None If none, then this assumes that there is a single row in the scan data table Otherwise, this specifies which row of the table to use Returns ------- castro : `~fermipy.castro.CastroData` """ if irow is not None: tab_s = Table.read(fitsfile, hdu=hdu_scan)[irow] else: tab_s = Table.read(fitsfile, hdu=hdu_scan) tab_e = Table.read(fitsfile, hdu=hdu_energies) tab_s = convert_sed_cols(tab_s) tab_e = convert_sed_cols(tab_e) return cls.create_from_tables(norm_type, tab_s, tab_e)
Create a CastroData object from a tscube FITS file. Parameters ---------- fitsfile : str Name of the fits file norm_type : str Type of normalization to use. Valid options are: * norm : Normalization w.r.t. to test source * flux : Flux of the test source ( ph cm^-2 s^-1 ) * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 ) * npred: Number of predicted photons (Not implemented) * dnde : Differential flux of the test source ( ph cm^-2 s^-1 MeV^-1 ) hdu_scan : str Name of the FITS HDU with the scan data hdu_energies : str Name of the FITS HDU with the energy binning and normalization data irow : int or None If none, then this assumes that there is a single row in the scan data table Otherwise, this specifies which row of the table to use Returns ------- castro : `~fermipy.castro.CastroData`
def __upload(self, resource, bytes): """Performs a single chunk upload.""" # note: string conversion required here due to open encoding bug in requests-oauthlib. headers = { 'x-ton-expires': http_time(self.options.get('x-ton-expires', self._DEFAULT_EXPIRE)), 'content-length': str(self._file_size), 'content-type': self.content_type } return Request(self._client, 'post', resource, domain=self._DEFAULT_DOMAIN, headers=headers, body=bytes).perform()
Performs a single chunk upload.
def start(self):
    '''
    Starts (Subscribes) the client.
    '''
    self.sub = rospy.Subscriber(self.topic, ImageROS, self.__callback)
Starts (Subscribes) the client.
def restore_repository_from_recycle_bin(self, repository_details, project, repository_id): """RestoreRepositoryFromRecycleBin. [Preview API] Recover a soft-deleted Git repository. Recently deleted repositories go into a soft-delete state for a period of time before they are hard deleted and become unrecoverable. :param :class:`<GitRecycleBinRepositoryDetails> <azure.devops.v5_1.git.models.GitRecycleBinRepositoryDetails>` repository_details: :param str project: Project ID or project name :param str repository_id: The ID of the repository. :rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if repository_id is not None: route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str') content = self._serialize.body(repository_details, 'GitRecycleBinRepositoryDetails') response = self._send(http_method='PATCH', location_id='a663da97-81db-4eb3-8b83-287670f63073', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('GitRepository', response)
RestoreRepositoryFromRecycleBin. [Preview API] Recover a soft-deleted Git repository. Recently deleted repositories go into a soft-delete state for a period of time before they are hard deleted and become unrecoverable. :param :class:`<GitRecycleBinRepositoryDetails> <azure.devops.v5_1.git.models.GitRecycleBinRepositoryDetails>` repository_details: :param str project: Project ID or project name :param str repository_id: The ID of the repository. :rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>`
def get(cls, resource_id=None, parent_id=None, grandparent_id=None): """Retrieves the required resources. :param resource_id: The identifier for the specific resource within the resource type. :param parent_id: The identifier for the specific ancestor resource within the resource type. :param grandparent_id: The identifier that is associated with network objects that are ancestors of the parent of the necessary resource. """ if not resource_id: return cls._get_all(parent_id, grandparent_id) else: return cls._get(resource_id, parent_id, grandparent_id)
Retrieves the required resources. :param resource_id: The identifier for the specific resource within the resource type. :param parent_id: The identifier for the specific ancestor resource within the resource type. :param grandparent_id: The identifier that is associated with network objects that are ancestors of the parent of the necessary resource.
def sendstop(self): ''' Kill process (:meth:`subprocess.Popen.terminate`). Do not wait for command to complete. :rtype: self ''' if not self.is_started: raise EasyProcessError(self, 'process was not started!') log.debug('stopping process (pid=%s cmd="%s")', self.pid, self.cmd) if self.popen: if self.is_alive(): log.debug('process is active -> sending SIGTERM') try: try: self.popen.terminate() except AttributeError: os.kill(self.popen.pid, signal.SIGKILL) except OSError as oserror: log.debug('exception in terminate:%s', oserror) else: log.debug('process was already stopped') else: log.debug('process was not started') return self
Kill process (:meth:`subprocess.Popen.terminate`). Do not wait for command to complete. :rtype: self
def unmarshal(self, value, bind_client=None): """ Cast the specified value to the entity type. """ #self.log.debug("Unmarshall {0!r}: {1!r}".format(self, value)) if not isinstance(value, self.type): o = self.type() if bind_client is not None and hasattr(o.__class__, 'bind_client'): o.bind_client = bind_client if isinstance(value, dict): for (k, v) in value.items(): if not hasattr(o.__class__, k): self.log.warning("Unable to set attribute {0} on entity {1!r}".format(k, o)) else: #self.log.debug("Setting attribute {0} on entity {1!r}".format(k, o)) setattr(o, k, v) value = o else: raise Exception("Unable to unmarshall object {0!r}".format(value)) return value
Cast the specified value to the entity type.
def create(self, ospf_process_id, vrf=None):
    """Creates an OSPF process in the specified VRF or the default VRF.

    Args:
        ospf_process_id (str): The OSPF process Id value
        vrf (str): The VRF to apply this OSPF process to

    Returns:
        bool: True if the command completed successfully

    Exception:
        ValueError: If the ospf_process_id passed in is less
            than 1 or greater than 65535

    """
    value = int(ospf_process_id)
    if not 0 < value < 65536:
        raise ValueError('ospf process id must be between 1 and 65535')
    command = 'router ospf {}'.format(ospf_process_id)
    if vrf:
        command += ' vrf %s' % vrf
    return self.configure(command)
Creates an OSPF process in the specified VRF or the default VRF.

Args:
    ospf_process_id (str): The OSPF process Id value
    vrf (str): The VRF to apply this OSPF process to

Returns:
    bool: True if the command completed successfully

Exception:
    ValueError: If the ospf_process_id passed in is less
        than 1 or greater than 65535
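A hypothetical usage sketch; the `node` connection object and its `api()` accessor are assumptions used only to show the call shape:

ospf = node.api('ospf')               # placeholder connection object
assert ospf.create('10')              # -> router ospf 10 (default VRF)
assert ospf.create('20', vrf='mgmt')  # -> router ospf 20 vrf mgmt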
def propose_value(self, value, assume_leader=False): """ Proposes a value to the network. """ if value is None: raise ValueError("Not allowed to propose value None") paxos = self.paxos_instance paxos.leader = assume_leader msg = paxos.propose_value(value) if msg is None: msg = paxos.prepare() self.setattrs_from_paxos(paxos) self.announce(msg) return msg
Proposes a value to the network.
def move(self, remote_path_from, remote_path_to, overwrite=False): """Moves resource from one place to another on WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_MOVE :param remote_path_from: the path to resource which will be moved, :param remote_path_to: the path where resource will be moved. :param overwrite: (optional) the flag, overwrite file if it exists. Defaults is False """ urn_from = Urn(remote_path_from) if not self.check(urn_from.path()): raise RemoteResourceNotFound(urn_from.path()) urn_to = Urn(remote_path_to) if not self.check(urn_to.parent()): raise RemoteParentNotFound(urn_to.path()) header_destination = f'Destination: {self.get_full_path(urn_to)}' header_overwrite = f'Overwrite: {"T" if overwrite else "F"}' self.execute_request(action='move', path=urn_from.quote(), headers_ext=[header_destination, header_overwrite])
Moves resource from one place to another on WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_MOVE :param remote_path_from: the path to resource which will be moved, :param remote_path_to: the path where resource will be moved. :param overwrite: (optional) the flag, overwrite file if it exists. Defaults is False
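A short usage sketch; the client construction keys below are assumptions, shown only to give move() some context:

# Hypothetical client setup -- option names are illustrative.
client = Client({'webdav_hostname': 'https://example.com/dav',
                 'webdav_login': 'user',
                 'webdav_password': 'secret'})
client.move('reports/draft.txt', 'reports/final.txt', overwrite=True)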
def _get_config(self, path=None): """ Get the config. :rtype: dict """ if not path and not self.option("config"): raise Exception("The --config|-c option is missing.") if not path: path = self.option("config") filename, ext = os.path.splitext(path) if ext in [".yml", ".yaml"]: with open(path) as fd: config = yaml.load(fd) elif ext in [".py"]: config = {} with open(path) as fh: exec(fh.read(), {}, config) else: raise RuntimeError("Config file [%s] is not supported." % path) return config
Get the config. :rtype: dict
def data(self, name, chunk, body):
    """
    Issue a DATA command. Returns None.

    Sends a chunk of data to a peer.
    """
    self.callRemote(Data, name=name, chunk=chunk, body=body)
Issue a DATA command. Returns None. Sends a chunk of data to a peer.
def evaluate_report(report): """Iterate over validation errors.""" if report["valid"]: return for warn in report["warnings"]: LOGGER.warning(warn) # We only ever test one table at a time. for err in report["tables"][0]["errors"]: LOGGER.error(err["message"]) raise ValueError("Invalid data file. Please see errors above.")
Iterate over validation errors.
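The report shape the function expects can be inferred from its body; a minimal made-up example:

report = {
    "valid": False,
    "warnings": ["header row re-ordered"],
    "tables": [{"errors": [{"message": "row 7: missing value in column 'id'"}]}],
}
evaluate_report(report)  # logs the warning and the error, then raises ValueError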
def add_generator_action(self, action):
    """
    Attach/add one :class:`GeneratorAction`.

    Warning:
        The order in which you add :class:`GeneratorAction` objects **is** important in case of conflicting
        :class:`GeneratorAction` objects: **only** the **first compatible** :class:`GeneratorAction` object
        will be used to generate the (source code) files.
    """
    if not isinstance(action, GeneratorAction):
        raise RuntimeError('Cannot add a non-GeneratorAction object.')
    self.__generator_actions.append(action)
Attach/add one :class:`GeneratorAction`. Warning: The order in which you add :class:`GeneratorAction` objects **is** important in case of conflicting :class:`GeneratorAction` objects: **only** the **first compatible** :class:`GeneratorAction` object will be used to generate the (source code) files.
def add_command_line_options(cls, parser): """function to inject command line parameters""" if "add_argument" in dir(parser): return cls.add_command_line_options_argparse(parser) else: return cls.add_command_line_options_optparse(parser)
function to inject command line parameters
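The duck-typing test above distinguishes the two parser families; this stand-alone check shows why it works:

import argparse
import optparse

print("add_argument" in dir(argparse.ArgumentParser()))  # True  -> argparse branch
print("add_argument" in dir(optparse.OptionParser()))    # False -> optparse branch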
def add_color(self, name, model, description): r"""Add a color that can be used throughout the document. Args ---- name: str Name to set for the color model: str The color model to use when defining the color description: str The values to use to define the color """ if self.color is False: self.packages.append(Package("color")) self.color = True self.preamble.append(Command("definecolor", arguments=[name, model, description]))
r"""Add a color that can be used throughout the document. Args ---- name: str Name to set for the color model: str The color model to use when defining the color description: str The values to use to define the color
def dump_table_as_insert_sql(engine: Engine, table_name: str, fileobj: TextIO, wheredict: Dict[str, Any] = None, include_ddl: bool = False, multirow: bool = False) -> None: """ Reads a table from the database, and writes SQL to replicate the table's data to the output ``fileobj``. Args: engine: SQLAlchemy :class:`Engine` table_name: name of the table fileobj: file-like object to write to wheredict: optional dictionary of ``{column_name: value}`` to use as ``WHERE`` filters include_ddl: if ``True``, include the DDL to create the table as well multirow: write multi-row ``INSERT`` statements """ # http://stackoverflow.com/questions/5631078/sqlalchemy-print-the-actual-query # noqa # http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html # http://www.tylerlesmann.com/2009/apr/27/copying-databases-across-platforms-sqlalchemy/ # noqa # https://github.com/plq/scripts/blob/master/pg_dump.py log.info("dump_data_as_insert_sql: table_name={}", table_name) writelines_nl(fileobj, [ SEP1, sql_comment("Data for table: {}".format(table_name)), SEP2, sql_comment("Filters: {}".format(wheredict)), ]) dialect = engine.dialect if not dialect.supports_multivalues_insert: multirow = False if multirow: log.warning("dump_data_as_insert_sql: multirow parameter substitution " "not working yet") multirow = False # literal_query = make_literal_query_fn(dialect) meta = MetaData(bind=engine) log.debug("... retrieving schema") table = Table(table_name, meta, autoload=True) if include_ddl: log.debug("... producing DDL") dump_ddl(table.metadata, dialect_name=engine.dialect.name, fileobj=fileobj) # NewRecord = quick_mapper(table) # columns = table.columns.keys() log.debug("... fetching records") # log.debug("meta: {}", meta) # obscures password # log.debug("table: {}", table) # log.debug("table.columns: {!r}", table.columns) # log.debug("multirow: {}", multirow) query = select(table.columns) if wheredict: for k, v in wheredict.items(): col = table.columns.get(k) query = query.where(col == v) # log.debug("query: {}", query) cursor = engine.execute(query) if multirow: row_dict_list = [] for r in cursor: row_dict_list.append(dict(r)) # log.debug("row_dict_list: {}", row_dict_list) if row_dict_list: statement = table.insert().values(row_dict_list) # log.debug("statement: {!r}", statement) # insert_str = literal_query(statement) insert_str = get_literal_query(statement, bind=engine) # NOT WORKING FOR MULTIROW INSERTS. ONLY SUBSTITUTES FIRST ROW. writeline_nl(fileobj, insert_str) else: writeline_nl(fileobj, sql_comment("No data!")) else: found_one = False for r in cursor: found_one = True row_dict = dict(r) statement = table.insert(values=row_dict) # insert_str = literal_query(statement) insert_str = get_literal_query(statement, bind=engine) # log.debug("row_dict: {}", row_dict) # log.debug("insert_str: {}", insert_str) writeline_nl(fileobj, insert_str) if not found_one: writeline_nl(fileobj, sql_comment("No data!")) writeline_nl(fileobj, SEP2) log.debug("... done")
Reads a table from the database, and writes SQL to replicate the table's data to the output ``fileobj``. Args: engine: SQLAlchemy :class:`Engine` table_name: name of the table fileobj: file-like object to write to wheredict: optional dictionary of ``{column_name: value}`` to use as ``WHERE`` filters include_ddl: if ``True``, include the DDL to create the table as well multirow: write multi-row ``INSERT`` statements
def imread(files, **kwargs): """Return image data from TIFF file(s) as numpy array. Refer to the TiffFile and TiffSequence classes and their asarray functions for documentation. Parameters ---------- files : str, binary stream, or sequence File name, seekable binary stream, glob pattern, or sequence of file names. kwargs : dict Parameters 'name', 'offset', 'size', 'multifile', and 'is_ome' are passed to the TiffFile constructor. The 'pattern' parameter is passed to the TiffSequence constructor. Other parameters are passed to the asarray functions. The first image series in the file is returned if no arguments are provided. """ kwargs_file = parse_kwargs(kwargs, 'is_ome', 'multifile', '_useframes', 'name', 'offset', 'size', 'multifile_close', 'fastij', 'movie') # legacy kwargs_seq = parse_kwargs(kwargs, 'pattern') if kwargs.get('pages', None) is not None: if kwargs.get('key', None) is not None: raise TypeError( "the 'pages' and 'key' arguments cannot be used together") log.warning("imread: the 'pages' argument is deprecated") kwargs['key'] = kwargs.pop('pages') if isinstance(files, basestring) and any(i in files for i in '?*'): files = glob.glob(files) if not files: raise ValueError('no files found') if not hasattr(files, 'seek') and len(files) == 1: files = files[0] if isinstance(files, basestring) or hasattr(files, 'seek'): with TiffFile(files, **kwargs_file) as tif: return tif.asarray(**kwargs) else: with TiffSequence(files, **kwargs_seq) as imseq: return imseq.asarray(**kwargs)
Return image data from TIFF file(s) as numpy array. Refer to the TiffFile and TiffSequence classes and their asarray functions for documentation. Parameters ---------- files : str, binary stream, or sequence File name, seekable binary stream, glob pattern, or sequence of file names. kwargs : dict Parameters 'name', 'offset', 'size', 'multifile', and 'is_ome' are passed to the TiffFile constructor. The 'pattern' parameter is passed to the TiffSequence constructor. Other parameters are passed to the asarray functions. The first image series in the file is returned if no arguments are provided.
def save(self, filepath=None, filename=None, mode="md"):
    """Save the answer as an HTML or Markdown document.

    :param str filepath: Directory to save the file in. Defaults to a
        directory named after the column title under the current directory;
        pass "." for the current directory itself.
    :param str filename: Name of the file to save. Defaults to
        "article title - author name".html/md. If the file already exists,
        a number is appended to make the name unique.
        **Do not include the .html or .md suffix when customizing the
        filename.**
    :param str mode: Output format, one of `html`, `markdown` or `md`.
    :return: None
    :rtype: None
    """
    if mode not in ["html", "md", "markdown"]:
        raise ValueError("`mode` must be 'html', 'markdown' or 'md',"
                         " got {0}".format(mode))
    self._make_soup()
    file = get_path(filepath, filename, mode, self.column.name,
                    self.title + '-' + self.author.name)
    with open(file, 'wb') as f:
        if mode == "html":
            f.write(self.soup['content'].encode('utf-8'))
        else:
            import html2text
            h2t = html2text.HTML2Text()
            h2t.body_width = 0
            f.write(h2t.handle(self.soup['content']).encode('utf-8'))
Save the answer as an HTML or Markdown document.

:param str filepath: Directory to save the file in. Defaults to a
    directory named after the column title under the current directory;
    pass "." for the current directory itself.
:param str filename: Name of the file to save. Defaults to
    "article title - author name".html/md. If the file already exists,
    a number is appended to make the name unique.
    **Do not include the .html or .md suffix when customizing the filename.**
:param str mode: Output format, one of `html`, `markdown` or `md`.
:return: None
:rtype: None
def send_reminder(self, user, sender=None, **kwargs): """Sends a reminder email to the specified user""" if user.is_active: return False token = RegistrationTokenGenerator().make_token(user) kwargs.update({"token": token}) self.email_message( user, self.reminder_subject, self.reminder_body, sender, **kwargs ).send()
Sends a reminder email to the specified user
def nmb_weights_hidden(self) -> int: """Number of hidden weights. >>> from hydpy import ANN >>> ann = ANN(None) >>> ann(nmb_inputs=2, nmb_neurons=(4, 3, 2), nmb_outputs=3) >>> ann.nmb_weights_hidden 18 """ nmb = 0 for idx_layer in range(self.nmb_layers-1): nmb += self.nmb_neurons[idx_layer] * self.nmb_neurons[idx_layer+1] return nmb
Number of hidden weights. >>> from hydpy import ANN >>> ann = ANN(None) >>> ann(nmb_inputs=2, nmb_neurons=(4, 3, 2), nmb_outputs=3) >>> ann.nmb_weights_hidden 18
def phot(fits_filename, x_in, y_in, aperture=15, sky=20, swidth=10,
         apcor=0.3, maxcount=30000.0, exptime=1.0, zmag=None, extno=0,
         centroid=True):
    """
    Compute the centroids and magnitudes of a bunch of sources on a fits image.

    :rtype : astropy.table.Table
    :param fits_filename: Name of fits image to measure source photometry on.
    :type fits_filename: str
    :param x_in: x location of source to measure
    :type x_in: float, numpy.array
    :param y_in: y location of source to measure
    :type y_in: float, numpy.array
    :param aperture: radius of circular aperture to use.
    :type aperture: float
    :param sky: radius of inner sky annulus
    :type sky: float
    :param swidth: width of the sky annulus
    :type swidth: float
    :param apcor: Aperture correction to take aperture flux to full flux.
    :type apcor: float
    :param maxcount: maximum linearity in the image.
    :type maxcount: float
    :param exptime: exposure time, relative to zmag supplied
    :type exptime: float
    :param zmag: zeropoint magnitude
    :param extno: extension of fits_filename the x/y location refers to.
    """
    if not hasattr(x_in, '__iter__'):
        x_in = [x_in, ]
    if not hasattr(y_in, '__iter__'):
        y_in = [y_in, ]

    if (not os.path.exists(fits_filename) and
            not fits_filename.endswith(".fits")):
        # For convenience, see if we just forgot to provide the extension
        fits_filename += ".fits"

    try:
        input_hdulist = fits.open(fits_filename)
    except Exception as err:
        logger.debug(str(err))
        raise TaskError("Failed to open input image: %s" % err.message)

    # get the filter for this image
    filter_name = input_hdulist[extno].header.get('FILTER', 'DEFAULT')

    # Some nominal CFHT zeropoints that might be useful
    zeropoints = {"I": 25.77,
                  "R": 26.07,
                  "V": 26.07,
                  "B": 25.92,
                  "DEFAULT": 26.0,
                  "g.MP9401": 32.0,
                  'r.MP9601': 31.9,
                  'gri.MP9603': 33.520}

    if zmag is None:
        logger.warning("No zmag supplied to daophot, looking for header or default values.")
        zmag = input_hdulist[extno].header.get('PHOTZP', zeropoints[filter_name])
        logger.warning("Setting zmag to: {}".format(zmag))
        # check for magic 'zeropoint.used' files
        for zpu_file in ["{}.zeropoint.used".format(os.path.splitext(fits_filename)[0]),
                         "zeropoint.used"]:
            if os.access(zpu_file, os.R_OK):
                with open(zpu_file) as zpu_fh:
                    zmag = float(zpu_fh.read())
                    logger.warning("Using file {} to set zmag to: {}".format(zpu_file, zmag))
                break

    photzp = input_hdulist[extno].header.get('PHOTZP', zeropoints.get(filter_name, zeropoints["DEFAULT"]))
    if zmag != photzp:
        logger.warning(("zmag sent to daophot: ({}) "
                        "doesn't match PHOTZP value in image header: ({})".format(zmag, photzp)))

    # setup IRAF to do the magnitude/centroid measurements
    iraf.set(uparm="./")
    iraf.digiphot()
    iraf.apphot()
    iraf.daophot(_doprint=0)

    iraf.photpars.apertures = aperture
    iraf.photpars.zmag = zmag
    iraf.datapars.datamin = 0
    iraf.datapars.datamax = maxcount
    iraf.datapars.exposur = ""
    iraf.datapars.itime = exptime
    iraf.fitskypars.annulus = sky
    iraf.fitskypars.dannulus = swidth
    iraf.fitskypars.salgorithm = "mode"
    iraf.fitskypars.sloclip = 5.0
    iraf.fitskypars.shiclip = 5.0
    if centroid:
        iraf.centerpars.calgori = "centroid"
        iraf.centerpars.cbox = 5.
        iraf.centerpars.cthreshold = 0.
        iraf.centerpars.maxshift = 2.
iraf.centerpars.clean = 'no' else: iraf.centerpars.calgori = "none" iraf.phot.update = 'no' iraf.phot.verbose = 'no' iraf.phot.verify = 'no' iraf.phot.interactive = 'no' # Used for passing the input coordinates coofile = tempfile.NamedTemporaryFile(suffix=".coo", delete=False) for i in range(len(x_in)): coofile.write("%f %f \n" % (x_in[i], y_in[i])) coofile.flush() # Used for receiving the results of the task # mag_fd, mag_path = tempfile.mkstemp(suffix=".mag") magfile = tempfile.NamedTemporaryFile(suffix=".mag", delete=False) # Close the temp files before sending to IRAF due to docstring: # "Whether the name can be used to open the file a second time, while # the named temporary file is still open, varies across platforms" coofile.close() magfile.close() os.remove(magfile.name) iraf.phot(fits_filename+"[{}]".format(extno), coofile.name, magfile.name) pdump_out = ascii.read(magfile.name, format='daophot') logging.debug("PHOT FILE:\n"+str(pdump_out)) if not len(pdump_out) > 0: mag_content = open(magfile.name).read() raise TaskError("photometry failed. {}".format(mag_content)) # apply the aperture correction pdump_out['MAG'] -= apcor # if pdump_out['PIER'][0] != 0 or pdump_out['SIER'][0] != 0 or pdump_out['CIER'][0] != 0: # raise ValueError("Photometry failed:\n {}".format(pdump_out)) # Clean up temporary files generated by IRAF os.remove(coofile.name) os.remove(magfile.name) logger.debug("Computed aperture photometry on {} objects in {}".format(len(pdump_out), fits_filename)) del input_hdulist return pdump_out
Compute the centroids and magnitudes of a bunch of sources on a fits image.

:rtype : astropy.table.Table
:param fits_filename: Name of fits image to measure source photometry on.
:type fits_filename: str
:param x_in: x location of source to measure
:type x_in: float, numpy.array
:param y_in: y location of source to measure
:type y_in: float, numpy.array
:param aperture: radius of circular aperture to use.
:type aperture: float
:param sky: radius of inner sky annulus
:type sky: float
:param swidth: width of the sky annulus
:type swidth: float
:param apcor: Aperture correction to take aperture flux to full flux.
:type apcor: float
:param maxcount: maximum linearity in the image.
:type maxcount: float
:param exptime: exposure time, relative to zmag supplied
:type exptime: float
:param zmag: zeropoint magnitude
:param extno: extension of fits_filename the x/y location refers to.
def state(self, state):
    """Set the current build state and record the time to maintain history.

    Note! This is different from the dataset state. Setting the build state
    is committed to the progress table/database immediately. The dstate is
    also set, but is not committed until the bundle is committed. So, the
    dstate changes more slowly.
    """
    assert state != 'build_bundle'
    self.buildstate.state.current = state
    self.buildstate.state[state] = time()
    self.buildstate.state.lasttime = time()
    self.buildstate.state.error = False
    self.buildstate.state.exception = None
    self.buildstate.state.exception_type = None
    self.buildstate.commit()
    if state in (self.STATES.NEW, self.STATES.CLEANED, self.STATES.BUILT,
                 self.STATES.FINALIZED, self.STATES.SOURCE):
        state = state if state != self.STATES.CLEANED else self.STATES.NEW
        self.dstate = state
Set the current build state and record the time to maintain history. Note! This is different from the dataset state. Setting the build state is committed to the progress table/database immediately. The dstate is also set, but is not committed until the bundle is committed. So, the dstate changes more slowly.
def fingerprint( self, phrase, phonetic_algorithm=double_metaphone, joiner=' ', *args, **kwargs ): """Return the phonetic fingerprint of a phrase. Parameters ---------- phrase : str The string from which to calculate the phonetic fingerprint phonetic_algorithm : function A phonetic algorithm that takes a string and returns a string (presumably a phonetic representation of the original string). By default, this function uses :py:func:`.double_metaphone`. joiner : str The string that will be placed between each word *args Variable length argument list **kwargs Arbitrary keyword arguments Returns ------- str The phonetic fingerprint of the phrase Examples -------- >>> pf = Phonetic() >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.') '0 afr fks jmpt kk ls prn tk' >>> from abydos.phonetic import soundex >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.', ... phonetic_algorithm=soundex) 'b650 d200 f200 j513 l200 o160 q200 t000' """ phonetic = '' for word in phrase.split(): word = phonetic_algorithm(word, *args, **kwargs) if not isinstance(word, text_type) and hasattr(word, '__iter__'): word = word[0] phonetic += word + joiner phonetic = phonetic[: -len(joiner)] return super(self.__class__, self).fingerprint(phonetic)
Return the phonetic fingerprint of a phrase. Parameters ---------- phrase : str The string from which to calculate the phonetic fingerprint phonetic_algorithm : function A phonetic algorithm that takes a string and returns a string (presumably a phonetic representation of the original string). By default, this function uses :py:func:`.double_metaphone`. joiner : str The string that will be placed between each word *args Variable length argument list **kwargs Arbitrary keyword arguments Returns ------- str The phonetic fingerprint of the phrase Examples -------- >>> pf = Phonetic() >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.') '0 afr fks jmpt kk ls prn tk' >>> from abydos.phonetic import soundex >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.', ... phonetic_algorithm=soundex) 'b650 d200 f200 j513 l200 o160 q200 t000'
def get_settings_from_client(client): """Pull out settings from a SoftLayer.BaseClient instance. :param client: SoftLayer.BaseClient instance """ settings = { 'username': '', 'api_key': '', 'timeout': '', 'endpoint_url': '', } try: settings['username'] = client.auth.username settings['api_key'] = client.auth.api_key except AttributeError: pass transport = _resolve_transport(client.transport) try: settings['timeout'] = transport.timeout settings['endpoint_url'] = transport.endpoint_url except AttributeError: pass return settings
Pull out settings from a SoftLayer.BaseClient instance. :param client: SoftLayer.BaseClient instance
def extended_key_usage(self): """The :py:class:`~django_ca.extensions.ExtendedKeyUsage` extension, or ``None`` if it doesn't exist.""" try: ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.EXTENDED_KEY_USAGE) except x509.ExtensionNotFound: return None return ExtendedKeyUsage(ext)
The :py:class:`~django_ca.extensions.ExtendedKeyUsage` extension, or ``None`` if it doesn't exist.
def add_status_code(code):
    """Decorator that registers mprpc's standard exceptions in `_mprpc_exceptions`.

    Parameters:
        code (int): - the standard status code

    Return:
        (Callable): - the decorator function
    """
    def class_decorator(cls):
        """Inner decorator that registers the exception class under the given status code.

        Parameters:
            cls (Callable): - the exception class to register

        Return:
            (Callable): - the registered exception class
        """
        cls.status_code = code
        _mprpc_exceptions[code] = cls
        return cls
    return class_decorator
Decorator that registers mprpc's standard exceptions in `_mprpc_exceptions`.

Parameters:
    code (int): - the standard status code

Return:
    (Callable): - the decorator function
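A usage sketch for the decorator above; the registry assignment and the exception class are stand-ins to make the snippet self-contained:

_mprpc_exceptions = {}  # stand-in for the module-level registry referenced above

@add_status_code(404)
class MethodNotFoundError(Exception):
    """Illustrative exception class."""

print(MethodNotFoundError.status_code)  # 404
print(_mprpc_exceptions[404].__name__)  # 'MethodNotFoundError'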
def set_system_time(self, time_source, ntp_server, date_format,
                    time_format, time_zone, is_dst, dst, year, mon,
                    day, hour, minute, sec, callback=None):
    '''
    Set system time
    '''
    if ntp_server not in ['time.nist.gov',
                          'time.kriss.re.kr',
                          'time.windows.com',
                          'time.nuri.net',
                          ]:
        raise ValueError('Unsupported ntpServer')

    params = {'timeSource': time_source,
              'ntpServer' : ntp_server,
              'dateFormat': date_format,
              'timeFormat': time_format,
              'timeZone'  : time_zone,
              'isDst'     : is_dst,
              'dst'       : dst,
              'year'      : year,
              'mon'       : mon,
              'day'       : day,
              'hour'      : hour,
              'minute'    : minute,
              'sec'       : sec
              }

    return self.execute_command('setSystemTime', params, callback=callback)
Set system time
def get_command(self, ctx: click.Context, name: str) -> click.Command:
    """Return the relevant command given the context and name.

    .. warning::

        This differs substantially from Flask in that it allows
        for the inbuilt commands to be overridden.
    """
    info = ctx.ensure_object(ScriptInfo)
    command = None
    try:
        command = info.load_app().cli.get_command(ctx, name)
    except NoAppException:
        pass
    if command is None:
        command = super().get_command(ctx, name)
    return command
Return the relevant command given the context and name.

.. warning::

    This differs substantially from Flask in that it allows for the
    inbuilt commands to be overridden.
def pressure_tendency(code: str, unit: str = 'mb') -> str: """ Translates a 5-digit pressure outlook code Ex: 50123 -> 12.3 mb: Increasing, then decreasing """ width, precision = int(code[2:4]), code[4] return ('3-hour pressure difference: +/- ' f'{width}.{precision} {unit} - {PRESSURE_TENDENCIES[code[1]]}')
Translates a 5-digit pressure outlook code Ex: 50123 -> 12.3 mb: Increasing, then decreasing
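A worked call matching the docstring example; PRESSURE_TENDENCIES is defined elsewhere in the source, so a one-entry stand-in is used here:

PRESSURE_TENDENCIES = {'0': 'Increasing, then decreasing'}  # stand-in lookup

print(pressure_tendency('50123'))
# 3-hour pressure difference: +/- 12.3 mb - Increasing, then decreasing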
def _mod_run_check(cmd_kwargs, onlyif, unless): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) else return True ''' if onlyif: if __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0: return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless: if __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} # No reason to stop, return True return True
Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) else return True
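Outside Salt, the contract can be sketched by mocking the injected __salt__ registry (everything below is illustrative):

__salt__ = {'cmd.retcode': lambda cmd, **kw: 0 if cmd == 'true' else 1}  # mock

print(_mod_run_check({}, 'false', None))
# {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True}
print(_mod_run_check({}, None, 'true'))
# {'comment': 'unless condition is true', 'skip_watch': True, 'result': True}
print(_mod_run_check({}, 'true', 'false'))  # True -- no reason to stop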
def run_to_selected_state(self, path, state_machine_id=None): """Execute the state machine until a specific state. This state won't be executed. This is an asynchronous task """ if self.state_machine_manager.get_active_state_machine() is not None: self.state_machine_manager.get_active_state_machine().root_state.recursively_resume_states() if not self.finished_or_stopped(): logger.debug("Resume execution engine and run to selected state!") self.run_to_states = [] self.run_to_states.append(path) self.set_execution_mode(StateMachineExecutionStatus.RUN_TO_SELECTED_STATE) else: logger.debug("Start execution engine and run to selected state!") if state_machine_id is not None: self.state_machine_manager.active_state_machine_id = state_machine_id self.set_execution_mode(StateMachineExecutionStatus.RUN_TO_SELECTED_STATE) self.run_to_states = [] self.run_to_states.append(path) self._run_active_state_machine()
Execute the state machine until a specific state. This state won't be executed. This is an asynchronous task
def op_canonicalize(op_name, parsed_op): """ Get the canonical representation of a parsed operation's data. Meant for backwards-compatibility """ global CANONICALIZE_METHODS if op_name not in CANONICALIZE_METHODS: # no canonicalization needed return parsed_op else: return CANONICALIZE_METHODS[op_name](parsed_op)
Get the canonical representation of a parsed operation's data. Meant for backwards-compatibility
def process_constraints(self, inequalities=None, equalities=None,
                        momentinequalities=None, momentequalities=None,
                        block_index=0, removeequalities=False):
    """Process the constraints and generate localizing matrices. Useful
    only if the moment matrix already exists. Call it if you want to
    replace your constraints. The number of the respective types of
    constraints and the maximum degree of each constraint must remain the
    same.

    :param inequalities: Optional parameter to list inequality constraints.
    :type inequalities: list of :class:`sympy.core.exp.Expr`.
    :param equalities: Optional parameter to list equality constraints.
    :type equalities: list of :class:`sympy.core.exp.Expr`.
    :param momentinequalities: Optional parameter of inequalities defined
                               on moments.
    :type momentinequalities: list of :class:`sympy.core.exp.Expr`.
    :param momentequalities: Optional parameter of equalities defined
                             on moments.
    :type momentequalities: list of :class:`sympy.core.exp.Expr`.
    :param removeequalities: Optional parameter to attempt removing the
                             equalities by solving the linear equations.
    :type removeequalities: bool.
    """
    self.status = "unsolved"
    if block_index == 0:
        if self._original_F is not None:
            self.F = self._original_F
            self.obj_facvar = self._original_obj_facvar
            self.constant_term = self._original_constant_term
            self.n_vars = len(self.obj_facvar)
            self._new_basis = None
        block_index = self.constraint_starting_block
        self.__wipe_F_from_constraints()
    self.constraints = flatten([inequalities])
    self._constraint_to_block_index = {}
    for constraint in self.constraints:
        self._constraint_to_block_index[constraint] = (block_index, )
        block_index += 1
    if momentinequalities is not None:
        for mineq in momentinequalities:
            self.constraints.append(mineq)
            self._constraint_to_block_index[mineq] = (block_index, )
            block_index += 1
    if not (removeequalities or equalities is None):
        # Equalities are converted to pairs of inequalities
        for k, equality in enumerate(equalities):
            if equality.is_Relational:
                equality = convert_relational(equality)
            self.constraints.append(equality)
            self.constraints.append(-equality)
            ln = len(self.localizing_monomial_sets[block_index -
                                                   self.constraint_starting_block])
            self._constraint_to_block_index[equality] = (block_index,
                                                         block_index + ln*(ln+1)//2)
            block_index += ln*(ln+1)
    if momentequalities is not None and not removeequalities:
        for meq in momentequalities:
            self.constraints += [meq, flip_sign(meq)]
            self._constraint_to_block_index[meq] = (block_index,
                                                    block_index+1)
            block_index += 2
    block_index = self.constraint_starting_block
    self.__process_inequalities(block_index)
    if removeequalities:
        self.__remove_equalities(equalities, momentequalities)
Process the constraints and generate localizing matrices. Useful only if the moment matrix already exists. Call it if you want to replace your constraints. The number of the respective types of constraints and the maximum degree of each constraint must remain the same.

:param inequalities: Optional parameter to list inequality constraints.
:type inequalities: list of :class:`sympy.core.exp.Expr`.
:param equalities: Optional parameter to list equality constraints.
:type equalities: list of :class:`sympy.core.exp.Expr`.
:param momentinequalities: Optional parameter of inequalities defined on moments.
:type momentinequalities: list of :class:`sympy.core.exp.Expr`.
:param momentequalities: Optional parameter of equalities defined on moments.
:type momentequalities: list of :class:`sympy.core.exp.Expr`.
:param removeequalities: Optional parameter to attempt removing the equalities by solving the linear equations.
:type removeequalities: bool.
def dispatch(self, event: Event) -> Iterator[Any]: """ Yields handlers matching the routing of the incoming :class:`slack.events.Event`. Args: event: :class:`slack.events.Event` Yields: handler """ LOG.debug('Dispatching event "%s"', event.get("type")) if event["type"] in self._routes: for detail_key, detail_values in self._routes.get( event["type"], {} ).items(): event_value = event.get(detail_key, "*") yield from detail_values.get(event_value, []) else: return
Yields handlers matching the routing of the incoming :class:`slack.events.Event`. Args: event: :class:`slack.events.Event` Yields: handler
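The `_routes` layout the method walks, event type to detail key to detail value to a list of handlers, can be stubbed directly (everything below is illustrative; `router` stands in for an instance of the class defining `dispatch`):

def on_c123(event):
    print('handling', event['channel'])

router._routes = {'message': {'channel': {'C123': [on_c123]}}}
event = {'type': 'message', 'channel': 'C123'}
for handler in router.dispatch(event):
    handler(event)  # prints: handling C123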
def remove_cons_vars_from_problem(model, what): """Remove variables and constraints from a Model's solver object. Useful to temporarily remove variables and constraints from a Models's solver object. Parameters ---------- model : a cobra model The model from which to remove the variables and constraints. what : list or tuple of optlang variables or constraints. The variables or constraints to remove from the model. Must be of class `model.problem.Variable` or `model.problem.Constraint`. """ context = get_context(model) model.solver.remove(what) if context: context(partial(model.solver.add, what))
Remove variables and constraints from a Model's solver object. Useful to temporarily remove variables and constraints from a Models's solver object. Parameters ---------- model : a cobra model The model from which to remove the variables and constraints. what : list or tuple of optlang variables or constraints. The variables or constraints to remove from the model. Must be of class `model.problem.Variable` or `model.problem.Constraint`.
def get_url(params): """Return external URL for warming up a given chart/table cache.""" baseurl = 'http://{SUPERSET_WEBSERVER_ADDRESS}:{SUPERSET_WEBSERVER_PORT}/'.format( **app.config) with app.test_request_context(): return urllib.parse.urljoin( baseurl, url_for('Superset.explore_json', **params), )
Return external URL for warming up a given chart/table cache.
def p_instanceDeclaration(p): # pylint: disable=line-too-long """instanceDeclaration : INSTANCE OF className '{' valueInitializerList '}' ';' | INSTANCE OF className alias '{' valueInitializerList '}' ';' | qualifierList INSTANCE OF className '{' valueInitializerList '}' ';' | qualifierList INSTANCE OF className alias '{' valueInitializerList '}' ';' """ # noqa: E501 # pylint: disable=too-many-locals,too-many-branches,too-many-statements alias = None quals = OrderedDict() ns = p.parser.handle.default_namespace if isinstance(p[1], six.string_types): # no qualifiers cname = p[3] if p[4] == '{': props = p[5] else: props = p[6] alias = p[4] else: cname = p[4] # quals = p[1] # qualifiers on instances are deprecated -- rightly so. if p[5] == '{': props = p[6] else: props = p[7] alias = p[5] try: cc = p.parser.handle.GetClass(cname, LocalOnly=False, IncludeQualifiers=True) p.parser.classnames[ns].append(cc.classname.lower()) except CIMError as ce: ce.file_line = (p.parser.file, p.lexer.lineno) if ce.status_code == CIM_ERR_NOT_FOUND: file_ = p.parser.mofcomp.find_mof(cname) if p.parser.verbose: p.parser.log( _format("Class {0!A} does not exist", cname)) if file_: p.parser.mofcomp.compile_file(file_, ns) cc = p.parser.handle.GetClass(cname, LocalOnly=False, IncludeQualifiers=True) else: if p.parser.verbose: p.parser.log("Can't find file to satisfy class") ce = CIMError(CIM_ERR_INVALID_CLASS, cname) ce.file_line = (p.parser.file, p.lexer.lineno) raise ce else: raise path = CIMInstanceName(cname, namespace=ns) inst = CIMInstance(cname, qualifiers=quals, path=path) keybindings = NocaseDict() # dictionary to build kb if alias exists for prop in props: pname = prop[1] pval = prop[2] try: cprop = cc.properties[pname] except KeyError: ce = CIMError( CIM_ERR_INVALID_PARAMETER, _format("Invalid property. Not in class: {0!A}", pname)) ce.file_line = (p.parser.file, p.lexer.lineno) raise ce # confirm property name not duplicated. if pname in inst.properties: ce = CIMError( CIM_ERR_INVALID_PARAMETER, _format("Duplicate property: {0!A}", pname)) ce.file_line = (p.parser.file, p.lexer.lineno) raise ce try: # build instance property from class property but without # qualifiers, default value, pprop = cprop.copy() pprop.qualifiers = NocaseDict(None) pprop.value = cimvalue(pval, cprop.type) inst.properties[pname] = pprop # if alias and this is key property, add keybinding if alias and 'key' in cprop.qualifiers: keybindings[pname] = pprop.value except ValueError as ve: ce = CIMError( CIM_ERR_INVALID_PARAMETER, _format("Invalid value for property {0!A}: {1}", pname, ve)) ce.file_line = (p.parser.file, p.lexer.lineno) raise ce if alias: if keybindings: inst.path.keybindings = keybindings p.parser.aliases[alias] = inst.path p[0] = inst
instanceDeclaration : INSTANCE OF className '{' valueInitializerList '}' ';' | INSTANCE OF className alias '{' valueInitializerList '}' ';' | qualifierList INSTANCE OF className '{' valueInitializerList '}' ';' | qualifierList INSTANCE OF className alias '{' valueInitializerList '}' ';'
def _parse_octet(self, octet_str): """Convert a decimal octet into an integer. Args: octet_str: A string, the number to parse. Returns: The octet as an integer. Raises: ValueError: if the octet isn't strictly a decimal from [0..255]. """ if not octet_str: raise ValueError("Empty octet not permitted") # Whitelist the characters, since int() allows a lot of bizarre stuff. if not self._DECIMAL_DIGITS.issuperset(octet_str): msg = "Only decimal digits permitted in %r" raise ValueError(msg % octet_str) # We do the length check second, since the invalid character error # is likely to be more informative for the user if len(octet_str) > 3: msg = "At most 3 characters permitted in %r" raise ValueError(msg % octet_str) # Convert to integer (we know digits are legal) octet_int = int(octet_str, 10) # Any octets that look like they *might* be written in octal, # and which don't look exactly the same in both octal and # decimal are rejected as ambiguous if octet_int > 7 and octet_str[0] == '0': msg = "Ambiguous (octal/decimal) value in %r not permitted" raise ValueError(msg % octet_str) if octet_int > 255: raise ValueError("Octet %d (> 255) not permitted" % octet_int) return octet_int
Convert a decimal octet into an integer. Args: octet_str: A string, the number to parse. Returns: The octet as an integer. Raises: ValueError: if the octet isn't strictly a decimal from [0..255].
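A few illustrative calls; _parse_octet is a method in the source, so this sketch assumes it is reachable at module level and binds it to a tiny stub:

class _Stub:
    _DECIMAL_DIGITS = frozenset('0123456789')  # assumed class attribute
    _parse_octet = _parse_octet                # reuse the function above

v = _Stub()
print(v._parse_octet('255'))   # 255
print(v._parse_octet('0'))     # 0
# v._parse_octet('08')   -> ValueError (ambiguous octal/decimal)
# v._parse_octet('256')  -> ValueError (> 255)
# v._parse_octet('1234') -> ValueError (more than 3 characters)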
def _build_integer_type(var, property_path=None):
    """ Builds schema definitions for integer type values.

    :param var: The integer type value
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """

    if not property_path:
        property_path = []

    schema = {"type": "integer"}
    if is_builtin_type(var):
        return schema

    if is_config_var(var):
        schema.update(
            _build_attribute_modifiers(var, {"min": "minimum", "max": "maximum"})
        )

    return schema
Builds schema definitions for integer type values.

:param var: The integer type value
:param List[str] property_path: The property path of the current type,
    defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any]
def preprocess_worksheet(self, table, worksheet): ''' Performs a preprocess pass of the table to attempt naive conversions of data and to record the initial types of each cell. ''' table_conversion = [] flags = {} units = {} for rind, row in enumerate(table): conversion_row = [] table_conversion.append(conversion_row) if self.skippable_rows and worksheet in self.skippable_rows and rind in self.skippable_rows[worksheet]: self.flag_change(flags, 'interpreted', (rind, None), worksheet, self.FLAGS['skipped-row']) continue for cind, cell in enumerate(row): position = (rind, cind) if self.skippable_columns and worksheet in self.skippable_columns and cind in self.skippable_columns[worksheet]: conversion = None self.flag_change(flags, 'interpreted', position, worksheet, self.FLAGS['skipped-column']) else: # Do the heavy lifting in pre_process_cell conversion = auto_convert_cell(self, cell, position, worksheet, flags, units, parens_as_neg=self.parens_as_neg) conversion_row.append(conversion) # Give back our conversions, type labeling, and conversion flags return table_conversion, flags, units
Performs a preprocess pass of the table to attempt naive conversions of data and to record the initial types of each cell.
def do_usufy(self, query, **kwargs): """ Verifying a usufy query in this platform. This might be redefined in any class inheriting from Platform. Args: ----- query: The element to be searched. Return: ------- A list of elements to be appended. """ results = [] test = self.check_usufy(query, **kwargs) if test: r = { "type": "i3visio.profile", "value": self.platformName + " - " + query, "attributes": [] } # Appending platform URI aux = {} aux["type"] = "i3visio.uri" aux["value"] = self.createURL(word=query, mode="usufy") aux["attributes"] = [] r["attributes"].append(aux) # Appending the alias aux = {} aux["type"] = "i3visio.alias" aux["value"] = query aux["attributes"] = [] r["attributes"].append(aux) # Appending platform name aux = {} aux["type"] = "i3visio.platform" aux["value"] = self.platformName aux["attributes"] = [] r["attributes"].append(aux) r["attributes"] += self.process_usufy(test) results.append(r) return results
Verifying a usufy query in this platform. This might be redefined in any class inheriting from Platform. Args: ----- query: The element to be searched. Return: ------- A list of elements to be appended.
def get_data_pct(self, xpct, ypct): """Calculate new data size for the given axis ratios. See :meth:`get_limits`. Parameters ---------- xpct, ypct : float Ratio for X and Y, respectively, where 1 is 100%. Returns ------- x, y : int Scaled dimensions. """ xy_mn, xy_mx = self.get_limits() width = abs(xy_mx[0] - xy_mn[0]) height = abs(xy_mx[1] - xy_mn[1]) x, y = int(float(xpct) * width), int(float(ypct) * height) return (x, y)
Calculate new data size for the given axis ratios. See :meth:`get_limits`. Parameters ---------- xpct, ypct : float Ratio for X and Y, respectively, where 1 is 100%. Returns ------- x, y : int Scaled dimensions.
def retry(num_attempts=3, exception_class=Exception, log=None, sleeptime=1): """ >>> def fail(): ... runs[0] += 1 ... raise ValueError() >>> runs = [0]; retry(sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [3] >>> runs = [0]; retry(2, sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [2] >>> runs = [0]; retry(exception_class=IndexError, sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [1] >>> logger = DoctestLogger() >>> runs = [0]; retry(log=logger, sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [3] >>> logger.print_logs() Failed with error ValueError(), trying again Failed with error ValueError(), trying again """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): for i in range(num_attempts): try: return func(*args, **kwargs) except exception_class as e: if i == num_attempts - 1: raise else: if log: log.warn('Failed with error %r, trying again', e) sleep(sleeptime) return wrapper return decorator
>>> def fail(): ... runs[0] += 1 ... raise ValueError() >>> runs = [0]; retry(sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [3] >>> runs = [0]; retry(2, sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [2] >>> runs = [0]; retry(exception_class=IndexError, sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [1] >>> logger = DoctestLogger() >>> runs = [0]; retry(log=logger, sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [3] >>> logger.print_logs() Failed with error ValueError(), trying again Failed with error ValueError(), trying again
def make_file_object_logger(fh): """ Make a logger that logs to the given file object. """ def logger_func(stmt, args, fh=fh): """ A logger that logs everything sent to a file object. """ now = datetime.datetime.now() six.print_("Executing (%s):" % now.isoformat(), file=fh) six.print_(textwrap.dedent(stmt), file=fh) six.print_("Arguments:", file=fh) pprint.pprint(args, fh) return logger_func
Make a logger that logs to the given file object.
def nd_load_and_stats(filenames, base_path=BASEPATH):
    """
    Load multiple files from disk and generate stats

    Passes the list of files assuming the ding0 data structure as default
    in :code:`~/.ding0`.

    Data will be concatenated and key indicators for each grid district are
    returned in table format.

    Parameters
    ----------
    filenames : list of str
        Provide list of files you want to analyze
    base_path : str
        Root directory of Ding0 data structure, i.e. '~/.ding0' (which is
        default).

    Returns
    -------
    stats : pandas.DataFrame
        Statistics of each MV grid district
    """

    # load Ding0 data
    nds = []
    for filename in filenames:
        try:
            nd_load = results.load_nd_from_pickle(filename=
                                                  os.path.join(base_path,
                                                               'grids',
                                                               filename))
            nds.append(nd_load)
        except Exception:
            print("File {mvgd} not found. It may have been excluded by "
                  "Ding0, or you may have forgotten to generate "
                  "it...".format(mvgd=filename))

    nd = nds[0]

    for n in nds[1:]:
        nd.add_mv_grid_district(n._mv_grid_districts[0])

    # get statistical numbers about grid
    stats = results.calculate_mvgd_stats(nd)

    return stats
Load multiple files from disk and generate stats

Passes the list of files assuming the ding0 data structure as default in
:code:`~/.ding0`.

Data will be concatenated and key indicators for each grid district are
returned in table format.

Parameters
----------
filenames : list of str
    Provide list of files you want to analyze
base_path : str
    Root directory of Ding0 data structure, i.e. '~/.ding0' (which is
    default).

Returns
-------
stats : pandas.DataFrame
    Statistics of each MV grid district
def _within_box(points, boxes): """Validate which keypoints are contained inside a given box. points: NxKx2 boxes: Nx4 output: NxK """ x_within = (points[..., 0] >= boxes[:, 0, None]) & ( points[..., 0] <= boxes[:, 2, None] ) y_within = (points[..., 1] >= boxes[:, 1, None]) & ( points[..., 1] <= boxes[:, 3, None] ) return x_within & y_within
Validate which keypoints are contained inside a given box. points: NxKx2 boxes: Nx4 output: NxK
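A small numpy check of the broadcasting (shapes as documented: points NxKx2, boxes Nx4):

import numpy as np

points = np.array([[[1.0, 1.0],
                    [5.0, 5.0]]])         # N=1 box, K=2 keypoints
boxes = np.array([[0.0, 0.0, 4.0, 4.0]])  # (x1, y1, x2, y2)
print(_within_box(points, boxes))         # [[ True False]]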
def get_nameserver_detail_output_show_nameserver_nameserver_ag_base_device(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_nameserver_detail = ET.Element("get_nameserver_detail") config = get_nameserver_detail output = ET.SubElement(get_nameserver_detail, "output") show_nameserver = ET.SubElement(output, "show-nameserver") nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid") nameserver_portid_key.text = kwargs.pop('nameserver_portid') nameserver_ag_base_device = ET.SubElement(show_nameserver, "nameserver-ag-base-device") nameserver_ag_base_device.text = kwargs.pop('nameserver_ag_base_device') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def function_exclusion_filter_builder(func: Strings) -> NodePredicate: """Build a filter that fails on nodes of the given function(s). :param func: A BEL Function or list/set/tuple of BEL functions """ if isinstance(func, str): def function_exclusion_filter(_: BELGraph, node: BaseEntity) -> bool: """Pass only for a node that doesn't have the enclosed function. :return: If the node doesn't have the enclosed function """ return node[FUNCTION] != func return function_exclusion_filter elif isinstance(func, Iterable): functions = set(func) def functions_exclusion_filter(_: BELGraph, node: BaseEntity) -> bool: """Pass only for a node that doesn't have the enclosed functions. :return: If the node doesn't have the enclosed functions """ return node[FUNCTION] not in functions return functions_exclusion_filter raise ValueError('Invalid type for argument: {}'.format(func))
Build a filter that fails on nodes of the given function(s). :param func: A BEL Function or list/set/tuple of BEL functions
def explore(args):
    """Create mapping of sequences of two clusters
    """
    logger.info("reading sequences")
    data = load_data(args.json)
    logger.info("get sequences from json")
    # get_sequences_from_cluster()
    c1, c2 = args.names.split(",")
    seqs, names = get_sequences_from_cluster(c1, c2, data[0])
    loci = get_precursors_from_cluster(c1, c2, data[0])
    logger.info("map all sequences to all loci")
    print("%s" % (loci))
    map_to_precursors(seqs, names, loci, os.path.join(args.out, "map.tsv"), args)
    # map_sequences_w_bowtie(sequences, precursors)
    logger.info("plot sequences on loci")
    # get_matrix_position()
    # plot_sequences()
    logger.info("Done")
Create mapping of sequences of two clusters
def analyze(self, text): """ Runs a line of text through MeCab, and returns the results as a list of lists ("records") that contain the MeCab analysis of each word. """ try: self.process # make sure things are loaded text = render_safe(text).replace('\n', ' ').lower() results = [] for chunk in string_pieces(text): self.send_input((chunk + '\n').encode('utf-8')) while True: out_line = self.receive_output_line().decode('utf-8') if out_line == 'EOS\n': break word, info = out_line.strip('\n').split('\t') record_parts = [word] + info.split(',') # Pad the record out to have 10 parts if it doesn't record_parts += [None] * (10 - len(record_parts)) record = MeCabRecord(*record_parts) # special case for detecting nai -> n if (record.surface == 'ん' and record.conjugation == '不変化型'): # rebuild the record so that record.root is 'nai' record_parts[MeCabRecord._fields.index('root')] = 'ない' record = MeCabRecord(*record_parts) results.append(record) return results except ProcessError: self.restart_process() return self.analyze(text)
Runs a line of text through MeCab, and returns the results as a list of lists ("records") that contain the MeCab analysis of each word.
async def expand_all_quays(self) -> None: """Find all quays from stop places.""" if not self.stops: return headers = {'ET-Client-Name': self._client_name} request = { 'query': GRAPHQL_STOP_TO_QUAY_TEMPLATE, 'variables': { 'stops': self.stops, 'omitNonBoarding': self.omit_non_boarding } } with async_timeout.timeout(10): resp = await self.web_session.post(RESOURCE, json=request, headers=headers) if resp.status != 200: _LOGGER.error( "Error connecting to Entur, response http status code: %s", resp.status) return None result = await resp.json() if 'errors' in result: return for stop_place in result['data']['stopPlaces']: if len(stop_place['quays']) > 1: for quay in stop_place['quays']: if quay['estimatedCalls']: self.quays.append(quay['id'])
Find all quays from stop places.