def upgrade(self, flag):
    """Upgrade Slackware binary packages with new ones."""
    for pkg in self.binary:
        try:
            subprocess.call("upgradepkg {0} {1}".format(flag, pkg),
                            shell=True)
            check = pkg[:-4].split("/")[-1]
            if os.path.isfile(self.meta.pkg_path + check):
                print("Completed!\n")
            else:
                raise SystemExit()
        except subprocess.CalledProcessError:
            self._not_found("Can't upgrade", self.binary, pkg)
            raise SystemExit(1)
def get_question_ids_for_assessment_part(self, assessment_part_id):
    """Convenience method; returns unique question ids associated with an
    assessment_part_id."""
    question_ids = []
    for question_map in self._my_map['questions']:
        if question_map['assessmentPartId'] == str(assessment_part_id):
            question_ids.append(
                self.get_question(question_map=question_map).get_id())
    return question_ids
def add_edge(self, from_index, to_index, weight=None, warn_duplicates=True,
             edge_properties=None):
    """
    Add edge to graph.

    Since physically a 'bond' (or other connection between sites) doesn't
    have a direction, from_index, from_jimage can be swapped with to_index,
    to_jimage. However, images will always be shifted so that
    from_index < to_index and from_jimage becomes (0, 0, 0).

    :param from_index: index of site connecting from
    :param to_index: index of site connecting to
    :param weight (float): e.g. bond length
    :param warn_duplicates (bool): if True, will warn if trying to add
        duplicate edges (duplicate edges will not be added in either case)
    :param edge_properties (dict): any other information to store on graph
        edges, similar to Structure's site_properties
    :return:
    """
    # this is not necessary for the class to work, but just makes it neater
    if to_index < from_index:
        to_index, from_index = from_index, to_index

    # sanitize types
    from_index, to_index = int(from_index), int(to_index)

    # check we're not trying to add a duplicate edge; there should only
    # ever be at most one edge between two sites
    existing_edge_data = self.graph.get_edge_data(from_index, to_index)
    if existing_edge_data and warn_duplicates:
        warnings.warn("Trying to add an edge that already exists from "
                      "site {} to site {}.".format(from_index, to_index))
        return

    # generic container for additional edge properties, similar to site
    # properties
    edge_properties = edge_properties or {}

    if weight:
        self.graph.add_edge(from_index, to_index, weight=weight,
                            **edge_properties)
    else:
        self.graph.add_edge(from_index, to_index, **edge_properties)
def generate_random_upload_path(instance, filename):
    """
    Pass this function to the upload_to argument of FileField to store the
    file on an unguessable path. The format of the path is
    class_name/hash/original_filename.
    """
    return os.path.join(instance.__class__.__name__.lower(), uuid().hex,
                        filename)
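A minimal usage sketch for the helper above, assuming a Django project and `uuid` imported as `uuid.uuid4` so that `uuid().hex` yields a random hex string (the model and field names are hypothetical):

from django.db import models

class Document(models.Model):
    # Each upload lands under e.g. document/<32-hex-chars>/<original filename>
    attachment = models.FileField(upload_to=generate_random_upload_path)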
def ensure_compatible(left, right):
    """Raise an informative ``ValueError`` if the two definitions disagree."""
    conflicts = list(conflicting_pairs(left, right))
    if conflicts:
        raise ValueError('conflicting values for object/property pairs: %r'
                         % conflicts)
def getUI(prog_name, args):
    """Build and return user interface object for this script."""
    longDescription = "Given a set of BED intervals, compute a profile of " +\
                      "conservation by averaging over all intervals using a " +\
                      "whole genome alignment to a set of relevant species." +\
                      "\n\n" +\
                      "Usage: " + prog_name + " [options] regions.bed " +\
                      "genome-alig species" +\
                      "\n\n" +\
                      "genome-alig can be either a single MAF file, or a " +\
                      "directory of MAF files. In the latter case, the " +\
                      "directory may also optionally contain index files for " +\
                      "the alignment files."
    shortDescription = longDescription

    ui = CLI(prog_name, shortDescription, longDescription)
    # gotta have two args -- MAF dir/file and BED regions.
    # Input by stdin not allowed
    ui.minArgs = 3
    ui.maxArgs = 4
    ui.addOption(Option(short="o", long="output", argName="filename",
                        description="output to given file, else stdout",
                        required=False, type=str))
    ui.addOption(Option(short="w", long="window", argName="size",
                        description="size of window to compute around each " +
                                    "interval; " + str(DEFAULT_WINDOW_SIZE) +
                                    " to use whole interval. " +
                                    "Default " + str(DEFAULT_WINDOW_SIZE),
                        required=False, type=int))
    ui.addOption(Option(short="e", long="extensions", argName="extension",
                        description="if genome-alig specifies a directory, " +
                                    "treat files with this extension as " +
                                    "alignment files.",
                        required=False, type=str))
    ui.addOption(Option(short="i", long="index-extensions", argName="extension",
                        description="if genome-alig specifies a directory, " +
                                    "treat files with this extension as " +
                                    "index files for alignments.",
                        required=False, type=str))
    ui.addOption(Option(short="f", long="fail-no-index",
                        description="fail if an alignment file without an " +
                                    "index is found; otherwise index-less " +
                                    "alignment files are loaded whole (which " +
                                    "might be slow if they're large, and " +
                                    "might require a lot of memory)",
                        default=False, required=False))
    ui.addOption(Option(short="m", long="missing", argName="strategy",
                        description="how to treat missing sequences in " +
                                    "blocks. Options are " +
                                    ", ".join([str(x.name) for x in
                                               MissingSequenceHandler]),
                        required=False, type=str))
    ui.addOption(Option(short="s", long="species", argName="species",
                        description="consider only these species. Default is " +
                                    "all.",
                        required=False, type=str))
    ui.addOption(Option(short="v", long="verbose",
                        description="output additional messages to stderr " +
                                    "about run (default: " +
                                    str(DEFAULT_VERBOSITY) + ")",
                        default=DEFAULT_VERBOSITY, required=False))
    ui.addOption(Option(short="h", long="help",
                        description="show this help message",
                        special=True))
    ui.addOption(Option(short="u", long="test",
                        description="run unit tests",
                        special=True))

    ui.parseCommandLine(args)
    return ui
def print_annotation(self):
    """Print annotation "key: value" pairs to standard output."""
    for path, ann in self.annotation.items():
        print("{}: {}".format(path, ann['value']))
async def sonar_data_retrieve(self, trigger_pin):
    """
    Retrieve Ping (HC-SR04 type) data. The data is presented as a
    dictionary. The 'key' is the trigger pin specified in sonar_config()
    and the 'data' is the current measured distance (in centimeters) for
    that pin. If there is no data, the value is set to None.

    :param trigger_pin: key into sonar data map

    :returns: measured distance for the trigger pin, or None if no entry
        exists
    """
    sonar_pin_entry = self.active_sonar_map.get(trigger_pin)
    if sonar_pin_entry is None:
        # no data has been recorded for this pin yet
        return None
    value = sonar_pin_entry[1]
    return value
def parse(self, debug=False):
    """Returns parsed text"""
    if self._parsed is None:
        try:
            if self._mode == "html":
                self._parsed = self.html(self._content,
                                         self._show_everything,
                                         self._translation)
            else:
                self._parsed = self.rst(self._content,
                                        self._show_everything,
                                        self._translation, debug=debug)
        except Exception as e:
            if debug:
                raise BaseException("Parsing failed") from e
            else:
                self._parsed = self._translation.gettext(
                    "<b>Parsing failed</b>: <pre>{}</pre>").format(
                    html.escape(self._content))
    return self._parsed
def gen_gradient(args, resource, inflow, radius, loc, common=True):
    """
    Returns a line of text to add to an environment file, initializing a
    gradient resource with the specified name (string), inflow (int),
    radius (int), and location (tuple of ints).
    """
    return "".join(["GRADIENT_RESOURCE ", str(resource),
                    ":height=", str(radius),
                    ":plateau=", str(inflow),
                    ":spread=", str(radius - 1),
                    ":common=", str(int(common)),
                    ":updatestep=1000000",
                    ":peakx=", str(loc[0]),
                    ":peaky=", str(loc[1]),
                    ":plateau_inflow=", str(inflow),
                    ":initial=", str(inflow) + "\n"])
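A quick worked call; the `args` parameter is unused by the function body, so any placeholder works:

line = gen_gradient(None, "res0", 100, 5, (10, 20))
# -> "GRADIENT_RESOURCE res0:height=5:plateau=100:spread=4:common=1"
#    ":updatestep=1000000:peakx=10:peaky=20:plateau_inflow=100:initial=100\n"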
def set_titles(self, template="{coord} = {value}", maxchar=30, **kwargs):
    """
    Draw titles either above each facet or on the grid margins.

    Parameters
    ----------
    template : string
        Template for plot titles containing {coord} and {value}
    maxchar : int
        Truncate titles at maxchar
    kwargs : keyword args
        additional arguments to matplotlib.text

    Returns
    -------
    self: FacetGrid object
    """
    import matplotlib as mpl

    kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])

    nicetitle = functools.partial(_nicetitle, maxchar=maxchar,
                                  template=template)

    if self._single_group:
        for d, ax in zip(self.name_dicts.flat, self.axes.flat):
            # Only label the ones with data
            if d is not None:
                coord, value = list(d.items()).pop()
                title = nicetitle(coord, value, maxchar=maxchar)
                ax.set_title(title, **kwargs)
    else:
        # The row titles on the right edge of the grid
        for ax, row_name in zip(self.axes[:, -1], self.row_names):
            title = nicetitle(coord=self._row_var, value=row_name,
                              maxchar=maxchar)
            ax.annotate(title, xy=(1.02, .5), xycoords="axes fraction",
                        rotation=270, ha="left", va="center", **kwargs)

        # The column titles on the top row
        for ax, col_name in zip(self.axes[0, :], self.col_names):
            title = nicetitle(coord=self._col_var, value=col_name,
                              maxchar=maxchar)
            ax.set_title(title, **kwargs)

    return self
def setup_logging(handler, exclude=EXCLUDE_LOGGER_DEFAULTS):
    """
    Configures logging to pipe to Sentry.

    - ``exclude`` is a list of loggers that shouldn't go to Sentry.

    For a typical Python install:

    >>> from raven.handlers.logging import SentryHandler
    >>> client = Sentry(...)
    >>> setup_logging(SentryHandler(client))

    Within Django:

    >>> from raven.contrib.django.handlers import SentryHandler
    >>> setup_logging(SentryHandler())

    Returns a boolean indicating whether logging was configured or not.
    """
    logger = logging.getLogger()
    if handler.__class__ in map(type, logger.handlers):
        return False

    logger.addHandler(handler)

    # Add StreamHandler to sentry's default so you can catch missed
    # exceptions
    for logger_name in exclude:
        logger = logging.getLogger(logger_name)
        logger.propagate = False
        logger.addHandler(logging.StreamHandler())

    return True
def _CreateWindowsPathResolver(
        self, file_system, mount_point, environment_variables):
    """Creates a Windows path resolver and sets the environment variables.

    Args:
        file_system (dfvfs.FileSystem): file system.
        mount_point (dfvfs.PathSpec): mount point path specification.
        environment_variables (list[EnvironmentVariableArtifact]):
            environment variables.

    Returns:
        dfvfs.WindowsPathResolver: Windows path resolver.
    """
    if environment_variables is None:
        environment_variables = []

    path_resolver = windows_path_resolver.WindowsPathResolver(
        file_system, mount_point)

    for environment_variable in environment_variables:
        name = environment_variable.name.lower()
        if name not in ('systemroot', 'userprofile'):
            continue

        path_resolver.SetEnvironmentVariable(
            environment_variable.name, environment_variable.value)

    return path_resolver
def SGg(self):
    r'''Specific gravity of the gas phase of the chemical, [dimensionless].
    The reference condition is air at 15.6 °C (60 °F) and 1 atm
    (rho=1.223 kg/m^3). The definition for gases uses the compressibility
    factor of the reference gas and the chemical both at the reference
    conditions, not the conditions of the chemical.

    Examples
    --------
    >>> Chemical('argon').SGg
    1.3795835970877504
    '''
    Vmg = self.VolumeGas(T=288.70555555555552, P=101325)
    if Vmg:
        rho = Vm_to_rho(Vmg, self.MW)
        # reference density calculated with Mixture
        return SG(rho, rho_ref=1.2231876628642968)
    return None
def paula_etree_to_string(tree, dtd_filename):
    """Convert a PAULA etree into an XML string."""
    return etree.tostring(
        tree, pretty_print=True, xml_declaration=True, encoding="UTF-8",
        standalone='no',
        doctype='<!DOCTYPE paula SYSTEM "{0}">'.format(dtd_filename))
def dd2dm(dd):
    """Convert decimal degrees to degrees, decimal minutes."""
    d, m, s = dd2dms(dd)
    # fold the seconds into decimal minutes (60 seconds per minute)
    m = m + float(s) / 60
    return d, m
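A quick worked example, assuming `dd2dms` splits a decimal-degree value into whole degrees, whole minutes, and seconds:

# 30.2625 degrees = 30 deg 15' 45", i.e. 30 deg 15.75' in degrees/decimal-minutes
d, m = dd2dm(30.2625)
# d == 30, m == 15.75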
def _htpasswd(username, password, **kwargs):
    '''
    Provide authentication via Apache-style htpasswd files
    '''
    from passlib.apache import HtpasswdFile

    pwfile = HtpasswdFile(kwargs['filename'])

    # passlib below version 1.6 uses the 'verify' function instead of
    # 'check_password'
    if salt.utils.versions.version_cmp(kwargs['passlib_version'], '1.6') < 0:
        return pwfile.verify(username, password)
    else:
        return pwfile.check_password(username, password)
def get_slo_url(self):
    """
    Gets the SLO URL.

    :returns: An URL, the SLO endpoint of the IdP
    :rtype: string
    """
    url = None
    idp_data = self.__settings.get_idp_data()
    if 'singleLogoutService' in idp_data.keys() and \
            'url' in idp_data['singleLogoutService']:
        url = idp_data['singleLogoutService']['url']
    return url
def _store_oauth_access_token(self, oauth_access_token):
    '''Called when login is complete to store the oauth access token

    This implementation stores the oauth_access_token in a separate
    cookie for domain steamwebbrowser.tld
    '''
    c = Cookie(version=0,
               name='oauth_access_token',
               value=oauth_access_token,
               port=None,
               port_specified=False,
               domain='steamwebbrowser.tld',
               domain_specified=True,
               domain_initial_dot=False,
               path='/',
               path_specified=True,
               secure=False,
               expires=None,
               discard=False,
               comment=None,
               comment_url=None,
               rest={})
    self.session.cookies.set_cookie(c)
    self._save_cookies()
def retries(timeout=30, intervals=1, time=time):
    """Helper for retrying something, sleeping between attempts.

    Returns a generator that yields ``(elapsed, remaining, wait)`` tuples,
    giving times in seconds. The last item, `wait`, is the suggested amount
    of time to sleep before trying again.

    :param timeout: From now, how long to keep iterating, in seconds. This
        can be specified as a number, or as an iterable. In the latter case,
        the iterator is advanced each time an interval is needed. This
        allows for back-off strategies.
    :param intervals: The sleep between each iteration, in seconds, or an
        iterable from which to obtain intervals.
    :param time: A callable that returns the current time in seconds.
    """
    start = time()
    end = start + timeout

    if isinstance(intervals, Iterable):
        intervals = iter(intervals)
    else:
        intervals = repeat(intervals)

    return gen_retries(start, end, intervals, time)
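A usage sketch under the docstring's contract; the operation being retried and its exception type are hypothetical:

from time import sleep

for elapsed, remaining, wait in retries(timeout=30, intervals=5):
    try:
        do_flaky_thing()      # hypothetical operation that may fail
        break
    except TransientError:    # hypothetical exception type
        sleep(wait)           # back off by the suggested interval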
def additive_self_attention(units, n_hidden=None, n_output_features=None,
                            activation=None):
    """
    Computes additive self attention for time series of vectors (with batch
    dimension) using the formula:

        score(h_i, h_j) = <v, tanh(W_1 h_i + W_2 h_j)>

    v is a learnable vector of n_hidden dimensionality, W_1 and W_2 are
    learnable [n_hidden, n_input_features] matrices

    Args:
        units: tf tensor with dimensionality
            [batch_size, time_steps, n_input_features]
        n_hidden: number of units in hidden representation of similarity
            measure
        n_output_features: number of features in output dense layer
        activation: activation at the output

    Returns:
        output: self attended tensor with dimensionality
            [batch_size, time_steps, n_output_features]
    """
    n_input_features = units.get_shape().as_list()[2]
    if n_hidden is None:
        n_hidden = n_input_features
    if n_output_features is None:
        n_output_features = n_input_features
    units_pairs = tf.concat([expand_tile(units, 1), expand_tile(units, 2)], 3)
    query = tf.layers.dense(units_pairs, n_hidden, activation=tf.tanh,
                            kernel_initializer=INITIALIZER())
    attention = tf.nn.softmax(tf.layers.dense(query, 1), dim=2)
    attended_units = tf.reduce_sum(attention * expand_tile(units, 1), axis=2)
    output = tf.layers.dense(attended_units, n_output_features, activation,
                             kernel_initializer=INITIALIZER())
    return output
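A minimal TF1-style usage sketch; it relies on the module's own `expand_tile` and `INITIALIZER` helpers, so it only runs in that context:

import tensorflow as tf  # TF 1.x API (tf.layers, tf.placeholder)

# batch of 8 sequences, 20 time steps, 64 input features
units = tf.placeholder(tf.float32, shape=[8, 20, 64])
attended = additive_self_attention(units, n_hidden=128,
                                   n_output_features=64,
                                   activation=tf.nn.relu)
# attended has shape [8, 20, 64]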
def str_to_etree(xml_str, encoding='utf-8'):
    """Deserialize API XML doc to an ElementTree.

    Args:
        xml_str: bytes
            DataONE API XML doc

        encoding: str
            Decoder to use when converting the XML doc ``bytes`` to a
            Unicode str.

    Returns:
        ElementTree: Matching the API version of the XML doc.
    """
    parser = xml.etree.ElementTree.XMLParser(encoding=encoding)
    return xml.etree.ElementTree.fromstring(xml_str, parser=parser)
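For example, with a made-up API document:

import xml.etree.ElementTree

doc = b'<?xml version="1.0" encoding="utf-8"?><objectList count="1"/>'
root = str_to_etree(doc)
print(root.tag, root.attrib)  # objectList {'count': '1'}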
def will_tag(self):
    """Check whether the feed should be tagged"""
    wanttags = self.retrieve_config('Tag', 'no')
    if wanttags == 'yes':
        if aux.staggerexists:
            willtag = True
        else:
            willtag = False
            print(("You want me to tag {0}, but you have not installed "
                   "the Stagger module. I cannot honour your request.")
                  .format(self.name), file=sys.stderr, flush=True)
    else:
        willtag = False
    return willtag
def list_market_catalogue(self, filter=market_filter(), market_projection=None,
                          sort=None, max_results=1, locale=None, session=None,
                          lightweight=None):
    """
    Returns a list of information about published (ACTIVE/SUSPENDED)
    markets that does not change (or changes very rarely).

    :param dict filter: The filter to select desired markets
    :param list market_projection: The type and amount of data returned
        about the market
    :param str sort: The order of the results
    :param int max_results: Limit on the total number of results returned,
        must be greater than 0 and less than or equal to 10000
    :param str locale: The language used for the response
    :param requests.session session: Requests session object
    :param bool lightweight: If True will return dict not a resource

    :rtype: list[resources.MarketCatalogue]
    """
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'listMarketCatalogue')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.MarketCatalogue,
                                 elapsed_time, lightweight)
def _get_network(self, network_info):
    """Send network get request to DCNM.

    :param network_info: contains network info to query.
    """
    org_name = network_info.get('organizationName', '')
    part_name = network_info.get('partitionName', '')
    segment_id = network_info['segmentId']
    url = self._network_url % (org_name, part_name, segment_id)
    return self._send_request('GET', url, '', 'network')
def _generate_reversed_sql(self, keys, changed_keys):
    """Generate reversed operations for changes that require a full
    rollback and re-creation."""
    for key in keys:
        if key not in changed_keys:
            continue
        app_label, sql_name = key
        old_item = self.from_sql_graph.nodes[key]
        new_item = self.to_sql_graph.nodes[key]
        if not old_item.reverse_sql or old_item.reverse_sql == RunSQL.noop \
                or new_item.replace:
            continue

        # migrate backwards
        operation = ReverseAlterSQL(sql_name, old_item.reverse_sql,
                                    reverse_sql=old_item.sql)
        sql_deps = [n.key for n in self.from_sql_graph.node_map[key].children]
        sql_deps.append(key)
        self.add_sql_operation(app_label, sql_name, operation, sql_deps)
def cleanup_kernels(self):
    """Shutdown all kernels.

    The kernels will shutdown themselves when this process no longer
    exists, but explicit shutdown allows the KernelManagers to cleanup
    the connection files.
    """
    self.log.info('Shutting down kernels')
    km = self.kernel_manager
    # copy list, since shutdown_kernel deletes keys
    for kid in list(km.kernel_ids):
        km.shutdown_kernel(kid)
def _proxy(self):
    """
    Generate an instance context for the instance; the context is capable
    of performing various actions. All instance actions are proxied to the
    context.

    :returns: ShortCodeContext for this ShortCodeInstance
    :rtype: twilio.rest.api.v2010.account.short_code.ShortCodeContext
    """
    if self._context is None:
        self._context = ShortCodeContext(
            self._version,
            account_sid=self._solution['account_sid'],
            sid=self._solution['sid'],
        )
    return self._context
def dump(self, itemkey, filename=None, path=None):
    """
    Dump a file attachment to disk, with optional filename and path
    """
    if not filename:
        filename = self.item(itemkey)["data"]["filename"]
    if path:
        pth = os.path.join(path, filename)
    else:
        pth = filename
    file = self.file(itemkey)
    if self.snapshot:
        self.snapshot = False
        pth = pth + ".zip"
    with open(pth, "wb") as f:
        f.write(file)
def build(template='/', host=None, scheme=None, port=None, **template_vars):
    """Builds a url with a string template and template variables; relative
    path if host is None, absolute otherwise.

    template format: "/staticendpoint/{dynamic_endpoint}?{params}"
    """
    # TODO: refactor to build_absolute and build_relative instead of
    # handling based on params
    parsed_host = urlparse.urlparse(host if host is not None else '')
    host_has_scheme = bool(parsed_host.scheme)
    if host_has_scheme:
        host = parsed_host.netloc
        # Prioritize scheme parameter, but if not specified, use the scheme
        # implied from the host
        scheme = parsed_host.scheme if scheme is None else scheme
    port = port or parsed_host.port  # Default to port override
    unparsed_path = urlparse.urlparse(template.format(**template_vars)).geturl()

    # If a host was specified, try to return a full url
    if host:
        if not scheme:
            raise ValueError('No scheme supplied and scheme could not be '
                             'inferred from the host: {}'.format(host))
        if port:
            # Extract the host with no port supplied
            host_no_port = host.partition(':')[0]
            host = '{host_no_port}:{port}'.format(host_no_port=host_no_port,
                                                  port=port)
        constructed_url = '//' + host + unparsed_path
        url = urlparse.urlparse(constructed_url, scheme=scheme).geturl()
    else:
        url = unparsed_path

    # Remove trailing parameter characters
    url = url[:-1] if url[-1] == '?' else url
    url = url[:-1] if url[-1] == '&' else url
    return url
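Two illustrative calls under the semantics above; the endpoint names are placeholders:

# Relative URL: no host given, so only the formatted path is returned.
build('/api/{version}/users', version='v2')
# -> '/api/v2/users'

# Absolute URL: scheme is inferred from the host, port is overridden.
build('/api/{version}/users', host='https://example.com', port=8443, version='v2')
# -> 'https://example.com:8443/api/v2/users'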
def help_center_articles_attachment_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/article_attachments#delete-article-attachment"
    api_path = "/api/v2/help_center/articles/attachments/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, method="DELETE", **kwargs)
def stop_times(self):
    """Return all stop_times for this agency."""
    stop_times = set()
    for trip in self.trips():
        stop_times |= trip.stop_times()
    return stop_times
def fetch_next_page(self, data):
    '''
    Fetches next page based on previously fetched data. Will get the next
    page url from data['paging']['next'].

    :param data: previously fetched API response.
    :type data: dict
    :return: API response.
    :rtype: dict
    '''
    next_url = data['paging']['next']
    if next_url is not None:
        next_data = self.http._post_data(
            next_url, None, self.http._headers_with_access_token())
        return next_data
    else:
        return None
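A pagination sketch built on this method; the initial request and the per-page handler are hypothetical:

data = client.fetch('/me/items')         # hypothetical first-page request
while data is not None:
    process(data)                        # hypothetical handler for one page
    data = client.fetch_next_page(data)  # None once 'paging' -> 'next' is exhausted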
def parse_from_parent(
    self,
    parent,  # type: ET.Element
    state    # type: _ProcessorState
):
    # type: (...) -> Any
    """Parse the element from the given parent element."""
    xml_value = self._processor.parse_from_parent(parent, state)
    return _hooks_apply_after_parse(self._hooks, state, xml_value)
def get_source_name(self, name):
    """Return the name of a source as it is defined in the pyLikelihood
    model object."""
    if name not in self.like.sourceNames():
        name = self.roi.get_source_by_name(name).name
    return name
def register_list(self):
    """Returns a list of the indices for the CPU registers.

    The returned indices can be used to read the register content or grab
    the register name.

    Args:
        self (JLink): the ``JLink`` instance

    Returns:
        List of registers.
    """
    num_items = self.MAX_NUM_CPU_REGISTERS
    buf = (ctypes.c_uint32 * num_items)()
    num_regs = self._dll.JLINKARM_GetRegisterList(buf, num_items)
    return buf[:num_regs]
def read_hip(self, length, extension):
    """Read Host Identity Protocol.

    Structure of HIP header [RFC 5201][RFC 7401]:

         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        | Next Header   | Header Length |0| Packet Type |Version| RES.|1|
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |           Checksum            |           Controls            |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |                Sender's Host Identity Tag (HIT)               |
        |                                                               |
        |                                                               |
        |                                                               |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |               Receiver's Host Identity Tag (HIT)              |
        |                                                               |
        |                                                               |
        |                                                               |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |                                                               |
        /                        HIP Parameters                         /
        /                                                               /
        |                                                               |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

        Octets      Bits        Name                Description
          0           0     hip.next            Next Header
          1           8     hip.length          Header Length
          2          16     -                   Reserved (0)
          2          17     hip.type            Packet Type
          3          24     hip.version         Version
          3          28     -                   Reserved
          3          31     -                   Reserved (1)
          4          32     hip.chksum          Checksum
          6          48     hip.control         Controls
          8          64     hip.shit            Sender's Host Identity Tag
         24         192     hip.rhit            Receiver's Host Identity Tag
         40         320     hip.parameters      HIP Parameters
    """
    if length is None:
        length = len(self)

    _next = self._read_protos(1)
    _hlen = self._read_unpack(1)
    _type = self._read_binary(1)
    if _type[0] != '0':
        raise ProtocolError('HIP: invalid format')
    _vers = self._read_binary(1)
    if _vers[7] != '1':
        raise ProtocolError('HIP: invalid format')
    _csum = self._read_fileng(2)
    _ctrl = self._read_binary(2)
    _shit = self._read_unpack(16)
    _rhit = self._read_unpack(16)

    hip = dict(
        next=_next,
        length=(_hlen + 1) * 8,
        type=_HIP_TYPES.get(int(_type[1:], base=2), 'Unassigned'),
        version=int(_vers[:4], base=2),
        chksum=_csum,
        control=dict(
            anonymous=True if int(_ctrl[15], base=2) else False,
        ),
        shit=_shit,
        rhit=_rhit,
    )

    _prml = _hlen - 38
    if _prml:
        parameters = self._read_hip_para(_prml, version=hip['version'])
        hip['parameters'] = parameters[0]   # tuple of parameter acronyms
        hip.update(parameters[1])           # merge parameters info to buffer

    length -= hip['length']
    hip['packet'] = self._read_packet(header=hip['length'], payload=length)

    if extension:
        self._protos = None
        return hip
    return self._decode_next_layer(hip, _next, length)
async def send_document(self, path, entity):
    """Sends the file located at path to the desired entity as a document"""
    await self.send_file(
        entity, path,
        force_document=True,
        progress_callback=self.upload_progress_callback
    )
    print('Document sent!')
def to_html(self, wrap_slash=False):
    """Render a Text MessageElement as html.

    :param wrap_slash: Whether to replace slashes with the slash plus the
        html <wbr> tag which will help to e.g. wrap html in small cells if
        it contains a long filename. Disabled by default as it may cause
        side effects if the text contains html markup.
    :type wrap_slash: bool

    :returns: Html representation of the Text MessageElement.
    :rtype: str
    """
    if self.text is None:
        return
    else:
        text = ''
        for t in self.text:
            text += t.to_html() + ' '
        text = ' '.join(text.split())
        if wrap_slash:
            # This is a hack to make text wrappable with long filenames TS 3.3
            text = text.replace('/', '/<wbr>')
            text = text.replace('\\', '\\<wbr>')
        return text
def assignees(self):
    """List of assignees to the activity."""
    if 'assignees' in self._json_data and \
            self._json_data.get('assignees_ids') == list():
        return []
    elif 'assignees' in self._json_data and \
            self._json_data.get('assignees_ids'):
        assignees_ids_str = ','.join(
            [str(id) for id in self._json_data.get('assignees_ids')])
        return self._client.users(id__in=assignees_ids_str, is_hidden=False)
    return None
def load(self, data):
    """Load a single row of data and convert it into entities and
    relations."""
    objs = {}
    for mapper in self.entities:
        objs[mapper.name] = mapper.load(self.loader, data)
    for mapper in self.relations:
        objs[mapper.name] = mapper.load(self.loader, data, objs)
def dip_pval_tabinterpol(dip, N):
    '''
    dip - dip value computed from dip_from_cdf
    N   - number of observations
    '''
    # if qDiptab_df is None:
    #     raise DataError("Tabulated p-values not available. See installation instructions.")

    if np.isnan(N) or N < 10:
        return np.nan

    qDiptab_dict = {
        '0': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714, 8: 0.0625,
              9: 0.0555555555555556, 10: 0.05, 15: 0.0341378172277919, 20: 0.033718563622065004,
              30: 0.0262674485075642, 50: 0.0218544781364545, 100: 0.0164852597438403,
              200: 0.0111236388849688, 500: 0.007554885975761959, 1000: 0.00541658127872122,
              2000: 0.0039043999745055702, 5000: 0.00245657785440433, 10000: 0.00174954269199566,
              20000: 0.00119458814106091, 40000: 0.000852415648011777, 72000: 0.000644400053256997},
        '0.01': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714, 8: 0.0625,
                 9: 0.0613018090298924, 10: 0.0610132555623269, 15: 0.0546284208048975, 20: 0.0474333740698401,
                 30: 0.0395871890405749, 50: 0.0314400501999916, 100: 0.022831985803043,
                 200: 0.0165017735429825, 500: 0.0106403461127515, 1000: 0.0076028674530018705,
                 2000: 0.0054166418179658294, 5000: 0.0034480928223332603, 10000: 0.00244595133885302,
                 20000: 0.00173435346896287, 40000: 0.00122883479310665, 72000: 0.000916872204484283},
        '0.02': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714, 8: 0.0656911994503283,
                 9: 0.0658615858179315, 10: 0.0651627333214016, 15: 0.0572191260231815, 20: 0.0490891387627092,
                 30: 0.0414574606741673, 50: 0.0329008160470834, 100: 0.0238917486442849,
                 200: 0.0172594157992489, 500: 0.0111255573208294, 1000: 0.00794987834644799,
                 2000: 0.0056617138625232296, 5000: 0.00360473943713036, 10000: 0.00255710802275612,
                 20000: 0.0018119443458468102, 40000: 0.0012846930445701802, 72000: 0.0009579329467655321},
        '0.05': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0725717816250742, 8: 0.0738651136071762,
                 9: 0.0732651142535317, 10: 0.0718321619656165, 15: 0.0610087367689692, 20: 0.052719998201553,
                 30: 0.0444462614069956, 50: 0.0353023819040016, 100: 0.0256559537977579,
                 200: 0.0185259426032926, 500: 0.0119353655328931, 1000: 0.0085216518343594,
                 2000: 0.00607120971135229, 5000: 0.0038632654801084897, 10000: 0.00273990955227265,
                 20000: 0.00194259470485893, 40000: 0.0013761765052555301, 72000: 0.00102641863872347},
        '0.1': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0817315478539489, 8: 0.0820045917762512,
                9: 0.0803941629593475, 10: 0.077966212182459, 15: 0.0642657137330444, 20: 0.0567795509056742,
                30: 0.0473998525042686, 50: 0.0377279973102482, 100: 0.0273987414570948,
                200: 0.0197917612637521, 500: 0.0127411306411808, 1000: 0.00909775605533253,
                2000: 0.0064762535755248, 5000: 0.00412089506752692, 10000: 0.0029225480567908,
                20000: 0.00207173719623868, 40000: 0.0014675150200632301, 72000: 0.0010949515421800199},
        '0.2': {4: 0.125, 5: 0.1, 6: 0.0924514470941933, 7: 0.0940590181922527, 8: 0.0922700601131892,
                9: 0.0890432420913848, 10: 0.0852835359834564, 15: 0.0692234107989591, 20: 0.0620134674468181,
                30: 0.0516677370374349, 50: 0.0410699984399582, 100: 0.0298109370830153,
                200: 0.0215233745778454, 500: 0.0138524542751814, 1000: 0.00988924521014078,
                2000: 0.00703573098590029, 5000: 0.00447640050137479, 10000: 0.00317374638422465,
                20000: 0.00224993202086955, 40000: 0.00159376453672466, 72000: 0.00118904090369415},
        '0.3': {4: 0.125, 5: 0.1, 6: 0.103913431059949, 7: 0.10324449080087102, 8: 0.0996737189599363,
                9: 0.0950811420297928, 10: 0.0903204173707099, 15: 0.0745462114365167, 20: 0.0660163872069048,
                30: 0.0551037519001622, 50: 0.0437704598622665, 100: 0.0317771496530253,
                200: 0.0229259769870428, 500: 0.0147536004288476, 1000: 0.0105309297090482,
                2000: 0.007494212545892991, 5000: 0.00476555693102276, 10000: 0.00338072258533527,
                20000: 0.00239520831473419, 40000: 0.00169668445506151, 72000: 0.00126575197699874},
        '0.4': {4: 0.125, 5: 0.10872059357632902, 6: 0.113885220640212, 7: 0.110964599995697, 8: 0.10573353180273701,
                9: 0.0999380897811046, 10: 0.0943334983745117, 15: 0.0792030878981762, 20: 0.0696506075066401,
                30: 0.058265005347492994, 50: 0.0462925642671299, 100: 0.0336073821590387,
                200: 0.024243848341112, 500: 0.0155963185751048, 1000: 0.0111322726797384,
                2000: 0.007920878896017329, 5000: 0.005037040297500721, 10000: 0.0035724387653598205,
                20000: 0.00253036792824665, 40000: 0.0017925341833790601, 72000: 0.00133750966361506},
        '0.5': {4: 0.125, 5: 0.12156379802641401, 6: 0.123071187137781, 7: 0.11780784650433501, 8: 0.11103512984770501,
                9: 0.10415356007586801, 10: 0.0977817630384725, 15: 0.083621033469191, 20: 0.0733437740592714,
                30: 0.0614510857304343, 50: 0.048851155289608, 100: 0.0354621760592113,
                200: 0.025584358256487003, 500: 0.0164519238025286, 1000: 0.0117439009052552,
                2000: 0.008355737247680059, 5000: 0.0053123924740821294, 10000: 0.00376734715752209,
                20000: 0.00266863168718114, 40000: 0.00189061261635977, 72000: 0.00141049709228472},
        '0.6': {4: 0.125, 5: 0.134318918697053, 6: 0.13186973390253, 7: 0.124216086833531, 8: 0.11592005574998801,
                9: 0.10800780236193198, 10: 0.102180866696628, 15: 0.0881198482202905, 20: 0.0776460662880254,
                30: 0.0649164408053978, 50: 0.0516145897865757, 100: 0.0374805844550272,
                200: 0.0270252129816288, 500: 0.017383057902553, 1000: 0.012405033293814,
                2000: 0.00882439333812351, 5000: 0.00560929919359959, 10000: 0.00397885007249132,
                20000: 0.0028181999035216, 40000: 0.00199645471886179, 72000: 0.00148936709298802},
        '0.7': {4: 0.13255954878268902, 5: 0.14729879897625198, 6: 0.140564796497941, 7: 0.130409013968317, 8: 0.120561479262465,
                9: 0.112512617124951, 10: 0.10996094814295099, 15: 0.093124666680253, 20: 0.0824558407118372,
                30: 0.0689178762425442, 50: 0.0548121932066019, 100: 0.0398046179116599,
                200: 0.0286920262150517, 500: 0.0184503949887735, 1000: 0.0131684179320803,
                2000: 0.009367858207170609, 5000: 0.00595352728377949, 10000: 0.00422430013176233,
                20000: 0.00299137548142077, 40000: 0.00211929748381704, 72000: 0.00158027541945626},
        '0.8': {4: 0.15749736904023498, 5: 0.161085025702604, 6: 0.14941924112913002, 7: 0.136639642123068, 8: 0.125558759034845,
                9: 0.12291503348081699, 10: 0.11884476721158699, 15: 0.0996694393390689, 20: 0.08834462700173701,
                30: 0.0739249074078291, 50: 0.0588230482851366, 100: 0.0427283846799166,
                200: 0.0308006766341406, 500: 0.0198162679782071, 1000: 0.0141377942603047,
                2000: 0.01005604603884, 5000: 0.00639092280563517, 10000: 0.00453437508148542,
                20000: 0.00321024899920135, 40000: 0.0022745769870358102, 72000: 0.00169651643860074},
        '0.9': {4: 0.18740187880755899, 5: 0.176811998476076, 6: 0.159137064572627, 7: 0.144240669035124, 8: 0.141841067033899,
                9: 0.136412639387084, 10: 0.130462149644819, 15: 0.11008749690090598, 20: 0.0972346018122903,
                30: 0.0814791379390127, 50: 0.0649136324046767, 100: 0.047152783315718,
                200: 0.0339967814293504, 500: 0.0218781313182203, 1000: 0.0156148055023058,
                2000: 0.0111019116837591, 5000: 0.00705566126234625, 10000: 0.00500178808402368,
                20000: 0.00354362220314155, 40000: 0.00250999080890397, 72000: 0.0018730618472582602},
        '0.95': {4: 0.20726978858735998, 5: 0.18639179602794398, 6: 0.164769608513302, 7: 0.159903395678336, 8: 0.153978303998561,
                 9: 0.14660378495401902, 10: 0.139611395137099, 15: 0.118760769203664, 20: 0.105130218270636,
                 30: 0.0881689143126666, 50: 0.0702737877191269, 100: 0.0511279442868827,
                 200: 0.0368418413878307, 500: 0.0237294742633411, 1000: 0.0169343970067564,
                 2000: 0.0120380990328341, 5000: 0.0076506368153935, 10000: 0.00542372242836395,
                 20000: 0.00384330190244679, 40000: 0.00272375073486223, 72000: 0.00203178401610555},
        '0.98': {4: 0.22375580462922195, 5: 0.19361253363045, 6: 0.17917654739278197, 7: 0.17519655327122302, 8: 0.16597856724751,
                 9: 0.157084065653166, 10: 0.150961728882481, 15: 0.128890475210055, 20: 0.11430970428125302,
                 30: 0.0960564383013644, 50: 0.0767095886079179, 100: 0.0558022052195208,
                 200: 0.0402729850316397, 500: 0.025919578977657003, 1000: 0.018513067368104,
                 2000: 0.0131721010552576, 5000: 0.00836821687047215, 10000: 0.00592656681022859,
                 20000: 0.00420258799378253, 40000: 0.00298072958568387, 72000: 0.00222356097506054},
        '0.99': {4: 0.231796258864192, 5: 0.19650913979884502, 6: 0.191862827995563, 7: 0.184118659121501, 8: 0.172988528276759,
                 9: 0.164164643657217, 10: 0.159684158858235, 15: 0.13598356863636, 20: 0.120624043335821,
                 30: 0.101478558893837, 50: 0.0811998415355918, 100: 0.059024132304226,
                 200: 0.0426864799777448, 500: 0.0274518022761997, 1000: 0.0196080260483234,
                 2000: 0.0139655122281969, 5000: 0.00886357892854914, 10000: 0.00628034732880374,
                 20000: 0.00445774902155711, 40000: 0.00315942194040388, 72000: 0.00235782814777627},
        '0.995': {4: 0.23726374382677898, 5: 0.198159967287576, 6: 0.20210197104296804, 7: 0.19101439617430602, 8: 0.179010413496374,
                  9: 0.172821674582338, 10: 0.16719524735674, 15: 0.14245248368127697, 20: 0.126552378036739,
                  30: 0.10650487144103, 50: 0.0852854646662134, 100: 0.0620425065165146,
                  200: 0.044958959158761, 500: 0.0288986369564301, 1000: 0.0206489568587364,
                  2000: 0.0146889122204488, 5000: 0.00934162787186159, 10000: 0.00661030641550873,
                  20000: 0.00469461513212743, 40000: 0.0033273652798148, 72000: 0.00248343580127067},
        '0.998': {4: 0.241992892688593, 5: 0.19924427936243302, 6: 0.213015781111186, 7: 0.198216795232182, 8: 0.186504388711178,
                  9: 0.182555283567818, 10: 0.175419540856082, 15: 0.15017281653074202, 20: 0.13360135382395,
                  30: 0.112724636524262, 50: 0.0904847827490294, 100: 0.0658016011466099,
                  200: 0.0477643873749449, 500: 0.0306813505050163, 1000: 0.0219285176765082,
                  2000: 0.0156076779647454, 5000: 0.009932186363240291, 10000: 0.00702254699967648,
                  20000: 0.004994160691291679, 40000: 0.00353988965698579, 72000: 0.00264210826339498},
        '0.999': {4: 0.244369839049632, 5: 0.199617527406166, 6: 0.219518627282415, 7: 0.20234101074826102, 8: 0.19448404115794,
                  9: 0.188658833121906, 10: 0.180611195797351, 15: 0.15545613369632802, 20: 0.138569903791767,
                  30: 0.117164140184417, 50: 0.0940930106666244, 100: 0.0684479731118028,
                  200: 0.0497198001867437, 500: 0.0320170996823189, 1000: 0.0228689168972669,
                  2000: 0.0162685615996248, 5000: 0.0103498795291629, 10000: 0.0073182262815645795,
                  20000: 0.00520917757743218, 40000: 0.00369400045486625, 72000: 0.0027524322157581},
        '0.9995': {4: 0.245966625504691, 5: 0.19980094149902802, 6: 0.22433904739444602, 7: 0.205377566346832, 8: 0.200864297005026,
                   9: 0.19408912076824603, 10: 0.18528641605039603, 15: 0.160896499106958, 20: 0.14336916123968,
                   30: 0.12142585990898701, 50: 0.0974904344916743, 100: 0.0709169443994193,
                   200: 0.0516114611801451, 500: 0.0332452747332959, 1000: 0.023738710122235003,
                   2000: 0.0168874937789415, 5000: 0.0107780907076862, 10000: 0.0076065423418208,
                   20000: 0.005403962359243721, 40000: 0.00383345715372182, 72000: 0.0028608570740143},
        '0.9998': {4: 0.24743959723326198, 5: 0.19991708183427104, 6: 0.22944933215424101, 7: 0.208306562526874, 8: 0.20884999705022897,
                   9: 0.19915700809389003, 10: 0.19120308390504398, 15: 0.16697940794624802, 20: 0.148940116394883,
                   30: 0.126733051889401, 50: 0.10228420428399698, 100: 0.0741183486081263,
                   200: 0.0540543978864652, 500: 0.0348335698576168, 1000: 0.0248334158891432,
                   2000: 0.0176505093388153, 5000: 0.0113184316868283, 10000: 0.00795640367207482,
                   20000: 0.00564540201704594, 40000: 0.0040079346963469605, 72000: 0.00298695044508003},
        '0.9999': {4: 0.24823065965663801, 5: 0.19995902909307503, 6: 0.232714530449602, 7: 0.209866047852379, 8: 0.212556040406219,
                   9: 0.20288159843655804, 10: 0.19580515933918397, 15: 0.17111793515551002, 20: 0.152832538183622,
                   30: 0.131198578897542, 50: 0.104680624334611, 100: 0.0762579402903838,
                   200: 0.0558704526182638, 500: 0.0359832389317461, 1000: 0.0256126573433596,
                   2000: 0.0181944265400504, 5000: 0.0117329446468571, 10000: 0.0082270524584354,
                   20000: 0.00580460792299214, 40000: 0.00414892737222885, 72000: 0.00309340092038059},
        '0.99995': {4: 0.248754269146416, 5: 0.19997839537608197, 6: 0.236548128358969, 7: 0.21096757693345103, 8: 0.21714917413729898,
                    9: 0.205979795735129, 10: 0.20029398089673, 15: 0.17590050570443203, 20: 0.15601016361897102,
                    30: 0.133691739483444, 50: 0.107496694235039, 100: 0.0785735967934979,
                    200: 0.0573877056330228, 500: 0.0369051995840645, 1000: 0.0265491336936829,
                    2000: 0.0186226037818523, 5000: 0.0119995948968375, 10000: 0.00852240989786251,
                    20000: 0.00599774739593151, 40000: 0.0042839159079761, 72000: 0.00319932767198801},
        '0.99998': {4: 0.24930203997425898, 5: 0.199993151405815, 6: 0.2390887911995, 7: 0.212233348558702, 8: 0.22170007640450304,
                    9: 0.21054115498898, 10: 0.20565108964621898, 15: 0.18185667601316602, 20: 0.16131922583934502,
                    30: 0.137831637950694, 50: 0.11140887547015, 100: 0.0813458356889133,
                    200: 0.0593365901653878, 500: 0.0387221159256424, 1000: 0.027578430100535997,
                    2000: 0.0193001796565433, 5000: 0.0124410052027886, 10000: 0.00892863905540303,
                    20000: 0.00633099254378114, 40000: 0.0044187010443287895, 72000: 0.00332688234611187},
        '0.99999': {4: 0.24945965232322498, 5: 0.199995525025673, 6: 0.24010356643629502, 7: 0.21266103831250602, 8: 0.225000835357532,
                    9: 0.21180033095039003, 10: 0.209682048785853, 15: 0.185743454151004, 20: 0.165568255916749,
                    30: 0.14155750962435099, 50: 0.113536607717411, 100: 0.0832963013755522,
                    200: 0.0607646310473911, 500: 0.039930259057650005, 1000: 0.0284430733108,
                    2000: 0.0196241518040617, 5000: 0.0129467396733128, 10000: 0.009138539330002129,
                    20000: 0.00656987109386762, 40000: 0.00450818604569179, 72000: 0.00339316094477355},
        '1': {4: 0.24974836247845, 5: 0.199999835639211, 6: 0.24467288361776798, 7: 0.21353618608817, 8: 0.23377291968768302,
              9: 0.21537991431762502, 10: 0.221530282182963, 15: 0.19224056333056197, 20: 0.175834459522789,
              30: 0.163833046059817, 50: 0.11788671686531199, 100: 0.0926780423096737,
              200: 0.0705309107882395, 500: 0.0431448163617178, 1000: 0.0313640941982108,
              2000: 0.0213081254074584, 5000: 0.014396063834027, 10000: 0.00952234579566773,
              20000: 0.006858294480462271, 40000: 0.00513477467565583, 72000: 0.00376331697005859}}

    qDiptab_df = pd.DataFrame(qDiptab_dict)

    diptable = np.array(qDiptab_df)
    ps = np.array(qDiptab_df.columns).astype(float)
    Ns = np.array(qDiptab_df.index)

    if N >= Ns[-1]:
        dip = transform_dip_to_other_nbr_pts(dip, N, Ns[-1] - 0.1)
        N = Ns[-1] - 0.1

    iNlow = np.nonzero(Ns < N)[0][-1]
    qN = (N - Ns[iNlow]) / (Ns[iNlow + 1] - Ns[iNlow])
    dip_sqrtN = np.sqrt(N) * dip
    dip_interpol_sqrtN = (
        np.sqrt(Ns[iNlow]) * diptable[iNlow, :] + qN * (
            np.sqrt(Ns[iNlow + 1]) * diptable[iNlow + 1, :] -
            np.sqrt(Ns[iNlow]) * diptable[iNlow, :]))

    if not (dip_interpol_sqrtN < dip_sqrtN).any():
        return 1

    iplow = np.nonzero(dip_interpol_sqrtN < dip_sqrtN)[0][-1]
    if iplow == len(dip_interpol_sqrtN) - 1:
        return 0

    qp = (dip_sqrtN - dip_interpol_sqrtN[iplow]) / (
        dip_interpol_sqrtN[iplow + 1] - dip_interpol_sqrtN[iplow])
    p_interpol = ps[iplow] + qp * (ps[iplow + 1] - ps[iplow])

    return 1 - p_interpol
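A usage sketch; `dip_from_cdf` belongs to the same module and the numbers are illustrative:

# dip statistic computed elsewhere (e.g. via dip_from_cdf) for N = 200 points
pval = dip_pval_tabinterpol(0.035, 200)
# small p-values indicate evidence against unimodality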
def conditional_http_tween_factory(handler, registry):
    """Tween that adds ETag headers and tells Pyramid to enable conditional
    responses where appropriate."""
    settings = registry.settings if hasattr(registry, 'settings') else {}
    if 'generate_etag_for.list' in settings:
        route_names = settings.get('generate_etag_for.list').split()
        GENERATE_ETAG_ROUTE_NAMES.update(route_names)

    def conditional_http_tween(request):
        response = handler(request)
        if request.matched_route.name in GENERATE_ETAG_ROUTE_NAMES:
            # If the Last-Modified header has been set, we want to enable
            # the conditional response processing.
            if response.last_modified is not None:
                response.conditional_response = True

            # We want to only enable the conditional machinery if either we
            # were given an explicit ETag header by the view or we have a
            # buffered response and can generate the ETag header ourself.
            if response.etag is not None:
                response.conditional_response = True
            elif (isinstance(response.app_iter, Sequence) and
                    len(response.app_iter) == 1) and response.body is not None:
                response.conditional_response = True
                response.md5_etag()
        return response
    return conditional_http_tween
def main(url, lamson_host, lamson_port, lamson_debug):
    """Create, connect, and block on the Lamson worker."""
    try:
        worker = LamsonWorker(url=url,
                              lamson_host=lamson_host,
                              lamson_port=lamson_port,
                              lamson_debug=lamson_debug)
        worker.connect()
        worker.run_forever()
    except KeyboardInterrupt:
        worker.close()
def deleteRows(self, login, tableName, startRow, endRow):
    """
    Parameters:
     - login
     - tableName
     - startRow
     - endRow
    """
    self.send_deleteRows(login, tableName, startRow, endRow)
    self.recv_deleteRows()
def list_to_str(lst):
    """
    Turn a list into a comma- and/or and-separated string.

    Parameters
    ----------
    lst : :obj:`list`
        A list of strings to join into a single string.

    Returns
    -------
    str_ : :obj:`str`
        A string with commas and/or ands separating the elements from
        ``lst``.
    """
    if len(lst) == 1:
        str_ = lst[0]
    elif len(lst) == 2:
        str_ = ' and '.join(lst)
    elif len(lst) > 2:
        str_ = ', '.join(lst[:-1])
        str_ += ', and {0}'.format(lst[-1])
    else:
        raise ValueError('List of length 0 provided.')
    return str_
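The three list lengths behave as follows:

list_to_str(['apples'])                    # 'apples'
list_to_str(['apples', 'pears'])           # 'apples and pears'
list_to_str(['apples', 'pears', 'plums'])  # 'apples, pears, and plums'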
def as_dict(self, voigt=False):
    """
    Serializes the tensor object.

    Args:
        voigt (bool): flag for whether to store entries in voigt-notation.
            Defaults to false, as information may be lost in conversion.

    Returns (Dict):
        serialized format tensor object
    """
    input_array = self.voigt if voigt else self
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__,
         "input_array": input_array.tolist()}
    if voigt:
        d.update({"voigt": voigt})
    return d
def regulartype(prompt_template="default"):
    """Echo each character typed. Unlike magictype, this echoes the
    characters the user is pressing.

    Returns:
        command_string | The command to be passed to the shell to run.
                       | This is typed by the user.
    """
    echo_prompt(prompt_template)
    command_string = ""
    cursor_position = 0
    with raw_mode():
        while True:
            in_char = getchar()
            if in_char in {ESC, CTRLC}:
                echo(carriage_return=True)
                raise click.Abort()
            elif in_char == TAB:
                echo("\r", nl=True)
                return in_char
            elif in_char == BACKSPACE:
                if cursor_position > 0:
                    echo("\b \b", nl=False)
                    command_string = command_string[:-1]
                    cursor_position -= 1
            elif in_char in RETURNS:
                echo("\r", nl=True)
                return command_string
            elif in_char == CTRLZ and hasattr(signal, "SIGTSTP"):
                # Background process
                os.kill(0, signal.SIGTSTP)
                # When doitlive is back in foreground, clear the terminal
                # and resume where we left off
                click.clear()
                echo_prompt(prompt_template)
            else:
                echo(in_char, nl=False)
                command_string += in_char
                cursor_position += 1
def convert_ligatures(text_string):
    '''
    Converts Latin character references within text_string to their
    corresponding unicode characters and returns the converted string as
    type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a string or NoneType not be passed as an
      argument
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        for i in range(0, len(LIGATURES)):
            text_string = text_string.replace(
                LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
        return text_string
    else:
        raise InputError("none type or string not passed as an argument")
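For example, assuming the module's `LIGATURES` table maps entries such as the 'ﬁ' ligature to 'fi':

convert_ligatures("ﬁnancial ﬂow")  # 'financial flow'
convert_ligatures(None)            # ''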
def follow_directories_loaded(self, fname):
    """Follow directories loaded during startup"""
    if self._to_be_loaded is None:
        return
    path = osp.normpath(to_text_string(fname))
    if path in self._to_be_loaded:
        self._to_be_loaded.remove(path)
    if self._to_be_loaded is not None and len(self._to_be_loaded) == 0:
        self.fsmodel.directoryLoaded.disconnect(
            self.follow_directories_loaded)
        if self._scrollbar_positions is not None:
            # The tree view needs some time to render branches:
            QTimer.singleShot(50, self.restore_scrollbar_positions)
def create_document(
    self,
    parent,
    collection_id,
    document_id,
    document,
    mask=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Creates a new document.

    Example:
        >>> from google.cloud import firestore_v1beta1
        >>>
        >>> client = firestore_v1beta1.FirestoreClient()
        >>>
        >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
        >>>
        >>> # TODO: Initialize `collection_id`:
        >>> collection_id = ''
        >>>
        >>> # TODO: Initialize `document_id`:
        >>> document_id = ''
        >>>
        >>> # TODO: Initialize `document`:
        >>> document = {}
        >>>
        >>> response = client.create_document(parent, collection_id, document_id, document)

    Args:
        parent (str): The parent resource. For example:
            ``projects/{project_id}/databases/{database_id}/documents`` or
            ``projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}``
        collection_id (str): The collection ID, relative to ``parent``, to
            list. For example: ``chatrooms``.
        document_id (str): The client-assigned document ID to use for this
            document. Optional. If not specified, an ID will be assigned by
            the service.
        document (Union[dict, ~google.cloud.firestore_v1beta1.types.Document]):
            The document to create. ``name`` must not be set.

            If a dict is provided, it must be of the same form as the
            protobuf message
            :class:`~google.cloud.firestore_v1beta1.types.Document`
        mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]):
            The fields to return. If not set, returns all fields.

            If the document has a field that is not present in this mask,
            that field will not be returned in the response.

            If a dict is provided, it must be of the same form as the
            protobuf message
            :class:`~google.cloud.firestore_v1beta1.types.DocumentMask`
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.firestore_v1beta1.types.Document` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "create_document" not in self._inner_api_calls:
        self._inner_api_calls[
            "create_document"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.create_document,
            default_retry=self._method_configs["CreateDocument"].retry,
            default_timeout=self._method_configs["CreateDocument"].timeout,
            client_info=self._client_info,
        )

    request = firestore_pb2.CreateDocumentRequest(
        parent=parent,
        collection_id=collection_id,
        document_id=document_id,
        document=document,
        mask=mask,
    )
    return self._inner_api_calls["create_document"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Creates a new document. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') >>> >>> # TODO: Initialize `collection_id`: >>> collection_id = '' >>> >>> # TODO: Initialize `document_id`: >>> document_id = '' >>> >>> # TODO: Initialize `document`: >>> document = {} >>> >>> response = client.create_document(parent, collection_id, document_id, document) Args: parent (str): The parent resource. For example: ``projects/{project_id}/databases/{database_id}/documents`` or ``projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}`` collection_id (str): The collection ID, relative to ``parent``, to list. For example: ``chatrooms``. document_id (str): The client-assigned document ID to use for this document. Optional. If not specified, an ID will be assigned by the service. document (Union[dict, ~google.cloud.firestore_v1beta1.types.Document]): The document to create. ``name`` must not be set. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Document` mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If the document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.firestore_v1beta1.types.Document` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def screen_to_client(self, x, y):
    """
    Translates window screen coordinates to client coordinates.

    @note: This is a simplified interface to some of the functionality of
        the L{win32.Point} class.

    @see: L{win32.Point.screen_to_client}

    @type  x: int
    @param x: Horizontal coordinate.
    @type  y: int
    @param y: Vertical coordinate.

    @rtype:  tuple( int, int )
    @return: Translated coordinates in a tuple (x, y).

    @raise WindowsError: An error occurred while processing this request.
    """
    return tuple( win32.ScreenToClient( self.get_handle(), (x, y) ) )
Translates window screen coordinates to client coordinates.

@note: This is a simplified interface to some of the functionality of
    the L{win32.Point} class.

@see: L{win32.Point.screen_to_client}

@type  x: int
@param x: Horizontal coordinate.
@type  y: int
@param y: Vertical coordinate.

@rtype:  tuple( int, int )
@return: Translated coordinates in a tuple (x, y).

@raise WindowsError: An error occurred while processing this request.
def get_build_properties(self, project, build_id, filter=None): """GetBuildProperties. [Preview API] Gets properties for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param [str] filter: A comma-delimited list of properties. If specified, filters to these specific properties. :rtype: :class:`<object> <azure.devops.v5_0.build.models.object>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if filter is not None: filter = ",".join(filter) query_parameters['filter'] = self._serialize.query('filter', filter, 'str') response = self._send(http_method='GET', location_id='0a6312e9-0627-49b7-8083-7d74a64849c9', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('object', response)
GetBuildProperties. [Preview API] Gets properties for a build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param [str] filter: A comma-delimited list of properties. If specified, filters to these specific properties. :rtype: :class:`<object> <azure.devops.v5_0.build.models.object>`
def id_fix(value):
    """ fix @prefix values for ttl """
    if not value.startswith('KSC_M'):
        value = value.replace(':', '_')
    if value.startswith(('ERO', 'OBI', 'GO', 'UBERON', 'IAO')):
        value = 'obo:' + value
    elif value.startswith(('birnlex', 'nlx')):
        value = 'NIFSTD:' + value
    elif value.startswith('MESH'):
        value = ':'.join(value.split('_'))
    else:
        value = ':' + value
    return OntId(value).URIRef
fix @prefix values for ttl
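A few worked traces make the branch logic above concrete. The CURIEs below are hypothetical examples chosen to hit each branch; each call actually returns OntId(value).URIRef, so only the intermediate string values are shown in the comments:

id_fix('GO:0008150')    # ':' -> '_' gives 'GO_0008150', then prefixed to 'obo:GO_0008150'
id_fix('birnlex_796')   # birnlex branch -> 'NIFSTD:birnlex_796'
id_fix('MESH:D000368')  # becomes 'MESH_D000368', then rejoined as 'MESH:D000368'
id_fix('local:term')    # no known prefix -> ':local_term'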
def first(self): """ Return the first element. """ if self.mode == 'local': return self.values[0] if self.mode == 'spark': return self.values.first().toarray()
Return the first element.
def __process_requests_stack(self):
    """
    Process the requests stack.
    """
    while self.__requests_stack:
        try:
            exec(self.__requests_stack.popleft(), self.__locals)
        except Exception as error:
            umbra.exceptions.notify_exception_handler(error)
Process the requests stack.
def Random(self):
    """Chooses a random element from this PMF.

    Returns:
        float value from the Pmf
    """
    if len(self.d) == 0:
        raise ValueError('Pmf contains no values.')

    target = random.random()
    total = 0.0
    for x, p in self.d.items():
        total += p
        if total >= target:
            return x

    # we shouldn't get here
    assert False
Chooses a random element from this PMF. Returns: float value from the Pmf
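The sampling walk above is a standard inverse-CDF draw over a discrete distribution. A minimal standalone sketch of the same idea, assuming only a plain dict mapping values to probabilities that sum to 1:

import random

def sample_pmf(d):
    # Walk the cumulative probability until it crosses a uniform draw.
    target = random.random()
    total = 0.0
    for x, p in d.items():
        total += p
        if total >= target:
            return x
    raise AssertionError('probabilities must sum to 1')

sample_pmf({'heads': 0.5, 'tails': 0.5})  # -> 'heads' or 'tails'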
def send_msg(self, msg, wait_nak=True, wait_timeout=WAIT_TIMEOUT):
    """Place a message on the send queue for sending.

    Messages are sent in the order they are placed in the queue.
    """
    msg_info = MessageInfo(msg=msg, wait_nak=wait_nak,
                           wait_timeout=wait_timeout)
    _LOGGER.debug("Queueing msg: %s", msg)
    self._send_queue.put_nowait(msg_info)
Place a message on the send queue for sending.

Messages are sent in the order they are placed in the queue.
def get_wsgi_app(self, name=None, defaults=None): """ Reads the configuration source and finds and loads a WSGI application defined by the entry with name ``name`` per the PasteDeploy configuration format and loading mechanism. :param name: The named WSGI app to find, load and return. Defaults to ``None`` which becomes ``main`` inside :func:`paste.deploy.loadapp`. :param defaults: The ``global_conf`` that will be used during app instantiation. :return: A WSGI application. """ name = self._maybe_get_default_name(name) defaults = self._get_defaults(defaults) return loadapp( self.pastedeploy_spec, name=name, relative_to=self.relative_to, global_conf=defaults, )
Reads the configuration source and finds and loads a WSGI application defined by the entry with name ``name`` per the PasteDeploy configuration format and loading mechanism. :param name: The named WSGI app to find, load and return. Defaults to ``None`` which becomes ``main`` inside :func:`paste.deploy.loadapp`. :param defaults: The ``global_conf`` that will be used during app instantiation. :return: A WSGI application.
def echo_percent(self,transferred=1, status=None): '''Sample usage: f=lambda x,y:x+y ldata = range(10) toBeTransferred = reduce(f,range(10)) import time progress = ProgressBarUtils("viewbar", toBeTransferred=toBeTransferred, run_status="正在下载", fin_status="下载完成") for i in ldata: time.sleep(0.1) progress.echo_percent(i) ''' self.transferred += transferred self.status = status or self.status end_str = "\r" if self.transferred == self.toBeTransferred: end_str = '\n' self.status = status or self.fin_status print(self.__get_bar() + end_str)
Sample usage: f=lambda x,y:x+y ldata = range(10) toBeTransferred = reduce(f,range(10)) import time progress = ProgressBarUtils("viewbar", toBeTransferred=toBeTransferred, run_status="正在下载", fin_status="下载完成") for i in ldata: time.sleep(0.1) progress.echo_percent(i)
def is_subset(self, other): """Check that every element in self has a count <= in other. Args: other (Set) """ if isinstance(other, _basebag): for elem, count in self.counts(): if not count <= other.count(elem): return False else: for elem in self: if self.count(elem) > 1 or elem not in other: return False return True
Check that every element in self has a count <= in other. Args: other (Set)
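The count comparison above is multiset containment rather than plain set containment. A quick illustration using collections.Counter as a stand-in for the bag type (the strings are arbitrary examples):

from collections import Counter

small, big = Counter('aab'), Counter('aaabbc')
all(small[e] <= big[e] for e in small)  # True: every element fits within big's counts
all(big[e] <= small[e] for e in big)    # False: 'c' and the third 'a' have nowhere to go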
def pxe_netboot(self, filename): """Specify which file ipxe should load during the netboot.""" new_port = { 'extra_dhcp_opts': [ {'opt_name': 'bootfile-name', 'opt_value': 'http://192.0.2.240:8088/' + filename, 'ip_version': 4, }, {'opt_name': 'tftp-server', 'opt_value': '192.0.2.240', 'ip_version': '4'}, {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.240', 'ip_version': '4'} ] } self.neutron.update_port(self._provision_port_id, {'port': new_port})
Specify which file ipxe should load during the netboot.
def sqrt(scalar): """Square root of a :class:`Scalar` or scalar value This always returns a :class:`Scalar`, and uses a symbolic square root if possible (i.e., for non-floats):: >>> sqrt(2) sqrt(2) >>> sqrt(2.0) 1.414213... For a :class:`ScalarExpression` argument, it returns a :class:`ScalarPower` instance:: >>> braket = KetSymbol('Psi', hs=0).dag() * KetSymbol('Phi', hs=0) >>> nrm = sqrt(braket * braket.dag()) >>> print(srepr(nrm, indented=True)) ScalarPower( ScalarTimes( BraKet( KetSymbol( 'Phi', hs=LocalSpace( '0')), KetSymbol( 'Psi', hs=LocalSpace( '0'))), BraKet( KetSymbol( 'Psi', hs=LocalSpace( '0')), KetSymbol( 'Phi', hs=LocalSpace( '0')))), ScalarValue( Rational(1, 2))) """ if isinstance(scalar, ScalarValue): scalar = scalar.val if scalar == 1: return One elif scalar == 0: return Zero elif isinstance(scalar, (float, complex, complex128, float64)): return ScalarValue.create(numpy.sqrt(scalar)) elif isinstance(scalar, (int, sympy.Basic, int64)): return ScalarValue.create(sympy.sqrt(scalar)) elif isinstance(scalar, Scalar): return scalar**(sympy.sympify(1) / 2) else: raise TypeError("Unknown type of scalar: %r" % type(scalar))
Square root of a :class:`Scalar` or scalar value This always returns a :class:`Scalar`, and uses a symbolic square root if possible (i.e., for non-floats):: >>> sqrt(2) sqrt(2) >>> sqrt(2.0) 1.414213... For a :class:`ScalarExpression` argument, it returns a :class:`ScalarPower` instance:: >>> braket = KetSymbol('Psi', hs=0).dag() * KetSymbol('Phi', hs=0) >>> nrm = sqrt(braket * braket.dag()) >>> print(srepr(nrm, indented=True)) ScalarPower( ScalarTimes( BraKet( KetSymbol( 'Phi', hs=LocalSpace( '0')), KetSymbol( 'Psi', hs=LocalSpace( '0'))), BraKet( KetSymbol( 'Psi', hs=LocalSpace( '0')), KetSymbol( 'Phi', hs=LocalSpace( '0')))), ScalarValue( Rational(1, 2)))
def _log(self, content): """ Write a string to the log """ self._buffer += content if self._auto_flush: self.flush()
Write a string to the log
def ProcessHuntClientCrash(flow_obj, client_crash_info):
  """Processes a client crash triggered by a given hunt-induced flow."""
  if not hunt.IsLegacyHunt(flow_obj.parent_hunt_id):
    hunt.StopHuntIfCrashLimitExceeded(flow_obj.parent_hunt_id)
    return

  hunt_urn = rdfvalue.RDFURN("hunts").Add(flow_obj.parent_hunt_id)
  with aff4.FACTORY.Open(hunt_urn, mode="rw") as fd:
    # Legacy AFF4 code expects token to be set.
    fd.token = access_control.ACLToken(username=fd.creator)

    fd.RegisterCrash(client_crash_info)
Processes a client crash triggered by a given hunt-induced flow.
def deserialize(cls, did_doc: dict) -> 'DIDDoc':
    """
    Construct DIDDoc object from dict representation.

    Raise BadIdentifier for bad DID.

    :param did_doc: DIDDoc dict representation.
    :return: DIDDoc from input data.
    """

    rv = None
    if 'id' in did_doc:
        rv = DIDDoc(did_doc['id'])
    else:  # get DID to serve as DID document identifier from first public key
        if 'publicKey' not in did_doc:
            LOGGER.debug('DIDDoc.deserialize <!< no identifier in DID document')
            raise AbsentDIDDocItem('No identifier in DID document')
        for pubkey in did_doc['publicKey']:
            pubkey_did = canon_did(resource(pubkey['id']))
            if ok_did(pubkey_did):
                rv = DIDDoc(pubkey_did)
                break
        else:
            LOGGER.debug('DIDDoc.deserialize <!< no identifier in DID document')
            raise AbsentDIDDocItem('No identifier in DID document')

    for pubkey in did_doc['publicKey']:  # include public keys and authentication keys by reference
        pubkey_type = PublicKeyType.get(pubkey['type'])
        authn = any(
            canon_ref(rv.did, ak.get('publicKey', '')) == canon_ref(rv.did, pubkey['id'])
            for ak in did_doc.get('authentication', {}) if isinstance(ak.get('publicKey', None), str))
        key = PublicKey(  # initialization canonicalizes id
            rv.did,
            pubkey['id'],
            pubkey[pubkey_type.specifier],
            pubkey_type,
            canon_did(pubkey['controller']),
            authn)
        rv.pubkey[key.id] = key

    for akey in did_doc.get('authentication', {}):  # include embedded authentication keys
        if akey.get('publicKey', None):
            continue  # already included by reference with the public keys above
        pubkey_type = PublicKeyType.get(akey['type'])
        key = PublicKey(  # initialization canonicalizes id
            rv.did,
            akey['id'],
            akey[pubkey_type.specifier],
            pubkey_type,
            canon_did(akey['controller']),
            True)
        rv.pubkey[key.id] = key

    for service in did_doc.get('service', {}):
        endpoint = service['serviceEndpoint']
        svc = Service(  # initialization canonicalizes id
            rv.did,
            service.get('id', canon_ref(rv.did, 'assigned-service-{}'.format(len(rv.service)), ';')),
            service['type'],
            rv.add_service_pubkeys(service, 'recipientKeys'),
            rv.add_service_pubkeys(service, ['mediatorKeys', 'routingKeys']),
            canon_ref(rv.did, endpoint, ';') if ';' in endpoint else endpoint,
            service.get('priority', None))
        rv.service[svc.id] = svc

    return rv
Construct DIDDoc object from dict representation.

Raise BadIdentifier for bad DID.

:param did_doc: DIDDoc dict representation.
:return: DIDDoc from input data.
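Sketched from the keys the parser above reads, a minimal input document looks roughly like the dict below. The DID, key value, and endpoint are placeholders, and the assumption that Ed25519VerificationKey2018 uses a publicKeyBase58 specifier follows the common DID-document convention rather than anything stated in the code:

did_doc = {
    'id': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
    'publicKey': [{
        'id': '3',
        'type': 'Ed25519VerificationKey2018',  # must map to a PublicKeyType
        'controller': 'did:sov:LjgpST2rjsoxYegQDRm7EL',
        'publicKeyBase58': 'GjZWsBLgZCR18aL468JAT7w9CZRiBnpxUPPgyQxh4voa',
    }],
    'service': [{
        'type': 'IndyAgent',
        'recipientKeys': [],
        'serviceEndpoint': 'http://192.0.2.1:8080',
    }],
}
doc = DIDDoc.deserialize(did_doc)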
def readdir(self, path, fh): """Called by FUSE when a directory is opened. Returns a list of file and directory names for the directory. """ log.debug('readdir(): {}'.format(path)) try: dir = self._directory_cache[path] except KeyError: dir = self._get_directory(path) self._directory_cache[path] = dir return dir
Called by FUSE when a directory is opened. Returns a list of file and directory names for the directory.
def com_google_fonts_check_contour_count(ttFont):
  """Check if each glyph has the recommended number of contours.

  This check is useful to assure glyphs aren't incorrectly constructed.

  The desired_glyph_data module contains the 'recommended' contour count
  for encoded glyphs. The contour counts are derived from fonts which were
  chosen for their quality and unique design decisions for particular glyphs.

  In the future, additional glyph data can be included. A good addition would
  be the 'recommended' anchor counts for each glyph.
  """
  from fontbakery.glyphdata import desired_glyph_data as glyph_data
  from fontbakery.utils import (get_font_glyph_data,
                                pretty_print_list)
  # rearrange data structure:
  desired_glyph_data = {}
  for glyph in glyph_data:
    desired_glyph_data[glyph['unicode']] = glyph

  bad_glyphs = []
  desired_glyph_contours = {f: desired_glyph_data[f]['contours']
                            for f in desired_glyph_data}

  font_glyph_data = get_font_glyph_data(ttFont)

  if font_glyph_data is None:
    yield FAIL, "This font lacks cmap data."
  else:
    font_glyph_contours = {f['unicode']: list(f['contours'])[0]
                           for f in font_glyph_data}

    shared_glyphs = set(desired_glyph_contours) & set(font_glyph_contours)
    for glyph in shared_glyphs:
      if font_glyph_contours[glyph] not in desired_glyph_contours[glyph]:
        bad_glyphs.append([glyph,
                           font_glyph_contours[glyph],
                           desired_glyph_contours[glyph]])

    if len(bad_glyphs) > 0:
      cmap = ttFont['cmap'].getcmap(PlatformID.WINDOWS,
                                    WindowsEncodingID.UNICODE_BMP).cmap
      bad_glyphs_name = [("Glyph name: {}\t"
                          "Contours detected: {}\t"
                          "Expected: {}").format(cmap[name],
                                                 count,
                                                 pretty_print_list(expected,
                                                                   shorten=None,
                                                                   glue="or"))
                         for name, count, expected in bad_glyphs]
      yield WARN, (("This check inspects the glyph outlines and detects the"
                    " total number of contours in each of them. The expected"
                    " values are inferred from the typical amounts of"
                    " contours observed in a large collection of reference"
                    " font families. The divergences listed below may simply"
                    " indicate a significantly different design on some of"
                    " your glyphs. On the other hand, some of these may flag"
                    " actual bugs in the font such as glyphs mapped to an"
                    " incorrect codepoint. Please consider reviewing"
                    " the design and codepoint assignment of these to make"
                    " sure they are correct.\n"
                    "\n"
                    "The following glyphs do not have the recommended"
                    " number of contours:\n"
                    "\n{}").format('\n'.join(bad_glyphs_name)))
    else:
      yield PASS, "All glyphs have the recommended number of contours."
Check if each glyph has the recommended number of contours.

This check is useful to assure glyphs aren't incorrectly constructed.

The desired_glyph_data module contains the 'recommended' contour count
for encoded glyphs. The contour counts are derived from fonts which were
chosen for their quality and unique design decisions for particular glyphs.

In the future, additional glyph data can be included. A good addition would
be the 'recommended' anchor counts for each glyph.
def _filter_link_tag_data(self, source, soup, data, url): """This method filters the web page content for link tags that match patterns given in the ``FILTER_MAPS`` :param source: The key of the meta dictionary in ``FILTER_MAPS['link']`` :type source: string :param soup: BeautifulSoup instance to find meta tags :type soup: instance :param data: The response dictionary to manipulate :type data: (dict) :param url: URL used for making an absolute url :type url: string """ link = FILTER_MAPS['link'][source] html = soup.find_all('link', {link['key']: link['pattern']}) if link['type'] == 'url': for line in html: data['url'] = line.get('href') else: for line in html: data['images'].append({ 'src': urljoin(url, line.get('href')), 'type': link['type'], })
This method filters the web page content for link tags that match patterns given in the ``FILTER_MAPS`` :param source: The key of the meta dictionary in ``FILTER_MAPS['link']`` :type source: string :param soup: BeautifulSoup instance to find meta tags :type soup: instance :param data: The response dictionary to manipulate :type data: (dict) :param url: URL used for making an absolute url :type url: string
def get_segment_count_data(self, start, end, use_shapes=True): """ Get segment data including PTN vehicle counts per segment that are fully _contained_ within the interval (start, end) Parameters ---------- start : int start time of the simulation in unix time end : int end time of the simulation in unix time use_shapes : bool, optional whether to include shapes (if available) Returns ------- seg_data : list each element in the list is a dict containing keys: "trip_I", "lats", "lons", "shape_id", "stop_seqs", "shape_breaks" """ cur = self.conn.cursor() # get all possible trip_ids that take place between start and end trips_df = self.get_tripIs_active_in_range(start, end) # stop_I -> count, lat, lon, name segment_counts = Counter() seg_to_info = {} # tripI_to_seq = "inverted segToShapeData" tripI_to_seq = defaultdict(list) # loop over all trips: for row in trips_df.itertuples(): # get stop_data and store it: stops_df = self.get_trip_stop_time_data(row.trip_I, row.day_start_ut) for i in range(len(stops_df) - 1): (stop_I, dep_time_ut, s_lat, s_lon, s_seq, shape_break) = stops_df.iloc[i] (stop_I_n, dep_time_ut_n, s_lat_n, s_lon_n, s_seq_n, shape_break_n) = stops_df.iloc[i + 1] # test if _contained_ in the interval # overlap would read: # (dep_time_ut <= end) and (start <= dep_time_ut_n) if (dep_time_ut >= start) and (dep_time_ut_n <= end): seg = (stop_I, stop_I_n) segment_counts[seg] += 1 if seg not in seg_to_info: seg_to_info[seg] = { u"trip_I": row.trip_I, u"lats": [s_lat, s_lat_n], u"lons": [s_lon, s_lon_n], u"shape_id": row.shape_id, u"stop_seqs": [s_seq, s_seq_n], u"shape_breaks": [shape_break, shape_break_n] } tripI_to_seq[row.trip_I].append(seg) stop_names = {} for (stop_I, stop_J) in segment_counts.keys(): for s in [stop_I, stop_J]: if s not in stop_names: stop_names[s] = self.stop(s)[u'name'].values[0] seg_data = [] for seg, count in segment_counts.items(): segInfo = seg_to_info[seg] shape_breaks = segInfo[u"shape_breaks"] seg_el = {} if use_shapes and shape_breaks and shape_breaks[0] and shape_breaks[1]: shape = shapes.get_shape_between_stops( cur, segInfo[u'trip_I'], shape_breaks=shape_breaks ) seg_el[u'lats'] = segInfo[u'lats'][:1] + shape[u'lat'] + segInfo[u'lats'][1:] seg_el[u'lons'] = segInfo[u'lons'][:1] + shape[u'lon'] + segInfo[u'lons'][1:] else: seg_el[u'lats'] = segInfo[u'lats'] seg_el[u'lons'] = segInfo[u'lons'] seg_el[u'name'] = stop_names[seg[0]] + u"-" + stop_names[seg[1]] seg_el[u'count'] = count seg_data.append(seg_el) return seg_data
Get segment data including PTN vehicle counts per segment that are fully _contained_ within the interval (start, end) Parameters ---------- start : int start time of the simulation in unix time end : int end time of the simulation in unix time use_shapes : bool, optional whether to include shapes (if available) Returns ------- seg_data : list each element in the list is a dict containing keys: "trip_I", "lats", "lons", "shape_id", "stop_seqs", "shape_breaks"
def upload_part(self, data, index=None, display_progress=False, report_progress_fn=None, **kwargs): """ :param data: Data to be uploaded in this part :type data: str or mmap object, bytes on python3 :param index: Index of part to be uploaded; must be in [1, 10000] :type index: integer :param display_progress: Whether to print "." to stderr when done :type display_progress: boolean :param report_progress_fn: Optional: a function to call that takes in two arguments (self, # bytes transmitted) :type report_progress_fn: function or None :raises: :exc:`dxpy.exceptions.DXFileError` if *index* is given and is not in the correct range, :exc:`requests.exceptions.HTTPError` if upload fails Uploads the data in *data* as part number *index* for the associated file. If no value for *index* is given, *index* defaults to 1. This probably only makes sense if this is the only part to be uploaded. """ if not USING_PYTHON2: # In python3, the underlying system methods use the 'bytes' type, not 'string' assert(isinstance(data, bytes)) req_input = {} if index is not None: req_input["index"] = int(index) md5 = hashlib.md5() if hasattr(data, 'seek') and hasattr(data, 'tell'): # data is a buffer; record initial position (so we can rewind back) rewind_input_buffer_offset = data.tell() while True: bytes_read = data.read(MD5_READ_CHUNK_SIZE) if bytes_read: md5.update(bytes_read) else: break # rewind the buffer to original position data.seek(rewind_input_buffer_offset) else: md5.update(data) req_input["md5"] = md5.hexdigest() req_input["size"] = len(data) def get_upload_url_and_headers(): # This function is called from within a retry loop, so to avoid amplifying the number of retries # geometrically, we decrease the allowed number of retries for the nested API call every time. if 'max_retries' not in kwargs: kwargs['max_retries'] = dxpy.DEFAULT_RETRIES elif kwargs['max_retries'] > 0: kwargs['max_retries'] -= 1 if "timeout" not in kwargs: kwargs["timeout"] = FILE_REQUEST_TIMEOUT resp = dxpy.api.file_upload(self._dxid, req_input, **kwargs) url = resp["url"] return url, _validate_headers(resp.get("headers", {})) # The file upload API requires us to get a pre-authenticated upload URL (and headers for it) every time we # attempt an upload. Because DXHTTPRequest will retry requests under retryable conditions, we give it a callback # to ask us for a new upload URL every time it attempts a request (instead of giving them directly). dxpy.DXHTTPRequest(get_upload_url_and_headers, data, jsonify_data=False, prepend_srv=False, always_retry=True, timeout=FILE_REQUEST_TIMEOUT, auth=None, method='PUT') self._num_uploaded_parts += 1 if display_progress: warn(".") if report_progress_fn is not None: report_progress_fn(self, len(data))
:param data: Data to be uploaded in this part :type data: str or mmap object, bytes on python3 :param index: Index of part to be uploaded; must be in [1, 10000] :type index: integer :param display_progress: Whether to print "." to stderr when done :type display_progress: boolean :param report_progress_fn: Optional: a function to call that takes in two arguments (self, # bytes transmitted) :type report_progress_fn: function or None :raises: :exc:`dxpy.exceptions.DXFileError` if *index* is given and is not in the correct range, :exc:`requests.exceptions.HTTPError` if upload fails Uploads the data in *data* as part number *index* for the associated file. If no value for *index* is given, *index* defaults to 1. This probably only makes sense if this is the only part to be uploaded.
def configfilepopulator(self): """Populates an unpopulated config.xml file with run-specific values and creates the file in the appropriate location""" # Set the number of cycles for each read and index using the number of reads specified in the sample sheet self.forwardlength = self.metadata.header.forwardlength self.reverselength = self.metadata.header.reverselength # Create a list of lists containing [cycle start, cycle end, and :runid] for each of forward reads, index 1 # index 2, and reverse reads cycles = [[1, self.forwardlength, self.runid], [self.forwardlength + 1, self.forwardlength + 8, self.runid], [self.forwardlength + 9, self.forwardlength + 16, self.runid], [self.forwardlength + 17, self.forwardlength + 16 + self.reverselength, self.runid]] # A dictionary of parameters (keys) and the values to use when repopulating the config file parameters = {'RunFolder': self.runid, 'RunFolderDate': self.metadata.date.replace("-", ""), 'RunFolderId': self.metadata.runnumber, 'RunFlowcellId': self.metadata.flowcell} # Load the xml file using element tree config = ElementTree.parse("{}/config.xml".format(self.homepath)) # Get the root of the tree configroot = config.getroot() # The run node is the only child node of the root for run in configroot: # Iterate through the child nodes. There are three nodes sections that must be populated for child in run: # Find the cycles tag if child.tag == 'Cycles': # Set the attributes with a dictionary containing the total reads child.attrib = {'Last': '{}'.format(self.forwardlength + 16 + self.reverselength), 'Number': '{}'.format(self.totalreads), 'First': '1'} elif child.tag == 'RunParameters': # Name the child as runparameter for easier coding runparameters = child for runparameter in runparameters: # This replaces data in both 'ImagingReads' and 'Reads' nodes if 'Reads' in runparameter.tag: # Enumerate through the run parameters for indexcount, reads in enumerate(runparameter): # The values for the index are 1, 2, 3, 4. Subtract one to get the index of the first # list in cycles index = int(runparameter.attrib['Index']) - 1 # Set the text value as the appropriate value from cycles reads.text = str(cycles[index][indexcount]) # Populate the instrument value if runparameter.tag == 'Instrument': runparameter.text = self.instrument # Iterate through the parameters in the parameter dictionary for parameter in parameters: # If the key is encountered if runparameter.tag == parameter: # Replace the text with the value runparameter.text = parameters[parameter] if 'Barcode' in runparameter.tag: for cycle, barcode in enumerate(runparameter): # Add the barcode cycles. These are the number of forward reads (+ 1 as the barcode # starts 1 cycle after the first run) plus the current iterator barcode.text = str(self.forwardlength + 1 + cycle) # Write the modified config file to the desired location config.write('{}Data/Intensities/BaseCalls/config.xml'.format(self.miseqfolder))
Populates an unpopulated config.xml file with run-specific values and creates the file in the appropriate location
def circumcenter(pt0, pt1, pt2): r"""Calculate and return the circumcenter of a circumcircle generated by a given triangle. All three points must be unique or a division by zero error will be raised. Parameters ---------- pt0: (x, y) Starting vertex of triangle pt1: (x, y) Second vertex of triangle pt2: (x, y) Final vertex of a triangle Returns ------- cc: (x, y) circumcenter coordinates See Also -------- circumcenter """ a_x = pt0[0] a_y = pt0[1] b_x = pt1[0] b_y = pt1[1] c_x = pt2[0] c_y = pt2[1] bc_y_diff = b_y - c_y ca_y_diff = c_y - a_y ab_y_diff = a_y - b_y cb_x_diff = c_x - b_x ac_x_diff = a_x - c_x ba_x_diff = b_x - a_x d_div = (a_x * bc_y_diff + b_x * ca_y_diff + c_x * ab_y_diff) if d_div == 0: raise ZeroDivisionError d_inv = 0.5 / d_div a_mag = a_x * a_x + a_y * a_y b_mag = b_x * b_x + b_y * b_y c_mag = c_x * c_x + c_y * c_y cx = (a_mag * bc_y_diff + b_mag * ca_y_diff + c_mag * ab_y_diff) * d_inv cy = (a_mag * cb_x_diff + b_mag * ac_x_diff + c_mag * ba_x_diff) * d_inv return cx, cy
r"""Calculate and return the circumcenter of a circumcircle generated by a given triangle. All three points must be unique or a division by zero error will be raised. Parameters ---------- pt0: (x, y) Starting vertex of triangle pt1: (x, y) Second vertex of triangle pt2: (x, y) Final vertex of a triangle Returns ------- cc: (x, y) circumcenter coordinates See Also -------- circumcenter
def fitNull(self, verbose=False, cache=False, out_dir='./cache',
            fname=None, rewrite=False, seed=None, n_times=10,
            factr=1e3, init_method=None):
    """
    Fit null model
    """
    if seed is not None:
        sp.random.seed(seed)

    read_from_file = False
    if cache:
        assert fname is not None, 'MultiTraitSetTest:: specify fname'
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        out_file = os.path.join(out_dir, fname)
        read_from_file = os.path.exists(out_file) and not rewrite

    RV = {}
    if read_from_file:
        f = h5py.File(out_file, 'r')
        for key in list(f.keys()):
            RV[key] = f[key][:]
        f.close()
        self.setNull(RV)
    else:
        start = TIME.time()
        if self.bgRE:
            self._gpNull = GP2KronSum(Y=self.Y, F=None, A=None, Cg=self.Cg,
                                      Cn=self.Cn, R=None,
                                      S_R=self.S_R, U_R=self.U_R)
        else:
            self._gpNull = GP2KronSumLR(self.Y, self.Cn,
                                        G=sp.ones((self.N, 1)),
                                        F=self.F, A=self.A)
            # freezes Cg to 0
            n_params = self._gpNull.covar.Cr.getNumberParams()
            self._gpNull.covar.Cr.setParams(1e-9 * sp.ones(n_params))
            self._gpNull.covar.act_Cr = False
        for i in range(n_times):
            params0 = self._initParams(init_method=init_method)
            self._gpNull.setParams(params0)
            conv, info = self._gpNull.optimize(verbose=verbose, factr=factr)
            if conv:
                break
        if not conv:
            warnings.warn("not converged")
        LMLgrad = (self._gpNull.LML_grad()['covar']**2).mean()
        LML = self._gpNull.LML()
        if self._gpNull.mean.n_terms == 1:
            RV['B'] = self._gpNull.mean.B[0]
        elif self._gpNull.mean.n_terms > 1:
            warnings.warn('generalize to more than 1 fixed effect term')
        if self.bgRE:
            RV['params0_g'] = self.Cg.getParams()
        else:
            RV['params0_g'] = sp.zeros_like(self.Cn.getParams())
        RV['params0_n'] = self.Cn.getParams()
        if self.bgRE:
            RV['Cg'] = self.Cg.K()
        else:
            RV['Cg'] = sp.zeros_like(self.Cn.K())
        RV['Cn'] = self.Cn.K()
        RV['conv'] = sp.array([conv])
        RV['time'] = sp.array([TIME.time() - start])
        RV['NLL0'] = sp.array([LML])
        RV['LMLgrad'] = sp.array([LMLgrad])
        RV['nit'] = sp.array([info['nit']])
        RV['funcalls'] = sp.array([info['funcalls']])
        self.null = RV
        if cache:
            f = h5py.File(out_file, 'w')
            smartDumpDictHdf5(RV, f)
            f.close()
    return RV
Fit null model
def add_perm(self, subj_str, perm_str): """Add a permission for a subject. Args: subj_str : str Subject for which to add permission(s) perm_str : str Permission to add. Implicitly adds all lower permissions. E.g., ``write`` will also add ``read``. """ self._assert_valid_permission(perm_str) self._perm_dict.setdefault(perm_str, set()).add(subj_str)
Add a permission for a subject. Args: subj_str : str Subject for which to add permission(s) perm_str : str Permission to add. Implicitly adds all lower permissions. E.g., ``write`` will also add ``read``.
def resizeEvent(self, event): """ Overloads the resize event to control if we are still editing. If we are resizing, then we are no longer editing. """ curr_item = self.currentItem() self.closePersistentEditor(curr_item) super(XMultiTagEdit, self).resizeEvent(event)
Overloads the resize event to control if we are still editing. If we are resizing, then we are no longer editing.
def shebang(self):
    """Get the file shebang if it has one."""
    with open(self.path, 'rb') as file_handle:
        hashtag = file_handle.read(2)
        if hashtag == b'#!':
            file_handle.seek(0)
            return file_handle.readline().decode('utf8')
    return None
Get the file shebang if it has one.
def in6_getnsma(a): """ Return link-local solicited-node multicast address for given address. Passed address must be provided in network format. Returned value is also in network format. """ r = in6_and(a, inet_pton(socket.AF_INET6, '::ff:ffff')) r = in6_or(inet_pton(socket.AF_INET6, 'ff02::1:ff00:0'), r) return r
Return link-local solicited-node multicast address for given address. Passed address must be provided in network format. Returned value is also in network format.
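Concretely, the two masks keep the low 24 bits of the input address and splice them into the ff02::1:ff00:0/104 prefix. With a hypothetical address, and the function above together with its in6_and/in6_or helpers in scope:

from socket import AF_INET6, inet_ntop, inet_pton

a = inet_pton(AF_INET6, '2001:db8::12:3456')
inet_ntop(AF_INET6, in6_getnsma(a))  # -> 'ff02::1:ff12:3456'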
def is_excluded_for_sdesc(self, sdesc, is_tpl=False):
    """Check whether this host should have the passed service *description*
    be "excluded" or "not included".

    :param sdesc: service description
    :type sdesc: str
    :param is_tpl: True if service is template, otherwise False
    :type is_tpl: bool
    :return: True if service description excluded, otherwise False
    :rtype: bool
    """
    if not is_tpl and self.service_includes:
        return sdesc not in self.service_includes
    if self.service_excludes:
        return sdesc in self.service_excludes
    return False
Check whether this host should have the passed service *description*
be "excluded" or "not included".

:param sdesc: service description
:type sdesc: str
:param is_tpl: True if service is template, otherwise False
:type is_tpl: bool
:return: True if service description excluded, otherwise False
:rtype: bool
def do_replace(eval_ctx, s, old, new, count=None): """Return a copy of the value with all occurrences of a substring replaced with a new one. The first argument is the substring that should be replaced, the second is the replacement string. If the optional third argument ``count`` is given, only the first ``count`` occurrences are replaced: .. sourcecode:: jinja {{ "Hello World"|replace("Hello", "Goodbye") }} -> Goodbye World {{ "aaaaargh"|replace("a", "d'oh, ", 2) }} -> d'oh, d'oh, aaargh """ if count is None: count = -1 if not eval_ctx.autoescape: return unicode(s).replace(unicode(old), unicode(new), count) if hasattr(old, '__html__') or hasattr(new, '__html__') and \ not hasattr(s, '__html__'): s = escape(s) else: s = soft_unicode(s) return s.replace(soft_unicode(old), soft_unicode(new), count)
Return a copy of the value with all occurrences of a substring replaced with a new one. The first argument is the substring that should be replaced, the second is the replacement string. If the optional third argument ``count`` is given, only the first ``count`` occurrences are replaced: .. sourcecode:: jinja {{ "Hello World"|replace("Hello", "Goodbye") }} -> Goodbye World {{ "aaaaargh"|replace("a", "d'oh, ", 2) }} -> d'oh, d'oh, aaargh
def forge_fdf(pdf_form_url=None, fdf_data_strings=[], fdf_data_names=[],
              fields_hidden=[], fields_readonly=[], checkbox_checked_name=b"Yes"):
    """Generates fdf string from fields specified

    * pdf_form_url (default: None): just the url for the form.
    * fdf_data_strings (default: []): array of (string, value) tuples for the
      form fields (or dicts). Value is passed as a UTF-16 encoded string,
      unless True/False, in which case it is assumed to be a checkbox
      (and passes names, '/Yes' (by default) or '/Off').
    * fdf_data_names (default: []): array of (string, value) tuples for the
      form fields (or dicts). Value is passed to FDF as a name, '/value'
    * fields_hidden (default: []): list of field names that should be set
      hidden.
    * fields_readonly (default: []): list of field names that should be set
      readonly.
    * checkbox_checked_name (default: b"Yes"): By default a checked checkbox
      gets passed the value "/Yes". You may find that the default does not
      work with your PDF, in which case you might want to try "On".

    The result is a string suitable for writing to a .fdf file.
    """
    fdf = [b'%FDF-1.2\x0a%\xe2\xe3\xcf\xd3\x0d\x0a']
    fdf.append(b'1 0 obj\x0a<</FDF')

    fdf.append(b'<</Fields[')
    fdf.append(b''.join(handle_data_strings(fdf_data_strings,
                                            fields_hidden, fields_readonly,
                                            checkbox_checked_name)))
    fdf.append(b''.join(handle_data_names(fdf_data_names,
                                          fields_hidden, fields_readonly)))

    if pdf_form_url:
        fdf.append(b''.join([b'/F (', smart_encode_str(pdf_form_url), b')\x0a']))

    fdf.append(b']\x0a')
    fdf.append(b'>>\x0a')
    fdf.append(b'>>\x0aendobj\x0a')
    fdf.append(b'trailer\x0a\x0a<<\x0a/Root 1 0 R\x0a>>\x0a')
    fdf.append(b'%%EOF\x0a\x0a')
    return b''.join(fdf)
Generates fdf string from fields specified

* pdf_form_url (default: None): just the url for the form.
* fdf_data_strings (default: []): array of (string, value) tuples for the
  form fields (or dicts). Value is passed as a UTF-16 encoded string,
  unless True/False, in which case it is assumed to be a checkbox
  (and passes names, '/Yes' (by default) or '/Off').
* fdf_data_names (default: []): array of (string, value) tuples for the
  form fields (or dicts). Value is passed to FDF as a name, '/value'
* fields_hidden (default: []): list of field names that should be set
  hidden.
* fields_readonly (default: []): list of field names that should be set
  readonly.
* checkbox_checked_name (default: b"Yes"): By default a checked checkbox
  gets passed the value "/Yes". You may find that the default does not
  work with your PDF, in which case you might want to try "On".

The result is a string suitable for writing to a .fdf file.
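A minimal call, assuming the handle_data_* and smart_encode_str helpers referenced above are in scope; the field names are hypothetical and must match the fields of the target PDF form:

fdf = forge_fdf(fdf_data_strings=[('name', 'Jane Doe'), ('subscribe', True)],
                fields_readonly=['name'])
with open('form_data.fdf', 'wb') as f:
    f.write(fdf)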
def triples_to_graph(self, triples, top=None): """ Create a Graph from *triples* considering codec configuration. The Graph class does not know about information in the codec, so if Graph instantiation depends on special `TYPE_REL` or `TOP_VAR` values, use this function instead of instantiating a Graph object directly. This is also where edge normalization (de-inversion) and value type conversion occur (via handle_triple()). Args: triples: an iterable of (lhs, relation, rhs) triples top: node identifier of the top node Returns: a Graph object """ inferred_top = triples[0][0] if triples else None ts = [] for triple in triples: if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL: inferred_top = triple[2] else: ts.append(self.handle_triple(*triple)) top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target return Graph(ts, top=top or inferred_top)
Create a Graph from *triples* considering codec configuration. The Graph class does not know about information in the codec, so if Graph instantiation depends on special `TYPE_REL` or `TOP_VAR` values, use this function instead of instantiating a Graph object directly. This is also where edge normalization (de-inversion) and value type conversion occur (via handle_triple()). Args: triples: an iterable of (lhs, relation, rhs) triples top: node identifier of the top node Returns: a Graph object
def _check_dir(self):
    """Makes sure that the working directory for the wrapper modules exists.
    """
    from os import path, mkdir
    if not path.isdir(self.dirpath):
        mkdir(self.dirpath)
        #Copy the ftypes.py module shipped with fortpy to the local directory.
        ftypes = path.join(get_fortpy_templates(), "ftypes.py")
        from shutil import copy
        copy(ftypes, self.dirpath)
        #Create the __init__.py file so that the library becomes a package for
        #its module contents.
        with open(path.join(self.dirpath, "__init__.py"), 'w') as f:
            f.write("# Auto-generated for package structure by fortpy.")

    #We also need to make sure that the fortpy deallocator module is present for
    #compilation in the shared library.
    if not path.isdir(self.f90path):
        mkdir(self.f90path)
        #Copy the ftypes_dealloc.f90 module shipped with fortpy to the local directory.
        ftypes = path.join(get_fortpy_templates(), "ftypes_dealloc.f90")
        from shutil import copy
        copy(ftypes, self.f90path)
Makes sure that the working directory for the wrapper modules exists.
def new_file(self, basedir): """New file""" title = _("New file") filters = _("All files")+" (*)" def create_func(fname): """File creation callback""" if osp.splitext(fname)[1] in ('.py', '.pyw', '.ipy'): create_script(fname) else: with open(fname, 'wb') as f: f.write(to_binary_string('')) fname = self.create_new_file(basedir, title, filters, create_func) if fname is not None: self.open([fname])
New file
def update_resolver_nameservers(resolver, nameservers, nameserver_filename):
    """
    Update a resolver's nameservers. The following order of precedence applies:
        1. Nameservers list provided as an argument
        2. A filename containing a list of nameservers
        3. The original nameservers associated with the resolver
    """
    if nameservers:
        resolver.nameservers = nameservers
    elif nameserver_filename:
        nameservers = get_stripped_file_lines(nameserver_filename)
        resolver.nameservers = nameservers
    else:
        # Use original nameservers
        pass

    return resolver
Update a resolver's nameservers. The following order of precedence applies:
    1. Nameservers list provided as an argument
    2. A filename containing a list of nameservers
    3. The original nameservers associated with the resolver
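A short usage sketch with dnspython, whose Resolver exposes the nameservers attribute this function sets; the server address is a placeholder:

import dns.resolver

resolver = dns.resolver.Resolver()
resolver = update_resolver_nameservers(resolver, ['192.0.2.53'], None)
resolver.nameservers  # -> ['192.0.2.53'], taking precedence over any filename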
def gen_reference_primitive(polypeptide, start, end): """ Generates a reference Primitive for a Polypeptide given start and end coordinates. Notes ----- Uses the rise_per_residue of the Polypeptide primitive to define the separation of points on the line joining start and end. Parameters ---------- polypeptide : Polypeptide start : numpy.array 3D coordinates of reference axis start end : numpy.array 3D coordinates of reference axis end Returns ------- reference_primitive : Primitive """ prim = polypeptide.primitive q = find_foot(a=start, b=end, p=prim.coordinates[0]) ax = Axis(start=q, end=end) # flip axis if antiparallel to polypeptide_vector if not is_acute(polypeptide_vector(polypeptide), ax.unit_tangent): ax = Axis(start=end, end=q) arc_length = 0 points = [ax.start] for rise in prim.rise_per_residue()[:-1]: arc_length += rise t = ax.t_from_arc_length(arc_length=arc_length) point = ax.point(t) points.append(point) reference_primitive = Primitive.from_coordinates(points) return reference_primitive
Generates a reference Primitive for a Polypeptide given start and end coordinates. Notes ----- Uses the rise_per_residue of the Polypeptide primitive to define the separation of points on the line joining start and end. Parameters ---------- polypeptide : Polypeptide start : numpy.array 3D coordinates of reference axis start end : numpy.array 3D coordinates of reference axis end Returns ------- reference_primitive : Primitive
def execute(self, args): """Execute show subcommand.""" if args.name is not None: self.show_workspace(slashes2dash(args.name)) elif args.all is not None: self.show_all()
Execute show subcommand.
def get_network_name(network_id): """ Return the keeper network name based on the current ethereum network id. Return `development` for every network id that is not mapped. :param network_id: Network id, int :return: Network name, str """ if os.environ.get('KEEPER_NETWORK_NAME'): logging.debug('keeper network name overridden by an environment variable: {}'.format( os.environ.get('KEEPER_NETWORK_NAME'))) return os.environ.get('KEEPER_NETWORK_NAME') return Keeper._network_name_map.get(network_id, Keeper.DEFAULT_NETWORK_NAME)
Return the keeper network name based on the current ethereum network id. Return `development` for every network id that is not mapped. :param network_id: Network id, int :return: Network name, str
def p_catch(self, p): """catch : CATCH LPAREN identifier RPAREN block""" p[0] = self.asttypes.Catch(identifier=p[3], elements=p[5]) p[0].setpos(p)
catch : CATCH LPAREN identifier RPAREN block
def list_commands(self, ctx):
    """Return a list of commands present in the commands and resources
    folders, but not subcommands.
    """
    commands = set(self.list_resource_commands())
    commands = commands.union(set(self.list_misc_commands()))
    return sorted(commands)
Return a list of commands present in the commands and resources folders, but not subcommands.
def coactivation(dataset, seed, threshold=0.0, output_dir='.', prefix='', r=6): """ Compute and save coactivation map given input image as seed. This is essentially just a wrapper for a meta-analysis defined by the contrast between those studies that activate within the seed and those that don't. Args: dataset: a Dataset instance containing study and activation data. seed: either a Nifti or Analyze image defining the boundaries of the seed, or a list of triples (x/y/z) defining the seed(s). Note that voxels do not need to be contiguous to define a seed--all supra- threshold voxels will be lumped together. threshold: optional float indicating the threshold above which voxels are considered to be part of the seed ROI (default = 0) r: optional integer indicating radius (in mm) of spheres to grow (only used if seed is a list of coordinates). output_dir: output directory to write to. Defaults to current. If none, defaults to using the first part of the seed filename. prefix: optional string to prepend to all coactivation images. Output: A set of meta-analysis images identical to that generated by meta.MetaAnalysis. """ if isinstance(seed, string_types): ids = dataset.get_studies(mask=seed, activation_threshold=threshold) else: ids = dataset.get_studies(peaks=seed, r=r, activation_threshold=threshold) ma = meta.MetaAnalysis(dataset, ids) ma.save_results(output_dir, prefix)
Compute and save coactivation map given input image as seed. This is essentially just a wrapper for a meta-analysis defined by the contrast between those studies that activate within the seed and those that don't. Args: dataset: a Dataset instance containing study and activation data. seed: either a Nifti or Analyze image defining the boundaries of the seed, or a list of triples (x/y/z) defining the seed(s). Note that voxels do not need to be contiguous to define a seed--all supra- threshold voxels will be lumped together. threshold: optional float indicating the threshold above which voxels are considered to be part of the seed ROI (default = 0) r: optional integer indicating radius (in mm) of spheres to grow (only used if seed is a list of coordinates). output_dir: output directory to write to. Defaults to current. If none, defaults to using the first part of the seed filename. prefix: optional string to prepend to all coactivation images. Output: A set of meta-analysis images identical to that generated by meta.MetaAnalysis.
def cylrec(r, lon, z): """ Convert from cylindrical to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cylrec_c.html :param r: Distance of a point from z axis. :type r: float :param lon: Angle (radians) of a point from xZ plane. :type lon: float :param z: Height of a point above xY plane. :type z: float :return: Rectangular coordinates of the point. :rtype: 3-Element Array of floats """ r = ctypes.c_double(r) lon = ctypes.c_double(lon) z = ctypes.c_double(z) rectan = stypes.emptyDoubleVector(3) libspice.cylrec_c(r, lon, z, rectan) return stypes.cVectorToPython(rectan)
Convert from cylindrical to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cylrec_c.html :param r: Distance of a point from z axis. :type r: float :param lon: Angle (radians) of a point from xZ plane. :type lon: float :param z: Height of a point above xY plane. :type z: float :return: Rectangular coordinates of the point. :rtype: 3-Element Array of floats
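The underlying math is simply x = r*cos(lon), y = r*sin(lon), z = z; the SPICE call adds nothing beyond that. A pure-Python equivalent for comparison:

import math

def cylrec_pure(r, lon, z):
    # Same conversion without the SPICE toolkit.
    return [r * math.cos(lon), r * math.sin(lon), z]

cylrec_pure(1.0, math.pi / 2, 3.0)  # -> [~0.0, 1.0, 3.0]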
def read(path, attribute, **kwargs): ''' Read the given attributes on the given file/directory :param str path: The file to get attributes from :param str attribute: The attribute to read :param bool hex: Return the values with forced hexadecimal values :return: A string containing the value of the named attribute :rtype: str :raises: CommandExecutionError on file not found, attribute not found, and any other unknown error CLI Example: .. code-block:: bash salt '*' xattr.read /path/to/file com.test.attr salt '*' xattr.read /path/to/file com.test.attr hex=True ''' kwargs = salt.utils.args.clean_kwargs(**kwargs) hex_ = kwargs.pop('hex', False) if kwargs: salt.utils.args.invalid_kwargs(kwargs) cmd = ['xattr', '-p'] if hex_: cmd.append('-x') cmd.extend([attribute, path]) try: ret = salt.utils.mac_utils.execute_return_result(cmd) except CommandExecutionError as exc: if 'No such file' in exc.strerror: raise CommandExecutionError('File not found: {0}'.format(path)) if 'No such xattr' in exc.strerror: raise CommandExecutionError('Attribute not found: {0}'.format(attribute)) raise CommandExecutionError('Unknown Error: {0}'.format(exc.strerror)) return ret
Read the given attributes on the given file/directory :param str path: The file to get attributes from :param str attribute: The attribute to read :param bool hex: Return the values with forced hexadecimal values :return: A string containing the value of the named attribute :rtype: str :raises: CommandExecutionError on file not found, attribute not found, and any other unknown error CLI Example: .. code-block:: bash salt '*' xattr.read /path/to/file com.test.attr salt '*' xattr.read /path/to/file com.test.attr hex=True
def ldap_server_host_port(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa") host = ET.SubElement(ldap_server, "host") hostname_key = ET.SubElement(host, "hostname") hostname_key.text = kwargs.pop('hostname') port = ET.SubElement(host, "port") port.text = kwargs.pop('port') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def bounding_polygon(self):
    """
    Returns the bounding box polygon for this tile

    :return: `pywom.utils.geo.Polygon` instance
    """
    lon_left, lat_bottom, lon_right, lat_top = Tile.tile_coords_to_bbox(self.x, self.y, self.zoom)
    return Polygon([[[lon_left, lat_top], [lon_right, lat_top],
                     [lon_right, lat_bottom], [lon_left, lat_bottom],
                     [lon_left, lat_top]]])
Returns the bounding box polygon for this tile :return: `pywom.utils.geo.Polygon` instance
def _build_host_livestate(self, host_name, livestate):
    # pylint: disable=no-self-use, too-many-locals
    """Build and notify the external command for a host livestate

    PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output>

    :param host_name: the concerned host name
    :param livestate: livestate dictionary
    :return: external command line
    """
    state = livestate.get('state', 'UP').upper()
    output = livestate.get('output', '')
    long_output = livestate.get('long_output', '')
    perf_data = livestate.get('perf_data', '')
    try:
        timestamp = int(livestate.get('timestamp', 'ABC'))
    except ValueError:
        timestamp = None

    host_state_to_id = {
        "UP": 0,
        "DOWN": 1,
        "UNREACHABLE": 2
    }
    parameters = '%s;%s' % (host_state_to_id.get(state, 3), output)
    if long_output and perf_data:
        parameters = '%s|%s\n%s' % (parameters, perf_data, long_output)
    elif long_output:
        parameters = '%s\n%s' % (parameters, long_output)
    elif perf_data:
        parameters = '%s|%s' % (parameters, perf_data)

    command_line = 'PROCESS_HOST_CHECK_RESULT;%s;%s' % (host_name, parameters)
    if timestamp is not None:
        command_line = '[%d] %s' % (timestamp, command_line)
    else:
        command_line = '[%d] %s' % (int(time.time()), command_line)

    return command_line
Build and notify the external command for a host livestate

PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output>

:param host_name: the concerned host name
:param livestate: livestate dictionary
:return: external command line
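Tracing a sample livestate through the method shows the external command format it emits; the host name and values below are hypothetical:

livestate = {'state': 'down', 'output': 'CRITICAL - host unreachable',
             'perf_data': 'rta=5000ms', 'timestamp': 1553526151}
# _build_host_livestate('server01', livestate) yields:
# '[1553526151] PROCESS_HOST_CHECK_RESULT;server01;1;CRITICAL - host unreachable|rta=5000ms'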
def _on_response(self, response): """Process all received Pub/Sub messages. For each message, send a modified acknowledgment request to the server. This prevents expiration of the message due to buffering by gRPC or proxy/firewall. This makes the server and client expiration timer closer to each other thus preventing the message being redelivered multiple times. After the messages have all had their ack deadline updated, execute the callback for each message using the executor. """ _LOGGER.debug( "Scheduling callbacks for %s messages.", len(response.received_messages) ) # Immediately modack the messages we received, as this tells the server # that we've received them. items = [ requests.ModAckRequest(message.ack_id, self._ack_histogram.percentile(99)) for message in response.received_messages ] self._dispatcher.modify_ack_deadline(items) for received_message in response.received_messages: message = google.cloud.pubsub_v1.subscriber.message.Message( received_message.message, received_message.ack_id, self._scheduler.queue ) # TODO: Immediately lease instead of using the callback queue. self._scheduler.schedule(self._callback, message)
Process all received Pub/Sub messages. For each message, send a modified acknowledgment request to the server. This prevents expiration of the message due to buffering by gRPC or proxy/firewall. This makes the server and client expiration timer closer to each other thus preventing the message being redelivered multiple times. After the messages have all had their ack deadline updated, execute the callback for each message using the executor.
def min(self, key=None): """ Find the minimum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0]) >>> rdd.min() 2.0 >>> rdd.min(key=str) 10.0 """ if key is None: return self.reduce(min) return self.reduce(lambda a, b: min(a, b, key=key))
Find the minimum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0]) >>> rdd.min() 2.0 >>> rdd.min(key=str) 10.0
def queryMulti(self, queries):
    """
    Execute a series of Deletes, Inserts, & Updates in the Queries List

    @author: Nick Verbeck
    @since: 9/7/2008
    """
    self.lastError = None
    self.affectedRows = 0
    self.rowcount = None
    self.record = None
    cursor = None
    try:
        try:
            self._GetConnection()
            #Execute each query and accumulate the affected row counts
            cursor = self.conn.getCursor()
            for query in queries:
                self.conn.query = query
                if query.__class__ == [].__class__:
                    self.affectedRows += cursor.execute(query[0], query[1])
                else:
                    self.affectedRows += cursor.execute(query)
            self.conn.updateCheckTime()
        except Exception as e:
            self.lastError = e
    finally:
        if cursor is not None:
            cursor.close()
        self._ReturnConnection()
        if self.lastError is not None:
            raise self.lastError
        else:
            return self.affectedRows
Execute a series of Deletes, Inserts, & Updates in the Queries List

@author: Nick Verbeck
@since: 9/7/2008
def next(self): """Trigger next agent to :py:meth:`~creamas.core.CreativeAgent.act` in the current step. """ # all agents acted, init next step t = time.time() if len(self._agents_to_act) == 0: self._init_step() addr = self._agents_to_act.pop(0) aiomas.run(until=self.env.trigger_act(addr=addr)) t2 = time.time() self._step_processing_time += t2 - t # all agents acted, finalize current step if len(self._agents_to_act) == 0: self._finalize_step()
Trigger next agent to :py:meth:`~creamas.core.CreativeAgent.act` in the current step.