Search is not available for this dataset
text
stringlengths
75
104k
def full_match(self, other): """Find the mapping between vertex indexes in self and other. This also works on disconnected graphs. Derived classes should just implement get_vertex_string and get_edge_string to make this method aware of the different nature of certain vertices. ...
def add_relation(self, source, destination): """Add new a relation to the bejection""" if self.in_sources(source): if self.forward[source] != destination: raise ValueError("Source is already in use. Destination does " "not match.") ...
def add_relations(self, relations):
    """Register several (source, destination) pairs in the bijection.

    Each pair is forwarded to ``add_relation``, so the same consistency
    checks apply to every element of ``relations``.
    """
    for src, dst in relations:
        self.add_relation(src, dst)
def inverse(self):
    """Build and return a new bijection mapping in the opposite direction.

    The result is an instance of the same class; its forward mapping is a
    shallow copy of this object's reverse mapping and vice versa.
    """
    flipped = self.__class__()
    flipped.forward = copy.copy(self.reverse)
    flipped.reverse = copy.copy(self.forward)
    return flipped
def from_first_relation(cls, vertex0, vertex1):
    """Initialize a fresh match based on the first relation.

    The new match holds the single pair ``(vertex0, vertex1)`` and records
    ``vertex1`` as the only end vertex reached so far.
    """
    match = cls([(vertex0, vertex1)])
    match.previous_ends1 = {vertex1}
    return match
def get_new_edges(self, subject_graph): """Get new edges from the subject graph for the graph search algorithm The Graph search algorithm extends the matches iteratively by adding matching vertices that are one edge further from the starting vertex at each iteration. ""...
def copy_with_new_relations(self, new_relations):
    """Return a copy of this match extended with extra relations.

    ``new_relations`` is a mapping of additional source -> destination
    pairs; its destination values become the new frontier, stored in
    ``previous_ends1``.
    """
    extended = self.__class__(self.forward.items())
    extended.add_relations(new_relations.items())
    extended.previous_ends1 = set(new_relations.values())
    return extended
def _set_pattern_graph(self, pattern_graph): """Initialize the pattern_graph""" self.pattern_graph = pattern_graph self.level_edges = {} self.level_constraints = {} self.duplicate_checks = set([]) if pattern_graph is None: return if len(pattern_graph.i...
def iter_initial_relations(self, subject_graph):
    """Yield every valid initial (pattern, subject) vertex pair.

    The pattern side is always ``self.start_vertex``; each subject vertex
    accepted by ``self.compare`` yields one candidate relation.
    """
    pattern_vertex = self.start_vertex
    for subject_vertex in range(subject_graph.num_vertices):
        if self.compare(pattern_vertex, subject_vertex, subject_graph):
            yield pattern_vertex, subject_vertex
def get_new_edges(self, level): """Get new edges from the pattern graph for the graph search algorithm The level argument denotes the distance of the new edges from the starting vertex in the pattern graph. """ return ( self.level_edges.get(level, []), ...
def check_next_match(self, match, new_relations, subject_graph, one_match): """Check if the (onset for a) match can be a valid""" # only returns true for ecaxtly one set of new_relations from all the # ones that are symmetrically equivalent if not (self.criteria_sets is None or one_match...
def iter_final_matches(self, canonical_match, subject_graph, one_match): """Given a match, iterate over all related equivalent matches When criteria sets are defined, the iterator runs over all symmetric equivalent matches that fulfill one of the criteria sets. When not criteri...
def get_closed_cycles(self): """Return the closed cycles corresponding to this permutation The cycle will be normalized to facilitate the elimination of duplicates. The following is guaranteed: 1) If this permutation is represented by disconnected cycles, the cyc...
def iter_initial_relations(self, subject_graph): """Iterate over all valid initial relations for a match""" if self.pattern_graph.num_edges != subject_graph.num_edges: return # don't even try for pair in CustomPattern.iter_initial_relations(self, subject_graph): yield pai...
def compare(self, vertex0, vertex1, subject_graph):
    """Return True when the two vertices have identical fingerprints.

    ``vertex0`` indexes the pattern graph's fingerprint table and
    ``vertex1`` the subject graph's; the rows must match element-wise.
    """
    pattern_row = self.pattern_graph.vertex_fingerprints[vertex0]
    subject_row = subject_graph.vertex_fingerprints[vertex1]
    return (pattern_row == subject_row).all()
def iter_initial_relations(self, subject_graph):
    """Yield an initial relation pairing pattern vertex 0 with every subject vertex."""
    for subject_vertex in range(subject_graph.num_vertices):
        yield 0, subject_vertex
def get_new_edges(self, level): """Get new edges from the pattern graph for the graph search algorithm The level argument denotes the distance of the new edges from the starting vertex in the pattern graph. """ if level == 0: edges0 = [(0, 1), (0, 2)] e...
def check_next_match(self, match, new_relations, subject_graph, one_match): """Check if the (onset for a) match can be a valid (part of a) ring""" # avoid duplicate rings (order of traversal) if len(match) == 3: if match.forward[1] < match.forward[2]: #print "RingPatt...
def complete(self, match, subject_graph): """Check the completeness of a ring match""" size = len(match) # check whether we have an odd strong ring if match.forward[size-1] in subject_graph.neighbors[match.forward[size-2]]: # we have an odd closed cycle. check if this is a st...
def print_debug(self, text, indent=0):
    """Print debug output when ``self.debug`` is non-zero.

    ``self.debug`` doubles as the current indentation depth and ``indent``
    shifts that depth: opening messages (indent > 0) are printed before the
    shift, closing/neutral messages (indent <= 0) after it.
    """
    if not self.debug:
        return
    if indent > 0:
        print(" " * self.debug, text)
        self.debug += indent
    else:
        self.debug += indent
        print(" " * self.debug, text)
def _iter_candidate_groups(self, init_match, edges0, edges1): """Divide the edges into groups""" # collect all end vertices0 and end vertices1 that belong to the same # group. sources = {} for start_vertex0, end_vertex0 in edges0: l = sources.setdefault(start_vertex0,...
def _iter_new_relations(self, init_match, subject_graph, edges0, constraints0, edges1): """Given an onset for a match, iterate over all possible new key-value pairs""" # Count the number of unique edges0[i][1] values. This is also # the number of new relations. num_new_relations = len(se...
def _iter_matches(self, input_match, subject_graph, one_match, level=0): """Given an onset for a match, iterate over all completions of that match This iterator works recursively. At each level the match is extended with a new set of relations based on vertices in the pattern graph ...
def dump_pdb(filename, molecule, atomnames=None, resnames=None, chain_ids=None, occupancies=None, betas=None): """Writes a single molecule to a pdb file. This function is based on the pdb file specification: http://www.wwpdb.org/documentation/format32/sect9.html For convenience, the relevant t...
def load_pdb(filename): """Loads a single molecule from a pdb file. This function does support only a small fragment from the pdb specification. It assumes that there is only one molecular geometry in the pdb file. """ with open(filename) as f: numbers = [] coordinates = [] ...
def zmat_to_cart(zmat): """Converts a ZMatrix back to cartesian coordinates.""" numbers = zmat["number"] N = len(numbers) coordinates = np.zeros((N, 3), float) # special cases for the first coordinates coordinates[1, 2] = zmat["distance"][1] if zmat["rel1"][2] == 1: sign = -1 e...
def _get_new_ref(self, existing_refs): """Get a new reference atom for a row in the ZMatrix The reference atoms should obey the following conditions: - They must be different - They must be neighbours in the bond graph - They must have an index lower than the c...
def cart_to_zmat(self, coordinates): """Convert cartesian coordinates to ZMatrix format Argument: coordinates -- Cartesian coordinates (numpy array Nx3) The coordinates must match with the graph that was used to initialize the ZMatrixGenerator object. ""...
def can_map_ipa_string(self, ipa_string): """ Return ``True`` if the mapper can map all the IPA characters in the given IPA string. :param IPAString ipa_string: the IPAString to be parsed :rtype: bool """ canonical = [(c.canonical_representation, ) for c in ipa_s...
def map_ipa_string(self, ipa_string, ignore=False, return_as_list=False, return_can_map=False): """ Convert the given IPAString to a string containing the corresponding ASCII IPA representation. :param IPAString ipa_string: the IPAString to be parsed :param bool ignore: if ``Tru...
def map_unicode_string(self, unicode_string, ignore=False, single_char_parsing=False, return_as_list=False, return_can_map=False): """ Convert the given Unicode string, representing an IPA string, to a string containing the corresponding mapped representation. Return ``None`` if ``unico...
def print_invalid_chars(invalid_chars, vargs): """ Print Unicode characterss that are not IPA valid, if requested by the user. :param list invalid_chars: a list (possibly empty) of invalid Unicode characters :param dict vargs: the command line parameters """ if len(invalid_chars) > 0: ...
def command_canonize(string, vargs): """ Print the canonical representation of the given string. It will replace non-canonical compound characters with their canonical synonym. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: ipa...
def command_chars(string, vargs): """ Print a list of all IPA characters in the given string. It will print the Unicode representation, the full IPA name, and the Unicode "U+"-prefixed hexadecimal codepoint representation of each IPA character. :param str string: the string to act upon :pa...
def command_check(string, vargs): """ Check if the given string is IPA valid. If the given string is not IPA valid, print the invalid characters. :param str string: the string to act upon :param dict vargs: the command line arguments """ is_valid = is_valid_ipa(string) print(is_val...
def command_clean(string, vargs): """ Remove characters that are not IPA valid from the given string, and print the remaining string. :param str string: the string to act upon :param dict vargs: the command line arguments """ valid_chars, invalid_chars = remove_invalid_ipa_characters( ...
def command_u2a(string, vargs): """ Print the ARPABEY ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: l = ARPABETMapper().map_unicode_string( unicode_string=string, ...
def command_u2k(string, vargs): """ Print the Kirshenbaum ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: l = KirshenbaumMapper().map_unicode_string( unicode_string=...
def main(): """ Entry point. """ parser = argparse.ArgumentParser(description=DESCRIPTION) for arg in ARGUMENTS: if "action" in arg: if arg["short"] is not None: parser.add_argument(arg["short"], arg["long"], action=arg["action"], help=arg["help"]) els...
def ipa_chars(self, value): """ Set the list of IPAChar objects composing the IPA string :param list value: list of IPAChar objects """ if value is None: self.__ipa_chars = [] else: if is_list_of_ipachars(value): self.__ipa_chars =...
def is_equivalent(self, other, ignore=False): """ Return ``True`` if the IPA string is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, 2. a list of IPAChar objects, and 3. another IPAString. :param variant other: the object...
def canonical_representation(self): """ Return a new IPAString, containing the canonical representation of the current string, that is, the one composed by the (prefix) minimum number of IPAChar objects. :rtype: IPAString """ return IPAString(unicode_string=u"".join([c._...
def filter_chars(self, chars=u""): """ Return a new IPAString, containing only the IPA characters specified by the ``chars`` string. Valid values for ``chars`` are: * ``consonants`` or ``cns`` * ``vowels`` or ``vwl`` * ``letters`` or ``cns_vwl`` * ``cns_...
def consonants(self):
    """
    Return a new IPAString, containing only the consonants in the current string.

    :rtype: IPAString
    """
    kept = [ch for ch in self.ipa_chars if ch.is_consonant]
    return IPAString(ipa_chars=kept)
def vowels(self):
    """
    Return a new IPAString, containing only the vowels in the current string.

    :rtype: IPAString
    """
    kept = [ch for ch in self.ipa_chars if ch.is_vowel]
    return IPAString(ipa_chars=kept)
def cns_vwl(self):
    """
    Return a new IPAString, containing only:

    1. the consonants, and
    2. the vowels

    in the current string.

    :rtype: IPAString
    """
    kept = [ch for ch in self.ipa_chars if ch.is_letter]
    return IPAString(ipa_chars=kept)
def cns_vwl_pstr(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the primary stress diacritics in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_char...
def cns_vwl_str(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.i...
def cns_vwl_str_len(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, and 4. the length diacritics in the current string. :rtype: IPAString """ return IPAString(ip...
def cns_vwl_pstr_long(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the primary stress diacritics, and 4. the long suprasegmental in the current string. :rtype: IPAString """ return ...
def cns_vwl_str_len_wb(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, and 5. the word breaks in the current string. :rtype: IPAString ...
def cns_vwl_str_len_wb_sb(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, 5. the word breaks, and 6. the syllable breaks in the current string. ...
def convert_unicode_field(string): """ Convert a Unicode field into the corresponding list of Unicode strings. The (input) Unicode field is a Unicode string containing one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``), separated by a space. :param str string: the (input)...
def convert_ascii_field(string): """ Convert an ASCII field into the corresponding list of Unicode strings. The (input) ASCII field is a Unicode string containing one or more ASCII codepoints (``00xx`` or ``U+00xx`` or an ASCII string not starting with ``00`` or ``U+``), separated by a space. ...
def convert_raw_tuple(value_tuple, format_string): """ Convert a tuple of raw values, according to the given line format. :param tuple value_tuple: the tuple of raw values :param str format_string: the format of the tuple :rtype: list of tuples """ values = [] for v, c in zip(value_tup...
def load_data_file( file_path, file_path_is_relative=False, comment_string=DATA_FILE_COMMENT, field_separator=DATA_FILE_FIELD_SEPARATOR, line_format=None ): """ Load a data file, with one record per line and fields separated by ``field_separator``, returning a list of tuples. It...
def load_ipa_data(): """ Load the IPA data from the built-in IPA database, creating the following globals: 1. ``IPA_CHARS``: list of all IPAChar objects 2. ``UNICODE_TO_IPA``: dict mapping a Unicode string (often, a single char) to an IPAChar 3. ``UNICODE_TO_IPA_MAX_KEY_LENGTH``: length of a longes...
def split_using_dictionary(string, dictionary, max_key_length, single_char_parsing=False): """ Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest string starting at the current index that is a key in the dictionary, or 2. a single ...
def ipa_substrings(unicode_string, single_char_parsing=False): """ Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest Unicode string starting at the current index representing a (known) valid IPA character, or 2. a single Unicode c...
def invalid_ipa_characters(unicode_string, indices=False): """ Return the list of Unicode characters in the given Unicode string that are not IPA valid. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool indices: if ``T...
def remove_invalid_ipa_characters(unicode_string, return_invalid=False, single_char_parsing=False): """ Remove all Unicode characters that are not IPA valid from the given string, and return a list of substrings of the given string, each mapping to a (known) valid IPA character. Return ``None``...
def variant_to_list(obj): """ Return a list containing the descriptors in the given object. The ``obj`` can be a list or a set of descriptor strings, or a Unicode string. If ``obj`` is a Unicode string, it will be split using spaces as delimiters. :param variant obj: the object to be parsed ...
def variant_to_canonical_string(obj): """ Return a list containing the canonical string for the given object. The ``obj`` can be a list or a set of descriptor strings, or a Unicode string. If ``obj`` is a Unicode string, it will be split using spaces as delimiters. :param variant obj: the obj...
def is_list_of_ipachars(obj): """ Return ``True`` if the given object is a list of IPAChar objects. :param object obj: the object to test :rtype: bool """ if isinstance(obj, list): for e in obj: if not isinstance(e, IPAChar): return False return True ...
def is_equivalent(self, other): """ Return ``True`` if the IPA character is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, containing the representation of the IPA character, 2. a Unicode string, containing a space-separated list of descri...
def dg_value(self, descriptor_group): """ Return the canonical value of a descriptor of the character, provided it is present in the given descriptor group. If not present, return ``None``. :param IPADescriptorGroup descriptor_group: the descriptor group to be checked against ...
def has_descriptor(self, descriptor): """ Return ``True`` if the character has the given descriptor. :param IPADescriptor descriptor: the descriptor to be checked against :rtype: bool """ for p in self.descriptors: if p in descriptor: return T...
def voicing(self, value):
    """
    Set the voicing of the consonant.

    ``None`` clears the value; any other value must be a recognized
    voicing descriptor, otherwise a ``ValueError`` is raised.

    :param str value: the value to be set
    """
    if value is not None:
        if value not in DG_C_VOICING:
            raise ValueError("Unrecognized value for voicing: '%s'" % value)
    self.__voicing = value
def place(self, value):
    """
    Set the place of articulation of the consonant.

    ``None`` clears the value; any other value must be a recognized
    place descriptor, otherwise a ``ValueError`` is raised.

    :param str value: the value to be set
    """
    if value is not None:
        if value not in DG_C_PLACE:
            raise ValueError("Unrecognized value for place: '%s'" % value)
    self.__place = value
def manner(self, value): """ Set the manner of articulation of the consonant. :param str value: the value to be set """ if (value is not None) and (not value in DG_C_MANNER): raise ValueError("Unrecognized value for manner: '%s'" % value) self.__manner = valu...
def height(self, value):
    """
    Set the height of the vowel.

    ``None`` clears the value; any other value must be a recognized
    height descriptor, otherwise a ``ValueError`` is raised.

    :param str value: the value to be set
    """
    if value is not None:
        if value not in DG_V_HEIGHT:
            raise ValueError("Unrecognized value for height: '%s'" % value)
    self.__height = value
def backness(self, value):
    """
    Set the backness of the vowel.

    ``None`` clears the value; any other value must be a recognized
    backness descriptor, otherwise a ``ValueError`` is raised.

    :param str value: the value to be set
    """
    if value is not None:
        if value not in DG_V_BACKNESS:
            raise ValueError("Unrecognized value for backness: '%s'" % value)
    self.__backness = value
def roundness(self, value):
    """
    Set the roundness of the vowel.

    ``None`` clears the value; any other value must be a recognized
    roundness descriptor, otherwise a ``ValueError`` is raised.

    :param str value: the value to be set
    """
    if value is not None:
        if value not in DG_V_ROUNDNESS:
            raise ValueError("Unrecognized value for roundness: '%s'" % value)
    self.__roundness = value
def _load_data(self): """ Load the Kirshenbaum ASCII IPA data from the built-in database. """ ipa_canonical_string_to_ascii_str = dict() for line in load_data_file( file_path=self.DATA_FILE_PATH, file_path_is_relative=True, line_format=u"sxA" ...
def canonical_value(self, query): """ Return the canonical value corresponding to the given query value. Return ``None`` if the query value is not present in any descriptor of the group. :param str query: the descriptor value to be checked against """ for d in self.desc...
def _load_data(self): """ Load the ARPABET ASCII IPA data from the built-in database. """ ipa_canonical_string_to_ascii_str = dict() for line in load_data_file( file_path=self.DATA_FILE_PATH, file_path_is_relative=True, line_format=u"UA" ...
def is_unicode_string(string): """ Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool """ if string is None: ...
def to_unicode_string(string): """ Return a Unicode string out of the given string. On Python 2, it calls ``unicode`` with ``utf-8`` encoding. On Python 3, it just returns the given string. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to Unicode ...
def to_str(string): """ Return the given string (either byte string or Unicode string) converted to native-str, that is, a byte string on Python 2, or a Unicode string on Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to native-str :rtype: nat...
def hex_to_unichr(hex_string): """ Return the Unicode character with the given codepoint, given as an hexadecimal string. Return ``None`` if ``hex_string`` is ``None`` or is empty. Example:: "0061" => a "U+0061" => a :param str hex_string: the Unicode codepoint of the desire...
def unicode_to_hex(unicode_string): """ Return a string containing the Unicode hexadecimal codepoint of each Unicode character in the given Unicode string. Return ``None`` if ``unicode_string`` is ``None``. Example:: a => U+0061 ab => U+0061 U+0062 :param str unicode_string: ...
def parse_html(html): """ Create an lxml.html.HtmlElement from a string with html. XXX: mostly copy-pasted from parsel.selector.create_root_node """ body = html.strip().replace('\x00', '').encode('utf8') or b'<html/>' parser = lxml.html.HTMLParser(recover=True, encoding='utf8') root = lxml.etree...
def etree_to_text(tree, guess_punct_space=True, guess_layout=True, newline_tags=NEWLINE_TAGS, double_newline_tags=DOUBLE_NEWLINE_TAGS): """ Convert a html tree to text. Tree should be cleaned with ``html_text.html_text.cleaner.clean_htm...
def selector_to_text(sel, guess_punct_space=True, guess_layout=True): """ Convert a cleaned parsel.Selector to text. See html_text.extract_text docstring for description of the approach and options. """ import parsel if isinstance(sel, parsel.SelectorList): # if selecting a specific xpat...
def cleaned_selector(html): """ Clean parsel.selector. """ import parsel try: tree = _cleaned_html_tree(html) sel = parsel.Selector(root=tree, type='html') except (lxml.etree.XMLSyntaxError, lxml.etree.ParseError, lxml.etree.ParserError, UnicodeEnc...
def extract_text(html, guess_punct_space=True, guess_layout=True, newline_tags=NEWLINE_TAGS, double_newline_tags=DOUBLE_NEWLINE_TAGS): """ Convert html to text, cleaning invisible content such as styles. Almost the same as normalize-space ...
def get_json(self, layer, where="1 = 1", fields=[], count_only=False, srid='4326'): """ Gets the JSON file from ArcGIS """ params = { 'where': where, 'outFields': ", ".join(fields), 'returnGeometry': True, 'outSR': srid, ...
def get_descriptor_for_layer(self, layer): """ Returns the standard JSON descriptor for the layer. There is a lot of usefule information in there. """ if not layer in self._layer_descriptor_cache: params = {'f': 'pjson'} if self.token: para...
def enumerate_layer_fields(self, layer):
    """
    Pulls out all of the field names for a layer.

    The names are read from the layer's JSON descriptor, in the order the
    service reports them.
    """
    layer_info = self.get_descriptor_for_layer(layer)
    names = []
    for field_spec in layer_info['fields']:
        names.append(field_spec['name'])
    return names
def get(self, layer, where="1 = 1", fields=[], count_only=False, srid='4326'): """ Gets a layer and returns it as honest to God GeoJSON. WHERE 1 = 1 causes us to get everything. We use OBJECTID in the WHERE clause to paginate, so don't use OBJECTID in your WHERE clause unless you're goi...
def getTable(self, layer, where="1 = 1", fields=[], jsobj=None): """ Returns JSON for a Table type. You shouldn't use this directly -- it's an automatic falback from .get if there is no geometry """ base_where = where features = [] # We always want to run once, an...
def getMultiple(self, layers, where="1 = 1", fields=[], srid='4326', layer_name_field=None): """ Get a bunch of layers and concatenate them together into one. This is useful if you have a map with layers for, say, every year named stuff_2014, stuff_2013, stuff_2012. Etc. Optionally, you...
def get_version(extension, workflow_file): '''Determines the version of a .py, .wdl, or .cwl file.''' if extension == 'py' and two_seven_compatible(workflow_file): return '2.7' elif extension == 'cwl': return yaml.load(open(workflow_file))['cwlVersion'] else: # Must be a wdl file. ...
def wf_info(workflow_path): """ Returns the version of the file and the file extension. Assumes that the file path is to the file directly ie, ends with a valid file extension.Supports checking local files as well as files at http:// and https:// locations. Files at these remote locations are recreated...
def modify_jsonyaml_paths(jsonyaml_file): """ Changes relative paths in a json/yaml file to be relative to where the json/yaml file is located. :param jsonyaml_file: Path to a json/yaml file. """ loader = schema_salad.ref_resolver.Loader({ "location": {"@type": "@id"}, "path": {...
def build_wes_request(workflow_file, json_path, attachments=None): """ :param str workflow_file: Path to cwl/wdl file. Can be http/https/file. :param json_path: Path to accompanying json file. :param attachments: Any other files needing to be uploaded to the server. :return: A list of tuples forma...
def get_service_info(self): """ Get information about Workflow Execution Service. May include information related (but not limited to) the workflow descriptor formats, versions supported, the WES API versions supported, and information about general the service availabili...
def run(self, wf, jsonyaml, attachments): """ Composes and sends a post request that signals the wes server to run a workflow. :param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file. :param str jsonyaml: A local path to a json or yaml file. :param li...
def cancel(self, run_id): """ Cancel a running workflow. :param run_id: String (typically a uuid) identifying the run. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request...
def get_run_log(self, run_id): """ Get detailed info about a running workflow. :param run_id: String (typically a uuid) identifying the run. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port ...