Dataset schema: id — int64, ranging 0 to ~190k; prompt — string, lengths 21 to 13.4M; docstring — string, lengths 1 to 12k.
170,710
import os import re import shelve import sys import nltk.data The provided code snippet includes necessary dependencies for implementing the `val_load` function. Write a Python function `def val_load(db)` to solve the following problem: Load a ``Valuation`` from a persistent database. :param db: name of file from which data is read. The suffix '.db' should be omitted from the name. :type db: str Here is the function: def val_load(db): """ Load a ``Valuation`` from a persistent database. :param db: name of file from which data is read. The suffix '.db' should be omitted from the name. :type db: str """ dbname = db + ".db" if not os.access(dbname, os.R_OK): sys.exit("Cannot read file: %s" % dbname) else: db_in = shelve.open(db) from nltk.sem import Valuation val = Valuation(db_in.items()) # a shelf iterates over keys, so pass (symbol, value) pairs return val
Load a ``Valuation`` from a persistent database. :param db: name of file from which data is read. The suffix '.db' should be omitted from the name. :type db: str
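A hedged round-trip sketch, assuming the `val_load` definition above is in scope. Two caveats are flagged in the comments: whether `shelve` actually writes a file named `<name>.db` depends on the platform's dbm backend, so the guard below only proceeds where val_load's naming convention holds; the file name "demo_val" is hypothetical.

import os
import shelve

# Persist a toy valuation; depending on the dbm backend this may create
# "demo_val.db" (the file val_load's os.access() check looks for) or "demo_val".
out = shelve.open("demo_val")
out["adam"] = "b1"
out["boy"] = {("b1",), ("b2",)}
out.close()

if os.access("demo_val.db", os.R_OK):  # only where the '.db' convention holds
    val = val_load("demo_val")         # pass the name *without* the '.db' suffix
    print(val.symbols())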
170,711
import os import re import shelve import sys import nltk.data def sql_query(dbname, query): """ Execute an SQL query over a database. :param dbname: filename of persistent store :type dbname: str :param query: SQL query :type query: str """ import sqlite3 try: path = nltk.data.find(dbname) connection = sqlite3.connect(str(path)) cur = connection.cursor() return cur.execute(query) except (ValueError, sqlite3.OperationalError): import warnings warnings.warn( "Make sure the database file %s is installed and uncompressed." % dbname ) raise The provided code snippet includes necessary dependencies for implementing the `sql_demo` function. Write a Python function `def sql_demo()` to solve the following problem: Print out every row from the 'city.db' database. Here is the function: def sql_demo(): """ Print out every row from the 'city.db' database. """ print() print("Using SQL to extract rows from 'city.db' RDB.") for row in sql_query("corpora/city_database/city.db", "SELECT * FROM city_table"): print(row)
Print out every row from the 'city.db' database.
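A usage sketch, assuming the `sql_query` definition above is in scope and that the sample database has been fetched with nltk.download('city_database'); the table and column names follow the NLTK book's city-database examples.

import nltk

nltk.download("city_database")  # one-time fetch of the sample SQLite DB

# The returned cursor can be iterated and each row unpacked.
rows = sql_query(
    "corpora/city_database/city.db",
    "SELECT City, Country FROM city_table WHERE Country = 'china'",
)
for city, country in rows:
    print(city, country)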
170,712
import operator from functools import reduce from itertools import chain from nltk.sem.logic import ( APP, AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, BinaryExpression, BooleanExpression, ConstantExpression, EqualityExpression, EventVariableExpression, ExistsExpression, Expression, FunctionVariableExpression, ImpExpression, IndividualVariableExpression, LambdaExpression, LogicParser, NegatedExpression, OrExpression, Tokens, Variable, is_eventvar, is_funcvar, is_indvar, unique_variable, ) class DrtIndividualVariableExpression( DrtAbstractVariableExpression, IndividualVariableExpression ): pass class DrtFunctionVariableExpression( DrtAbstractVariableExpression, FunctionVariableExpression ): pass class DrtEventVariableExpression( DrtIndividualVariableExpression, EventVariableExpression ): pass class DrtConstantExpression(DrtAbstractVariableExpression, ConstantExpression): pass def is_indvar(expr): """ An individual variable must be a single lowercase character other than 'e', followed by zero or more digits. :param expr: str :return: bool True if expr is of the correct form """ assert isinstance(expr, str), "%s is not a string" % expr return re.match(r"^[a-df-z]\d*$", expr) is not None def is_funcvar(expr): """ A function variable must be a single uppercase character followed by zero or more digits. :param expr: str :return: bool True if expr is of the correct form """ assert isinstance(expr, str), "%s is not a string" % expr return re.match(r"^[A-Z]\d*$", expr) is not None def is_eventvar(expr): """ An event variable must be a single lowercase 'e' character followed by zero or more digits. :param expr: str :return: bool True if expr is of the correct form """ assert isinstance(expr, str), "%s is not a string" % expr return re.match(r"^e\d*$", expr) is not None The provided code snippet includes necessary dependencies for implementing the `DrtVariableExpression` function. Write a Python function `def DrtVariableExpression(variable)` to solve the following problem: This is a factory method that instantiates and returns a subtype of ``DrtAbstractVariableExpression`` appropriate for the given variable. Here is the function: def DrtVariableExpression(variable): """ This is a factory method that instantiates and returns a subtype of ``DrtAbstractVariableExpression`` appropriate for the given variable. """ if is_indvar(variable.name): return DrtIndividualVariableExpression(variable) elif is_funcvar(variable.name): return DrtFunctionVariableExpression(variable) elif is_eventvar(variable.name): return DrtEventVariableExpression(variable) else: return DrtConstantExpression(variable)
This is a factory method that instantiates and returns a subtype of ``DrtAbstractVariableExpression`` appropriate for the given variable.
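The factory dispatches purely on the variable's name, via the three regex predicates above. A small sketch (hedged: it imports the public names from `nltk.sem.drt` rather than relying on the pasted snippet, whose `DrtAbstractVariableExpression` base is not shown):

from nltk.sem.drt import DrtVariableExpression
from nltk.sem.logic import Variable

# 'x4' is an individual variable, 'e1' an event variable,
# 'P' a function variable, and 'john' falls through to a constant.
for name in ["x4", "e1", "P", "john"]:
    expr = DrtVariableExpression(Variable(name))
    print(name, "->", type(expr).__name__)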
170,713
import operator from functools import reduce from itertools import chain from nltk.sem.logic import ( APP, AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, BinaryExpression, BooleanExpression, ConstantExpression, EqualityExpression, EventVariableExpression, ExistsExpression, Expression, FunctionVariableExpression, ImpExpression, IndividualVariableExpression, LambdaExpression, LogicParser, NegatedExpression, OrExpression, Tokens, Variable, is_eventvar, is_funcvar, is_indvar, unique_variable, ) def _pad_vertically(lines, max_lines): pad_line = [" " * len(lines[0])] return lines + pad_line * (max_lines - len(lines))
null
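`_pad_vertically` equalizes the heights of side-by-side ASCII boxes when DRSs are pretty-printed. A minimal illustration, assuming the helper above is in scope (note it pads with blanks as wide as each block's first line):

left = ["| x y    |", "| dog(x) |"]
right = ["| z |"]
height = max(len(left), len(right))
for l, r in zip(_pad_vertically(left, height), _pad_vertically(right, height)):
    print(l + "  " + r)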
170,714
import operator from functools import reduce from itertools import chain from nltk.sem.logic import ( APP, AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, BinaryExpression, BooleanExpression, ConstantExpression, EqualityExpression, EventVariableExpression, ExistsExpression, Expression, FunctionVariableExpression, ImpExpression, IndividualVariableExpression, LambdaExpression, LogicParser, NegatedExpression, OrExpression, Tokens, Variable, is_eventvar, is_funcvar, is_indvar, unique_variable, ) class DrtExpression: """ This is the base abstract DRT Expression from which every DRT Expression extends. """ _drt_parser = DrtParser() def fromstring(cls, s): return cls._drt_parser.parse(s) def applyto(self, other): return DrtApplicationExpression(self, other) def __neg__(self): return DrtNegatedExpression(self) def __and__(self, other): return NotImplemented def __or__(self, other): assert isinstance(other, DrtExpression) return DrtOrExpression(self, other) def __gt__(self, other): assert isinstance(other, DrtExpression) if isinstance(self, DRS): return DRS(self.refs, self.conds, other) if isinstance(self, DrtConcatenation): return DrtConcatenation(self.first, self.second, other) raise Exception("Antecedent of implication must be a DRS") def equiv(self, other, prover=None): """ Check for logical equivalence. Pass the expression (self <-> other) to the theorem prover. If the prover says it is valid, then the self and other are equal. :param other: an ``DrtExpression`` to check equality against :param prover: a ``nltk.inference.api.Prover`` """ assert isinstance(other, DrtExpression) f1 = self.simplify().fol() f2 = other.simplify().fol() return f1.equiv(f2, prover) def type(self): raise AttributeError( "'%s' object has no attribute 'type'" % self.__class__.__name__ ) def typecheck(self, signature=None): raise NotImplementedError() def __add__(self, other): return DrtConcatenation(self, other, None) def get_refs(self, recursive=False): """ Return the set of discourse referents in this DRS. :param recursive: bool Also find discourse referents in subterms? 
:return: list of ``Variable`` objects """ raise NotImplementedError() def is_pronoun_function(self): """Is self of the form "PRO(x)"?""" return ( isinstance(self, DrtApplicationExpression) and isinstance(self.function, DrtAbstractVariableExpression) and self.function.variable.name == DrtTokens.PRONOUN and isinstance(self.argument, DrtIndividualVariableExpression) ) def make_EqualityExpression(self, first, second): return DrtEqualityExpression(first, second) def make_VariableExpression(self, variable): return DrtVariableExpression(variable) def resolve_anaphora(self): return resolve_anaphora(self) def eliminate_equality(self): return self.visit_structured(lambda e: e.eliminate_equality(), self.__class__) def pretty_format(self): """ Draw the DRS :return: the pretty print string """ return "\n".join(self._pretty()) def pretty_print(self): print(self.pretty_format()) def draw(self): DrsDrawer(self).draw() def resolve_anaphora(expression, trail=[]): if isinstance(expression, ApplicationExpression): if expression.is_pronoun_function(): possible_antecedents = PossibleAntecedents() for ancestor in trail: for ref in ancestor.get_refs(): refex = expression.make_VariableExpression(ref) # ========================================================== # Don't allow resolution to itself or other types # ========================================================== if refex.__class__ == expression.argument.__class__ and not ( refex == expression.argument ): possible_antecedents.append(refex) if len(possible_antecedents) == 1: resolution = possible_antecedents[0] else: resolution = possible_antecedents return expression.make_EqualityExpression(expression.argument, resolution) else: r_function = resolve_anaphora(expression.function, trail + [expression]) r_argument = resolve_anaphora(expression.argument, trail + [expression]) return expression.__class__(r_function, r_argument) elif isinstance(expression, DRS): r_conds = [] for cond in expression.conds: r_cond = resolve_anaphora(cond, trail + [expression]) # if the condition is of the form '(x = [])' then raise exception if isinstance(r_cond, EqualityExpression): if isinstance(r_cond.first, PossibleAntecedents): # Reverse the order so that the variable is on the left temp = r_cond.first r_cond.first = r_cond.second r_cond.second = temp if isinstance(r_cond.second, PossibleAntecedents): if not r_cond.second: raise AnaphoraResolutionException( "Variable '%s' does not " "resolve to anything." 
% r_cond.first ) r_conds.append(r_cond) if expression.consequent: consequent = resolve_anaphora(expression.consequent, trail + [expression]) else: consequent = None return expression.__class__(expression.refs, r_conds, consequent) elif isinstance(expression, AbstractVariableExpression): return expression elif isinstance(expression, NegatedExpression): return expression.__class__( resolve_anaphora(expression.term, trail + [expression]) ) elif isinstance(expression, DrtConcatenation): if expression.consequent: consequent = resolve_anaphora(expression.consequent, trail + [expression]) else: consequent = None return expression.__class__( resolve_anaphora(expression.first, trail + [expression]), resolve_anaphora(expression.second, trail + [expression]), consequent, ) elif isinstance(expression, BinaryExpression): return expression.__class__( resolve_anaphora(expression.first, trail + [expression]), resolve_anaphora(expression.second, trail + [expression]), ) elif isinstance(expression, LambdaExpression): return expression.__class__( expression.variable, resolve_anaphora(expression.term, trail + [expression]) ) class Variable: def __init__(self, name): """ :param name: the name of the variable """ assert isinstance(name, str), "%s is not a string" % name self.name = name def __eq__(self, other): return isinstance(other, Variable) and self.name == other.name def __ne__(self, other): return not self == other def __lt__(self, other): if not isinstance(other, Variable): raise TypeError return self.name < other.name def substitute_bindings(self, bindings): return bindings.get(self, self) def __hash__(self): return hash(self.name) def __str__(self): return self.name def __repr__(self): return "Variable('%s')" % self.name def demo(): print("=" * 20 + "TEST PARSE" + "=" * 20) dexpr = DrtExpression.fromstring print(dexpr(r"([x,y],[sees(x,y)])")) print(dexpr(r"([x],[man(x), walks(x)])")) print(dexpr(r"\x.\y.([],[sees(x,y)])")) print(dexpr(r"\x.([],[walks(x)])(john)")) print(dexpr(r"(([x],[walks(x)]) + ([y],[runs(y)]))")) print(dexpr(r"(([],[walks(x)]) -> ([],[runs(x)]))")) print(dexpr(r"([x],[PRO(x), sees(John,x)])")) print(dexpr(r"([x],[man(x), -([],[walks(x)])])")) print(dexpr(r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])")) print("=" * 20 + "Test fol()" + "=" * 20) print(dexpr(r"([x,y],[sees(x,y)])").fol()) print("=" * 20 + "Test alpha conversion and lambda expression equality" + "=" * 20) e1 = dexpr(r"\x.([],[P(x)])") print(e1) e2 = e1.alpha_convert(Variable("z")) print(e2) print(e1 == e2) print("=" * 20 + "Test resolve_anaphora()" + "=" * 20) print(resolve_anaphora(dexpr(r"([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])"))) print( resolve_anaphora(dexpr(r"([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])")) ) print(resolve_anaphora(dexpr(r"(([x,y],[]) + ([],[PRO(x)]))"))) print("=" * 20 + "Test pretty_print()" + "=" * 20) dexpr(r"([],[])").pretty_print() dexpr( r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])" ).pretty_print() dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print() dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print() dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print()
null
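A quick demonstration of anaphora resolution through the public `DrtExpression.fromstring` entry point (the same one `demo()` above relies on). With a single type-compatible antecedent available, `PRO(y)` resolves to an equality condition:

from nltk.sem.drt import DrtExpression

drs = DrtExpression.fromstring(r"([x,y],[dog(x), walks(y), PRO(y)])")
# y's only candidate antecedent of the same type is x, so PRO(y)
# becomes (y = x); roughly: ([x,y],[dog(x), walks(y), (y = x)])
print(drs.resolve_anaphora())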
170,715
import operator from functools import reduce from itertools import chain from nltk.sem.logic import ( APP, AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, BinaryExpression, BooleanExpression, ConstantExpression, EqualityExpression, EventVariableExpression, ExistsExpression, Expression, FunctionVariableExpression, ImpExpression, IndividualVariableExpression, LambdaExpression, LogicParser, NegatedExpression, OrExpression, Tokens, Variable, is_eventvar, is_funcvar, is_indvar, unique_variable, ) try: from tkinter import Canvas, Tk from tkinter.font import Font from nltk.util import in_idle except ImportError: # No need to print a warning here, nltk.draw has already printed one. pass class DrtExpression: def fromstring(cls, s): def applyto(self, other): def __neg__(self): def __and__(self, other): def __or__(self, other): def __gt__(self, other): def equiv(self, other, prover=None): def type(self): def typecheck(self, signature=None): def __add__(self, other): def get_refs(self, recursive=False): def is_pronoun_function(self): def make_EqualityExpression(self, first, second): def make_VariableExpression(self, variable): def resolve_anaphora(self): def eliminate_equality(self): def pretty_format(self): def pretty_print(self): def draw(self): def test_draw(): try: from tkinter import Tk except ImportError as e: raise ValueError("tkinter is required, but it's not available.") expressions = [ r"x", r"([],[])", r"([x],[])", r"([x],[man(x)])", r"([x,y],[sees(x,y)])", r"([x],[man(x), walks(x)])", r"\x.([],[man(x), walks(x)])", r"\x y.([],[sees(x,y)])", r"([],[(([],[walks(x)]) + ([],[runs(x)]))])", r"([x],[man(x), -([],[walks(x)])])", r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])", ] for e in expressions: d = DrtExpression.fromstring(e) d.draw()
null
170,716
from itertools import chain from nltk.internals import Counter class FStructure(dict): def safeappend(self, key, item): """ Append 'item' to the list at 'key'. If no list exists for 'key', then construct one. """ if key not in self: self[key] = [] self[key].append(item) def __setitem__(self, key, value): dict.__setitem__(self, key.lower(), value) def __getitem__(self, key): return dict.__getitem__(self, key.lower()) def __contains__(self, key): return dict.__contains__(self, key.lower()) def to_glueformula_list(self, glue_dict): depgraph = self.to_depgraph() return glue_dict.to_glueformula_list(depgraph) def to_depgraph(self, rel=None): from nltk.parse.dependencygraph import DependencyGraph depgraph = DependencyGraph() nodes = depgraph.nodes self._to_depgraph(nodes, 0, "ROOT") # Add all the dependencies for all the nodes for address, node in nodes.items(): for n2 in (n for n in nodes.values() if n["rel"] != "TOP"): if n2["head"] == address: relation = n2["rel"] node["deps"].setdefault(relation, []) node["deps"][relation].append(n2["address"]) depgraph.root = nodes[1] return depgraph def _to_depgraph(self, nodes, head, rel): index = len(nodes) nodes[index].update( { "address": index, "word": self.pred[0], "tag": self.pred[1], "head": head, "rel": rel, } ) for feature in sorted(self): for item in sorted(self[feature]): if isinstance(item, FStructure): item._to_depgraph(nodes, index, feature) elif isinstance(item, tuple): new_index = len(nodes) nodes[new_index].update( { "address": new_index, "word": item[0], "tag": item[1], "head": index, "rel": feature, } ) elif isinstance(item, list): for n in item: n._to_depgraph(nodes, index, feature) else: raise Exception( "feature %s is not an FStruct, a list, or a tuple" % feature ) def read_depgraph(depgraph): return FStructure._read_depgraph(depgraph.root, depgraph) def _read_depgraph(node, depgraph, label_counter=None, parent=None): if not label_counter: label_counter = Counter() if node["rel"].lower() in ["spec", "punct"]: # the value of a 'spec' entry is a word, not an FStructure return (node["word"], node["tag"]) else: fstruct = FStructure() fstruct.pred = None fstruct.label = FStructure._make_label(label_counter.get()) fstruct.parent = parent word, tag = node["word"], node["tag"] if tag[:2] == "VB": if tag[2:3] == "D": fstruct.safeappend("tense", ("PAST", "tense")) fstruct.pred = (word, tag[:2]) if not fstruct.pred: fstruct.pred = (word, tag) children = [ depgraph.nodes[idx] for idx in chain.from_iterable(node["deps"].values()) ] for child in children: fstruct.safeappend( child["rel"], FStructure._read_depgraph(child, depgraph, label_counter, fstruct), ) return fstruct def _make_label(value): """ Pick an alphabetic character as identifier for an entity in the model. 
:param value: where to index into the list of characters :type value: int """ letter = [ "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "a", "b", "c", "d", "e", ][value - 1] num = int(value) // 26 if num > 0: return letter + str(num) else: return letter def __repr__(self): return self.__str__().replace("\n", "") def __str__(self): return self.pretty_format() def pretty_format(self, indent=3): try: accum = "%s:[" % self.label except NameError: accum = "[" try: accum += "pred '%s'" % (self.pred[0]) except NameError: pass for feature in sorted(self): for item in self[feature]: if isinstance(item, FStructure): next_indent = indent + len(feature) + 3 + len(self.label) accum += "\n{}{} {}".format( " " * (indent), feature, item.pretty_format(next_indent), ) elif isinstance(item, tuple): accum += "\n{}{} '{}'".format(" " * (indent), feature, item[0]) elif isinstance(item, list): accum += "\n{}{} {{{}}}".format( " " * (indent), feature, ("\n%s" % (" " * (indent + len(feature) + 2))).join(item), ) else: # ERROR raise Exception( "feature %s is not an FStruct, a list, or a tuple" % feature ) return accum + "]" class DependencyGraph: """ A container for the nodes and labelled edges of a dependency structure. """ def __init__( self, tree_str=None, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label="ROOT", ): """Dependency graph. We place a dummy `TOP` node with the index 0, since the root node is often assigned 0 as its head. This also means that the indexing of the nodes corresponds directly to the Malt-TAB format, which starts at 1. If zero-based is True, then Malt-TAB-like input with node numbers starting at 0 and the root node assigned -1 (as produced by, e.g., zpar). :param str cell_separator: the cell separator. If not provided, cells are split by whitespace. :param str top_relation_label: the label by which the top relation is identified, for examlple, `ROOT`, `null` or `TOP`. """ self.nodes = defaultdict( lambda: { "address": None, "word": None, "lemma": None, "ctag": None, "tag": None, "feats": None, "head": None, "deps": defaultdict(list), "rel": None, } ) self.nodes[0].update({"ctag": "TOP", "tag": "TOP", "address": 0}) self.root = None if tree_str: self._parse( tree_str, cell_extractor=cell_extractor, zero_based=zero_based, cell_separator=cell_separator, top_relation_label=top_relation_label, ) def remove_by_address(self, address): """ Removes the node with the given address. References to this node in others will still exist. """ del self.nodes[address] def redirect_arcs(self, originals, redirect): """ Redirects arcs to any of the nodes in the originals list to the redirect node address. """ for node in self.nodes.values(): new_deps = [] for dep in node["deps"]: if dep in originals: new_deps.append(redirect) else: new_deps.append(dep) node["deps"] = new_deps def add_arc(self, head_address, mod_address): """ Adds an arc from the node specified by head_address to the node specified by the mod address. """ relation = self.nodes[mod_address]["rel"] self.nodes[head_address]["deps"].setdefault(relation, []) self.nodes[head_address]["deps"][relation].append(mod_address) # self.nodes[head_address]['deps'].append(mod_address) def connect_graph(self): """ Fully connects all non-root nodes. All nodes are set to be dependents of the root node. 
""" for node1 in self.nodes.values(): for node2 in self.nodes.values(): if node1["address"] != node2["address"] and node2["rel"] != "TOP": relation = node2["rel"] node1["deps"].setdefault(relation, []) node1["deps"][relation].append(node2["address"]) # node1['deps'].append(node2['address']) def get_by_address(self, node_address): """Return the node with the given address.""" return self.nodes[node_address] def contains_address(self, node_address): """ Returns true if the graph contains a node with the given node address, false otherwise. """ return node_address in self.nodes def to_dot(self): """Return a dot representation suitable for using with Graphviz. >>> dg = DependencyGraph( ... 'John N 2\\n' ... 'loves V 0\\n' ... 'Mary N 2' ... ) >>> print(dg.to_dot()) digraph G{ edge [dir=forward] node [shape=plaintext] <BLANKLINE> 0 [label="0 (None)"] 0 -> 2 [label="ROOT"] 1 [label="1 (John)"] 2 [label="2 (loves)"] 2 -> 1 [label=""] 2 -> 3 [label=""] 3 [label="3 (Mary)"] } """ # Start the digraph specification s = "digraph G{\n" s += "edge [dir=forward]\n" s += "node [shape=plaintext]\n" # Draw the remaining nodes for node in sorted(self.nodes.values(), key=lambda v: v["address"]): s += '\n{} [label="{} ({})"]'.format( node["address"], node["address"], node["word"], ) for rel, deps in node["deps"].items(): for dep in deps: if rel is not None: s += '\n{} -> {} [label="{}"]'.format(node["address"], dep, rel) else: s += "\n{} -> {} ".format(node["address"], dep) s += "\n}" return s def _repr_svg_(self): """Show SVG representation of the transducer (IPython magic). >>> from nltk.test.setup_fixt import check_binary >>> check_binary('dot') >>> dg = DependencyGraph( ... 'John N 2\\n' ... 'loves V 0\\n' ... 'Mary N 2' ... ) >>> dg._repr_svg_().split('\\n')[0] '<?xml version="1.0" encoding="UTF-8" standalone="no"?>' """ dot_string = self.to_dot() return dot2img(dot_string) def __str__(self): return pformat(self.nodes) def __repr__(self): return f"<DependencyGraph with {len(self.nodes)} nodes>" def load( filename, zero_based=False, cell_separator=None, top_relation_label="ROOT" ): """ :param filename: a name of a file in Malt-TAB format :param zero_based: nodes in the input file are numbered starting from 0 rather than 1 (as produced by, e.g., zpar) :param str cell_separator: the cell separator. If not provided, cells are split by whitespace. :param str top_relation_label: the label by which the top relation is identified, for examlple, `ROOT`, `null` or `TOP`. :return: a list of DependencyGraphs """ with open(filename) as infile: return [ DependencyGraph( tree_str, zero_based=zero_based, cell_separator=cell_separator, top_relation_label=top_relation_label, ) for tree_str in infile.read().split("\n\n") ] def left_children(self, node_index): """ Returns the number of left children under the node specified by the given address. """ children = chain.from_iterable(self.nodes[node_index]["deps"].values()) index = self.nodes[node_index]["address"] return sum(1 for c in children if c < index) def right_children(self, node_index): """ Returns the number of right children under the node specified by the given address. 
""" children = chain.from_iterable(self.nodes[node_index]["deps"].values()) index = self.nodes[node_index]["address"] return sum(1 for c in children if c > index) def add_node(self, node): if not self.contains_address(node["address"]): self.nodes[node["address"]].update(node) def _parse( self, input_, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label="ROOT", ): """Parse a sentence. :param extractor: a function that given a tuple of cells returns a 7-tuple, where the values are ``word, lemma, ctag, tag, feats, head, rel``. :param str cell_separator: the cell separator. If not provided, cells are split by whitespace. :param str top_relation_label: the label by which the top relation is identified, for examlple, `ROOT`, `null` or `TOP`. """ def extract_3_cells(cells, index): word, tag, head = cells return index, word, word, tag, tag, "", head, "" def extract_4_cells(cells, index): word, tag, head, rel = cells return index, word, word, tag, tag, "", head, rel def extract_7_cells(cells, index): line_index, word, lemma, tag, _, head, rel = cells try: index = int(line_index) except ValueError: # index can't be parsed as an integer, use default pass return index, word, lemma, tag, tag, "", head, rel def extract_10_cells(cells, index): line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells try: index = int(line_index) except ValueError: # index can't be parsed as an integer, use default pass return index, word, lemma, ctag, tag, feats, head, rel extractors = { 3: extract_3_cells, 4: extract_4_cells, 7: extract_7_cells, 10: extract_10_cells, } if isinstance(input_, str): input_ = (line for line in input_.split("\n")) lines = (l.rstrip() for l in input_) lines = (l for l in lines if l) cell_number = None for index, line in enumerate(lines, start=1): cells = line.split(cell_separator) if cell_number is None: cell_number = len(cells) else: assert cell_number == len(cells) if cell_extractor is None: try: cell_extractor = extractors[cell_number] except KeyError as e: raise ValueError( "Number of tab-delimited fields ({}) not supported by " "CoNLL(10) or Malt-Tab(4) format".format(cell_number) ) from e try: index, word, lemma, ctag, tag, feats, head, rel = cell_extractor( cells, index ) except (TypeError, ValueError): # cell_extractor doesn't take 2 arguments or doesn't return 8 # values; assume the cell_extractor is an older external # extractor and doesn't accept or return an index. word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells) if head == "_": continue head = int(head) if zero_based: head += 1 self.nodes[index].update( { "address": index, "word": word, "lemma": lemma, "ctag": ctag, "tag": tag, "feats": feats, "head": head, "rel": rel, } ) # Make sure that the fake root node has labeled dependencies. if (cell_number == 3) and (head == 0): rel = top_relation_label self.nodes[head]["deps"][rel].append(index) if self.nodes[0]["deps"][top_relation_label]: root_address = self.nodes[0]["deps"][top_relation_label][0] self.root = self.nodes[root_address] self.top_relation_label = top_relation_label else: warnings.warn( "The graph doesn't contain a node " "that depends on the root element." ) def _word(self, node, filter=True): w = node["word"] if filter: if w != ",": return w return w def _tree(self, i): """Turn dependency graphs into NLTK trees. :param int i: index of a node :return: either a word (if the indexed node is a leaf) or a ``Tree``. 
""" node = self.get_by_address(i) word = node["word"] deps = sorted(chain.from_iterable(node["deps"].values())) if deps: return Tree(word, [self._tree(dep) for dep in deps]) else: return word def tree(self): """ Starting with the ``root`` node, build a dependency tree using the NLTK ``Tree`` constructor. Dependency labels are omitted. """ node = self.root word = node["word"] deps = sorted(chain.from_iterable(node["deps"].values())) return Tree(word, [self._tree(dep) for dep in deps]) def triples(self, node=None): """ Extract dependency triples of the form: ((head word, head tag), rel, (dep word, dep tag)) """ if not node: node = self.root head = (node["word"], node["ctag"]) for i in sorted(chain.from_iterable(node["deps"].values())): dep = self.get_by_address(i) yield (head, dep["rel"], (dep["word"], dep["ctag"])) yield from self.triples(node=dep) def _hd(self, i): try: return self.nodes[i]["head"] except IndexError: return None def _rel(self, i): try: return self.nodes[i]["rel"] except IndexError: return None # what's the return type? Boolean or list? def contains_cycle(self): """Check whether there are cycles. >>> dg = DependencyGraph(treebank_data) >>> dg.contains_cycle() False >>> cyclic_dg = DependencyGraph() >>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0} >>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1} >>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2} >>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3} >>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4} >>> cyclic_dg.nodes = { ... 0: top, ... 1: child1, ... 2: child2, ... 3: child3, ... 4: child4, ... } >>> cyclic_dg.root = top >>> cyclic_dg.contains_cycle() [1, 2, 4, 3] """ distances = {} for node in self.nodes.values(): for dep in node["deps"]: key = tuple([node["address"], dep]) distances[key] = 1 for _ in self.nodes: new_entries = {} for pair1 in distances: for pair2 in distances: if pair1[1] == pair2[0]: key = tuple([pair1[0], pair2[1]]) new_entries[key] = distances[pair1] + distances[pair2] for pair in new_entries: distances[pair] = new_entries[pair] if pair[0] == pair[1]: path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0]) return path return False # return []? def get_cycle_path(self, curr_node, goal_node_index): for dep in curr_node["deps"]: if dep == goal_node_index: return [curr_node["address"]] for dep in curr_node["deps"]: path = self.get_cycle_path(self.get_by_address(dep), goal_node_index) if len(path) > 0: path.insert(0, curr_node["address"]) return path return [] def to_conll(self, style): """ The dependency graph in CoNLL format. 
:param style: the style to use for the format (3, 4, 10 columns) :type style: int :rtype: str """ if style == 3: template = "{word}\t{tag}\t{head}\n" elif style == 4: template = "{word}\t{tag}\t{head}\t{rel}\n" elif style == 10: template = ( "{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n" ) else: raise ValueError( "Number of tab-delimited fields ({}) not supported by " "CoNLL(10) or Malt-Tab(4) format".format(style) ) return "".join( template.format(i=i, **node) for i, node in sorted(self.nodes.items()) if node["tag"] != "TOP" ) def nx_graph(self): """Convert the data in a ``nodelist`` into a networkx labeled directed graph.""" import networkx nx_nodelist = list(range(1, len(self.nodes))) nx_edgelist = [ (n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n) ] self.nx_labels = {} for n in nx_nodelist: self.nx_labels[n] = self.nodes[n]["word"] g = networkx.MultiDiGraph() g.add_nodes_from(nx_nodelist) g.add_edges_from(nx_edgelist) return g def demo_read_depgraph(): from nltk.parse.dependencygraph import DependencyGraph dg1 = DependencyGraph( """\ Esso NNP 2 SUB said VBD 0 ROOT the DT 5 NMOD Whiting NNP 5 NMOD field NN 6 SUB started VBD 2 VMOD production NN 6 OBJ Tuesday NNP 6 VMOD """ ) dg2 = DependencyGraph( """\ John NNP 2 SUB sees VBP 0 ROOT Mary NNP 2 OBJ """ ) dg3 = DependencyGraph( """\ a DT 2 SPEC man NN 3 SUBJ walks VB 0 ROOT """ ) dg4 = DependencyGraph( """\ every DT 2 SPEC girl NN 3 SUBJ chases VB 0 ROOT a DT 5 SPEC dog NN 3 OBJ """ ) depgraphs = [dg1, dg2, dg3, dg4] for dg in depgraphs: print(FStructure.read_depgraph(dg))
null
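A short usage sketch for the three-column Malt-TAB input accepted by `DependencyGraph` (hedged: the output shapes follow the `to_dot` and `contains_cycle` doctests above):

from nltk.parse.dependencygraph import DependencyGraph

dg = DependencyGraph(
    "John N 2\n"
    "loves V 0\n"
    "Mary N 2"
)
print(dg.tree())  # (loves John Mary)
for head, rel, dep in dg.triples():
    print(head, rel, dep)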
170,717
import inspect import re import sys import textwrap from pprint import pformat from nltk.decorators import decorator from nltk.sem.logic import ( AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, EqualityExpression, ExistsExpression, Expression, IffExpression, ImpExpression, IndividualVariableExpression, IotaExpression, LambdaExpression, NegatedExpression, OrExpression, Variable, is_indvar, ) The provided code snippet includes necessary dependencies for implementing the `is_rel` function. Write a Python function `def is_rel(s)` to solve the following problem: Check whether a set represents a relation (of any arity). :param s: a set containing tuples of str elements :type s: set :rtype: bool Here is the function: def is_rel(s): """ Check whether a set represents a relation (of any arity). :param s: a set containing tuples of str elements :type s: set :rtype: bool """ # we have the empty relation, i.e. set() if len(s) == 0: return True # all the elements are tuples of the same length elif all(isinstance(el, tuple) for el in s) and len({len(el) for el in s}) == 1: return True else: raise ValueError("Set %r contains sequences of different lengths" % s)
Check whether a set represents a relation (of any arity). :param s: a set containing tuples of str elements :type s: set :rtype: bool
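Behaviour sketch, assuming the `is_rel` definition above is in scope:

print(is_rel(set()))                     # True: the empty relation
print(is_rel({("a",), ("b",)}))          # True: a unary relation
print(is_rel({("a", "b"), ("c", "d")}))  # True: a binary relation
try:
    is_rel({("a",), ("b", "c")})         # mixed arities
except ValueError as e:
    print(e)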
170,718
import inspect import re import sys import textwrap from pprint import pformat from nltk.decorators import decorator from nltk.sem.logic import ( AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, EqualityExpression, ExistsExpression, Expression, IffExpression, ImpExpression, IndividualVariableExpression, IotaExpression, LambdaExpression, NegatedExpression, OrExpression, Variable, is_indvar, ) The provided code snippet includes necessary dependencies for implementing the `set2rel` function. Write a Python function `def set2rel(s)` to solve the following problem: Convert a set containing individuals (strings or numbers) into a set of unary tuples. Any tuples of strings already in the set are passed through unchanged. For example: - set(['a', 'b']) => set([('a',), ('b',)]) - set([3, 27]) => set([('3',), ('27',)]) :type s: set :rtype: set of tuple of str Here is the function: def set2rel(s): """ Convert a set containing individuals (strings or numbers) into a set of unary tuples. Any tuples of strings already in the set are passed through unchanged. For example: - set(['a', 'b']) => set([('a',), ('b',)]) - set([3, 27]) => set([('3',), ('27',)]) :type s: set :rtype: set of tuple of str """ new = set() for elem in s: if isinstance(elem, str): new.add((elem,)) elif isinstance(elem, int): new.add((str(elem),)) # wrap in a unary tuple, as the docstring promises else: new.add(elem) return new
Convert a set containing individuals (strings or numbers) into a set of unary tuples. Any tuples of strings already in the set are passed through unchanged. For example: - set(['a', 'b']) => set([('a',), ('b',)]) - set([3, 27]) => set([('3',), ('27',)]) :type s: set :rtype: set of tuple of str
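With the definition above in scope, the examples from the docstring behave as promised (including the integer case, once ints are wrapped in unary tuples):

print(set2rel({"a", "b"}))    # {('a',), ('b',)} (set order may vary)
print(set2rel({3, 27}))       # {('3',), ('27',)}
print(set2rel({("a", "b")}))  # existing tuples pass through unchanged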
170,719
import inspect import re import sys import textwrap from pprint import pformat from nltk.decorators import decorator from nltk.sem.logic import ( AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, EqualityExpression, ExistsExpression, Expression, IffExpression, ImpExpression, IndividualVariableExpression, IotaExpression, LambdaExpression, NegatedExpression, OrExpression, Variable, is_indvar, ) The provided code snippet includes necessary dependencies for implementing the `arity` function. Write a Python function `def arity(rel)` to solve the following problem: Check the arity of a relation. :type rel: set of tuples :rtype: int Here is the function: def arity(rel): """ Check the arity of a relation. :type rel: set of tuples :rtype: int """ if len(rel) == 0: return 0 return len(list(rel)[0])
Check the arity of a relation. :type rel: set of tuples :rtype: int
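With the definition above in scope:

print(arity(set()))                         # 0: the empty relation
print(arity({("g1",), ("g2",)}))            # 1: a set of unary tuples
print(arity({("b1", "g1"), ("g1", "b1")}))  # 2: a binary relation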
170,720
import inspect import re import sys import textwrap from pprint import pformat from nltk.decorators import decorator from nltk.sem.logic import ( AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, EqualityExpression, ExistsExpression, Expression, IffExpression, ImpExpression, IndividualVariableExpression, IotaExpression, LambdaExpression, NegatedExpression, OrExpression, Variable, is_indvar, ) class Valuation(dict): """ A dictionary which represents a model-theoretic Valuation of non-logical constants. Keys are strings representing the constants to be interpreted, and values correspond to individuals (represented as strings) and n-ary relations (represented as sets of tuples of strings). An instance of ``Valuation`` will raise a KeyError exception (i.e., just behave like a standard dictionary) if indexed with an expression that is not in its list of symbols. """ def __init__(self, xs): """ :param xs: a list of (symbol, value) pairs. """ super().__init__() for (sym, val) in xs: if isinstance(val, str) or isinstance(val, bool): self[sym] = val elif isinstance(val, set): self[sym] = set2rel(val) else: msg = textwrap.fill( "Error in initializing Valuation. " "Unrecognized value for symbol '%s':\n%s" % (sym, val), width=66, ) raise ValueError(msg) def __getitem__(self, key): if key in self: return dict.__getitem__(self, key) else: raise Undefined("Unknown expression: '%s'" % key) def __str__(self): return pformat(self) def domain(self): """Set-theoretic domain of the value-space of a Valuation.""" dom = [] for val in self.values(): if isinstance(val, str): dom.append(val) elif not isinstance(val, bool): dom.extend( [elem for tuple_ in val for elem in tuple_ if elem is not None] ) return set(dom) def symbols(self): """The non-logical constants which the Valuation recognizes.""" return sorted(self.keys()) def fromstring(cls, s): return read_valuation(s) def _read_valuation_line(s): """ Read a line in a valuation file. Lines are expected to be of the form:: noosa => n girl => {g1, g2} chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)} :param s: input line :type s: str :return: a pair (symbol, value) :rtype: tuple """ pieces = _VAL_SPLIT_RE.split(s) symbol = pieces[0] value = pieces[1] # check whether the value is meant to be a set if value.startswith("{"): value = value[1:-1] tuple_strings = _TUPLES_RE.findall(value) # are the set elements tuples? if tuple_strings: set_elements = [] for ts in tuple_strings: ts = ts[1:-1] element = tuple(_ELEMENT_SPLIT_RE.split(ts)) set_elements.append(element) else: set_elements = _ELEMENT_SPLIT_RE.split(value) value = set(set_elements) return symbol, value The provided code snippet includes necessary dependencies for implementing the `read_valuation` function. Write a Python function `def read_valuation(s, encoding=None)` to solve the following problem: Convert a valuation string into a valuation. :param s: a valuation string :type s: str :param encoding: the encoding of the input string, if it is binary :type encoding: str :return: a ``nltk.sem`` valuation :rtype: Valuation Here is the function: def read_valuation(s, encoding=None): """ Convert a valuation string into a valuation. 
:param s: a valuation string :type s: str :param encoding: the encoding of the input string, if it is binary :type encoding: str :return: a ``nltk.sem`` valuation :rtype: Valuation """ if encoding is not None: s = s.decode(encoding) statements = [] for linenum, line in enumerate(s.splitlines()): line = line.strip() if line.startswith("#") or line == "": continue try: statements.append(_read_valuation_line(line)) except ValueError as e: raise ValueError(f"Unable to parse line {linenum}: {line}") from e return Valuation(statements)
Convert a valuation string into a valuation. :param s: a valuation string :type s: str :param encoding: the encoding of the input string, if it is binary :type encoding: str :return: a ``nltk.sem`` valuation :rtype: Valuation
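A usage sketch via the public `Valuation.fromstring` wrapper shown above (comments and blank lines in the valuation string are skipped; set values are run through `set2rel`):

from nltk.sem import Valuation

s = """
# individuals map to strings, predicates to sets of tuples
noosa => n
girl => {g1, g2}
chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}
"""
val = Valuation.fromstring(s)
print(val["girl"])          # {('g1',), ('g2',)} (set order may vary)
print(sorted(val.domain()))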
170,721
import inspect import re import sys import textwrap from pprint import pformat from nltk.decorators import decorator from nltk.sem.logic import ( AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, EqualityExpression, ExistsExpression, Expression, IffExpression, ImpExpression, IndividualVariableExpression, IotaExpression, LambdaExpression, NegatedExpression, OrExpression, Variable, is_indvar, ) def propdemo(trace=None): """Example of a propositional model.""" global val1, dom1, m1, g1 val1 = Valuation([("P", True), ("Q", True), ("R", False)]) dom1 = set() m1 = Model(dom1, val1) g1 = Assignment(dom1) print() print("*" * mult) print("Propositional Formulas Demo") print("*" * mult) print("(Propositional constants treated as nullary predicates)") print() print("Model m1:\n", m1) print("*" * mult) sentences = [ "(P & Q)", "(P & R)", "- P", "- R", "- - P", "- (P & R)", "(P | R)", "(R | P)", "(R | R)", "(- P | R)", "(P | - P)", "(P -> Q)", "(P -> R)", "(R -> P)", "(P <-> P)", "(R <-> R)", "(P <-> R)", ] for sent in sentences: if trace: print() m1.evaluate(sent, g1, trace) else: print(f"The value of '{sent}' is: {m1.evaluate(sent, g1)}") def folmodel(quiet=False, trace=None): """Example of a first-order model.""" global val2, v2, dom2, m2, g2 v2 = [ ("adam", "b1"), ("betty", "g1"), ("fido", "d1"), ("girl", {"g1", "g2"}), ("boy", {"b1", "b2"}), ("dog", {"d1"}), ("love", {("b1", "g1"), ("b2", "g2"), ("g1", "b1"), ("g2", "b1")}), ] val2 = Valuation(v2) dom2 = val2.domain m2 = Model(dom2, val2) g2 = Assignment(dom2, [("x", "b1"), ("y", "g2")]) if not quiet: print() print("*" * mult) print("Models Demo") print("*" * mult) print("Model m2:\n", "-" * 14, "\n", m2) print("Variable assignment = ", g2) exprs = ["adam", "boy", "love", "walks", "x", "y", "z"] parsed_exprs = [Expression.fromstring(e) for e in exprs] print() for parsed in parsed_exprs: try: print( "The interpretation of '%s' in m2 is %s" % (parsed, m2.i(parsed, g2)) ) except Undefined: print("The interpretation of '%s' in m2 is Undefined" % parsed) applications = [ ("boy", ("adam")), ("walks", ("adam",)), ("love", ("adam", "y")), ("love", ("y", "adam")), ] for (fun, args) in applications: try: funval = m2.i(Expression.fromstring(fun), g2) argsval = tuple(m2.i(Expression.fromstring(arg), g2) for arg in args) print(f"{fun}({args}) evaluates to {argsval in funval}") except Undefined: print(f"{fun}({args}) evaluates to Undefined") def foldemo(trace=None): """ Interpretation of closed expressions in a first-order model. """ folmodel(quiet=True) print() print("*" * mult) print("FOL Formulas Demo") print("*" * mult) formulas = [ "love (adam, betty)", "(adam = mia)", "\\x. (boy(x) | girl(x))", "\\x. boy(x)(adam)", "\\x y. love(x, y)", "\\x y. love(x, y)(adam)(betty)", "\\x y. love(x, y)(adam, betty)", "\\x y. (boy(x) & love(x, y))", "\\x. exists y. (boy(x) & love(x, y))", "exists z1. boy(z1)", "exists x. (boy(x) & -(x = adam))", "exists x. (boy(x) & all y. love(y, x))", "all x. (boy(x) | girl(x))", "all x. (girl(x) -> exists y. boy(y) & love(x, y))", # Every girl loves exists boy. "exists x. (boy(x) & all y. (girl(y) -> love(y, x)))", # There is exists boy that every girl loves. "exists x. (boy(x) & all y. (girl(y) -> love(x, y)))", # exists boy loves every girl. "all x. (dog(x) -> - girl(x))", "exists x. exists y. 
(love(x, y) & love(x, y))", ] for fmla in formulas: g2.purge() if trace: m2.evaluate(fmla, g2, trace) else: print(f"The value of '{fmla}' is: {m2.evaluate(fmla, g2)}") def satdemo(trace=None): """Satisfiers of an open formula in a first order model.""" print() print("*" * mult) print("Satisfiers Demo") print("*" * mult) folmodel(quiet=True) formulas = [ "boy(x)", "(x = x)", "(boy(x) | girl(x))", "(boy(x) & girl(x))", "love(adam, x)", "love(x, adam)", "-(x = adam)", "exists z22. love(x, z22)", "exists y. love(y, x)", "all y. (girl(y) -> love(x, y))", "all y. (girl(y) -> love(y, x))", "all y. (girl(y) -> (boy(x) & love(y, x)))", "(boy(x) & all y. (girl(y) -> love(x, y)))", "(boy(x) & all y. (girl(y) -> love(y, x)))", "(boy(x) & exists y. (girl(y) & love(y, x)))", "(girl(x) -> dog(x))", "all y. (dog(y) -> (x = y))", "exists y. love(y, x)", "exists y. (love(adam, y) & love(y, x))", ] if trace: print(m2) for fmla in formulas: print(fmla) Expression.fromstring(fmla) parsed = [Expression.fromstring(fmla) for fmla in formulas] for p in parsed: g2.purge() print( "The satisfiers of '{}' are: {}".format(p, m2.satisfiers(p, "x", g2, trace)) ) The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo(num=0, trace=None)` to solve the following problem: Run exists demos. - num = 1: propositional logic demo - num = 2: first order model demo (only if trace is set) - num = 3: first order sentences demo - num = 4: satisfaction of open formulas demo - any other value: run all the demos :param trace: trace = 1, or trace = 2 for more verbose tracing Here is the function: def demo(num=0, trace=None): """ Run exists demos. - num = 1: propositional logic demo - num = 2: first order model demo (only if trace is set) - num = 3: first order sentences demo - num = 4: satisfaction of open formulas demo - any other value: run all the demos :param trace: trace = 1, or trace = 2 for more verbose tracing """ demos = {1: propdemo, 2: folmodel, 3: foldemo, 4: satdemo} try: demos[num](trace=trace) except KeyError: for num in demos: demos[num](trace=trace)
Run some demos. - num = 1: propositional logic demo - num = 2: first order model demo (only if trace is set) - num = 3: first order sentences demo - num = 4: satisfaction of open formulas demo - any other value: run all the demos :param trace: trace = 1, or trace = 2 for more verbose tracing
170,722
import gc import re import nltk The provided code snippet includes necessary dependencies for implementing the `_make_bound_method` function. Write a Python function `def _make_bound_method(func, self)` to solve the following problem: Magic for creating bound methods (used for _unload). Here is the function: def _make_bound_method(func, self): """ Magic for creating bound methods (used for _unload). """ class Foo: def meth(self): pass f = Foo() bound_method = type(f.meth) try: return bound_method(func, self, self.__class__) except TypeError: # python3 return bound_method(func, self)
Magic for creating bound methods (used for _unload).
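A sketch of the effect, assuming the helper above is in scope; on Python 3 this is equivalent to types.MethodType(func, instance). The class and function names here are hypothetical:

class Reader:
    pass

def describe(self):
    return "a %s instance" % type(self).__name__

r = Reader()
r.describe = _make_bound_method(describe, r)
print(r.describe())  # 'self' is bound to r, as in a normal method call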
170,723
import functools from nltk.corpus.reader.api import CorpusReader from nltk.corpus.reader.util import StreamBackedCorpusView, concat def _parse_args(fun): @functools.wraps(fun) def decorator(self, fileids=None, **kwargs): kwargs.pop("tags", None) if not fileids: fileids = self.fileids() return fun(self, fileids, **kwargs) return decorator
null
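A sketch of how a corpus-reader method might use the decorator: when no fileids are supplied, the wrapped method falls back to `self.fileids()`, and any `tags` keyword is silently dropped. `ToyReader` is hypothetical:

class ToyReader:
    def fileids(self):
        return ["a.txt", "b.txt"]

    @_parse_args
    def words(self, fileids=None, **kwargs):
        return fileids

reader = ToyReader()
print(reader.words())           # ['a.txt', 'b.txt'] (defaulted)
print(reader.words(["a.txt"]))  # ['a.txt']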
170,724
from collections import namedtuple from functools import partial, wraps from nltk.corpus.reader.api import CategorizedCorpusReader from nltk.corpus.reader.plaintext import PlaintextCorpusReader from nltk.corpus.reader.util import concat, read_blankline_block from nltk.tokenize import blankline_tokenize, sent_tokenize, word_tokenize def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ... The provided code snippet includes necessary dependencies for implementing the `comma_separated_string_args` function. Write a Python function `def comma_separated_string_args(func)` to solve the following problem: A decorator that allows a function to be called with a single string of comma-separated values which become individual function arguments. Here is the function: def comma_separated_string_args(func): """ A decorator that allows a function to be called with a single string of comma-separated values which become individual function arguments. """ @wraps(func) def wrapper(*args, **kwargs): _args = list() for arg in args: if isinstance(arg, str): _args.append({part.strip() for part in arg.split(",")}) elif isinstance(arg, list): _args.append(set(arg)) else: _args.append(arg) for name, value in kwargs.items(): if isinstance(value, str): kwargs[name] = {part.strip() for part in value.split(",")} return func(*_args, **kwargs) return wrapper
A decorator that allows a function to be called with a single string of comma-separated values which become individual function arguments.
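With the decorator above in scope (note that both comma-separated strings and lists are normalized to sets, so duplicates collapse and order is not preserved):

@comma_separated_string_args
def select(categories):
    return sorted(categories)

print(select("news, fiction , romance"))    # ['fiction', 'news', 'romance']
print(select(["news", "news", "fiction"]))  # ['fiction', 'news']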
170,725
from collections import namedtuple from functools import partial, wraps from nltk.corpus.reader.api import CategorizedCorpusReader from nltk.corpus.reader.plaintext import PlaintextCorpusReader from nltk.corpus.reader.util import concat, read_blankline_block from nltk.tokenize import blankline_tokenize, sent_tokenize, word_tokenize def read_blankline_block(stream): s = "" while True: line = stream.readline() # End of file: if not line: if s: return [s] else: return [] # Blank line: elif line and not line.strip(): if s: return [s] # Other line: else: s += line def read_parse_blankline_block(stream, parser): block = read_blankline_block(stream) if block: return [parser.render(block[0])] return block
null
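Behaviour sketch with an in-memory stream, assuming the block reader above is in scope; each call returns at most one paragraph, and an empty list signals end of file:

from io import StringIO

stream = StringIO("First paragraph, line 1.\nline 2.\n\nSecond paragraph.\n")
print(read_blankline_block(stream))  # ['First paragraph, line 1.\nline 2.\n']
print(read_blankline_block(stream))  # ['Second paragraph.\n']
print(read_blankline_block(stream))  # []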
170,726
import bisect import os import pickle import re import tempfile from functools import reduce from xml.etree import ElementTree from nltk.data import ( FileSystemPathPointer, PathPointer, SeekableUnicodeStreamReader, ZipFilePathPointer, ) from nltk.internals import slice_bounds from nltk.tokenize import wordpunct_tokenize from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence class StreamBackedCorpusView(AbstractLazySequence): """ A 'view' of a corpus file, which acts like a sequence of tokens: it can be accessed by index, iterated over, etc. However, the tokens are only constructed as-needed -- the entire corpus is never stored in memory at once. The constructor to ``StreamBackedCorpusView`` takes two arguments: a corpus fileid (specified as a string or as a ``PathPointer``); and a block reader. A "block reader" is a function that reads zero or more tokens from a stream, and returns them as a list. A very simple example of a block reader is: >>> def simple_block_reader(stream): ... return stream.readline().split() This simple block reader reads a single line at a time, and returns a single token (consisting of a string) for each whitespace-separated substring on the line. When deciding how to define the block reader for a given corpus, careful consideration should be given to the size of blocks handled by the block reader. Smaller block sizes will increase the memory requirements of the corpus view's internal data structures (by 2 integers per block). On the other hand, larger block sizes may decrease performance for random access to the corpus. (But note that larger block sizes will *not* decrease performance for iteration.) Internally, ``CorpusView`` maintains a partial mapping from token index to file position, with one entry per block. When a token with a given index *i* is requested, the ``CorpusView`` constructs it as follows: 1. First, it searches the toknum/filepos mapping for the token index closest to (but less than or equal to) *i*. 2. Then, starting at the file position corresponding to that index, it reads one block at a time using the block reader until it reaches the requested token. The toknum/filepos mapping is created lazily: it is initially empty, but every time a new block is read, the block's initial token is added to the mapping. (Thus, the toknum/filepos map has one entry per block.) In order to increase efficiency for random access patterns that have high degrees of locality, the corpus view may cache one or more blocks. :note: Each ``CorpusView`` object internally maintains an open file object for its underlying corpus file. This file should be automatically closed when the ``CorpusView`` is garbage collected, but if you wish to close it manually, use the ``close()`` method. If you access a ``CorpusView``'s items after it has been closed, the file object will be automatically re-opened. :warning: If the contents of the file are modified during the lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior is undefined. :warning: If a unicode encoding is specified when constructing a ``CorpusView``, then the block reader may only call ``stream.seek()`` with offsets that have been returned by ``stream.tell()``; in particular, calling ``stream.seek()`` with relative offsets, or with offsets based on string lengths, may lead to incorrect behavior. :ivar _block_reader: The function used to read a single block from the underlying file stream. :ivar _toknum: A list containing the token index of each block that has been processed. 
In particular, ``_toknum[i]`` is the token index of the first token in block ``i``. Together with ``_filepos``, this forms a partial mapping between token indices and file positions. :ivar _filepos: A list containing the file position of each block that has been processed. In particular, ``_toknum[i]`` is the file position of the first character in block ``i``. Together with ``_toknum``, this forms a partial mapping between token indices and file positions. :ivar _stream: The stream used to access the underlying corpus file. :ivar _len: The total number of tokens in the corpus, if known; or None, if the number of tokens is not yet known. :ivar _eofpos: The character position of the last character in the file. This is calculated when the corpus view is initialized, and is used to decide when the end of file has been reached. :ivar _cache: A cache of the most recently read block. It is encoded as a tuple (start_toknum, end_toknum, tokens), where start_toknum is the token index of the first token in the block; end_toknum is the token index of the first token not in the block; and tokens is a list of the tokens in the block. """ def __init__(self, fileid, block_reader=None, startpos=0, encoding="utf8"): """ Create a new corpus view, based on the file ``fileid``, and read with ``block_reader``. See the class documentation for more information. :param fileid: The path to the file that is read by this corpus view. ``fileid`` can either be a string or a ``PathPointer``. :param startpos: The file position at which the view will start reading. This can be used to skip over preface sections. :param encoding: The unicode encoding that should be used to read the file's contents. If no encoding is specified, then the file's contents will be read as a non-unicode string (i.e., a str). """ if block_reader: self.read_block = block_reader # Initialize our toknum/filepos mapping. self._toknum = [0] self._filepos = [startpos] self._encoding = encoding # We don't know our length (number of tokens) yet. self._len = None self._fileid = fileid self._stream = None self._current_toknum = None """This variable is set to the index of the next token that will be read, immediately before ``self.read_block()`` is called. This is provided for the benefit of the block reader, which under rare circumstances may need to know the current token number.""" self._current_blocknum = None """This variable is set to the index of the next block that will be read, immediately before ``self.read_block()`` is called. This is provided for the benefit of the block reader, which under rare circumstances may need to know the current block number.""" # Find the length of the file. try: if isinstance(self._fileid, PathPointer): self._eofpos = self._fileid.file_size() else: self._eofpos = os.stat(self._fileid).st_size except Exception as exc: raise ValueError(f"Unable to open or access {fileid!r} -- {exc}") from exc # Maintain a cache of the most recently read block, to # increase efficiency of random access. self._cache = (-1, -1, None) fileid = property( lambda self: self._fileid, doc=""" The fileid of the file that is accessed by this view. :type: str or PathPointer""", ) def read_block(self, stream): """ Read a block from the input stream. :return: a block of tokens from the input stream :rtype: list(any) :param stream: an input stream :type stream: stream """ raise NotImplementedError("Abstract Method") def _open(self): """ Open the file stream associated with this corpus view. 
This will be called performed if any value is read from the view while its file stream is closed. """ if isinstance(self._fileid, PathPointer): self._stream = self._fileid.open(self._encoding) elif self._encoding: self._stream = SeekableUnicodeStreamReader( open(self._fileid, "rb"), self._encoding ) else: self._stream = open(self._fileid, "rb") def close(self): """ Close the file stream associated with this corpus view. This can be useful if you are worried about running out of file handles (although the stream should automatically be closed upon garbage collection of the corpus view). If the corpus view is accessed after it is closed, it will be automatically re-opened. """ if self._stream is not None: self._stream.close() self._stream = None def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __len__(self): if self._len is None: # iterate_from() sets self._len when it reaches the end # of the file: for tok in self.iterate_from(self._toknum[-1]): pass return self._len def __getitem__(self, i): if isinstance(i, slice): start, stop = slice_bounds(self, i) # Check if it's in the cache. offset = self._cache[0] if offset <= start and stop <= self._cache[1]: return self._cache[2][start - offset : stop - offset] # Construct & return the result. return LazySubsequence(self, start, stop) else: # Handle negative indices if i < 0: i += len(self) if i < 0: raise IndexError("index out of range") # Check if it's in the cache. offset = self._cache[0] if offset <= i < self._cache[1]: return self._cache[2][i - offset] # Use iterate_from to extract it. try: return next(self.iterate_from(i)) except StopIteration as e: raise IndexError("index out of range") from e # If we wanted to be thread-safe, then this method would need to # do some locking. def iterate_from(self, start_tok): # Start by feeding from the cache, if possible. if self._cache[0] <= start_tok < self._cache[1]: for tok in self._cache[2][start_tok - self._cache[0] :]: yield tok start_tok += 1 # Decide where in the file we should start. If `start` is in # our mapping, then we can jump straight to the correct block; # otherwise, start at the last block we've processed. if start_tok < self._toknum[-1]: block_index = bisect.bisect_right(self._toknum, start_tok) - 1 toknum = self._toknum[block_index] filepos = self._filepos[block_index] else: block_index = len(self._toknum) - 1 toknum = self._toknum[-1] filepos = self._filepos[-1] # Open the stream, if it's not open already. if self._stream is None: self._open() # If the file is empty, the while loop will never run. # This *seems* to be all the state we need to set: if self._eofpos == 0: self._len = 0 # Each iteration through this loop, we read a single block # from the stream. while filepos < self._eofpos: # Read the next block. self._stream.seek(filepos) self._current_toknum = toknum self._current_blocknum = block_index tokens = self.read_block(self._stream) assert isinstance(tokens, (tuple, list, AbstractLazySequence)), ( "block reader %s() should return list or tuple." % self.read_block.__name__ ) num_toks = len(tokens) new_filepos = self._stream.tell() assert ( new_filepos > filepos ), "block reader %s() should consume at least 1 byte (filepos=%d)" % ( self.read_block.__name__, filepos, ) # Update our cache. self._cache = (toknum, toknum + num_toks, list(tokens)) # Update our mapping. assert toknum <= self._toknum[-1] if num_toks > 0: block_index += 1 if toknum == self._toknum[-1]: assert new_filepos > self._filepos[-1] # monotonic! 
self._filepos.append(new_filepos) self._toknum.append(toknum + num_toks) else: # Check for consistency: assert ( new_filepos == self._filepos[block_index] ), "inconsistent block reader (num chars read)" assert ( toknum + num_toks == self._toknum[block_index] ), "inconsistent block reader (num tokens returned)" # If we reached the end of the file, then update self._len if new_filepos == self._eofpos: self._len = toknum + num_toks # Generate the tokens in this block (but skip any tokens # before start_tok). Note that between yields, our state # may be modified. for tok in tokens[max(0, start_tok - toknum) :]: yield tok # If we're at the end of the file, then we're done. assert new_filepos <= self._eofpos if new_filepos == self._eofpos: break # Update our indices toknum += num_toks filepos = new_filepos # If we reach this point, then we should know our length. assert self._len is not None # Enforce closing of stream once we reached end of file # We should have reached EOF once we're out of the while loop. self.close() # Use concat for these, so we can use a ConcatenatedCorpusView # when possible. def __add__(self, other): return concat([self, other]) def __radd__(self, other): return concat([other, self]) def __mul__(self, count): return concat([self] * count) def __rmul__(self, count): return concat([self] * count) class ConcatenatedCorpusView(AbstractLazySequence): """ A 'view' of a corpus file that joins together one or more ``StreamBackedCorpusViews<StreamBackedCorpusView>``. At most one file handle is left open at any time. """ def __init__(self, corpus_views): self._pieces = corpus_views """A list of the corpus subviews that make up this concatenation.""" self._offsets = [0] """A list of offsets, indicating the index at which each subview begins. In particular:: offsets[i] = sum([len(p) for p in pieces[:i]])""" self._open_piece = None """The most recently accessed corpus subview (or None). Before a new subview is accessed, this subview will be closed.""" def __len__(self): if len(self._offsets) <= len(self._pieces): # Iterate to the end of the corpus. for tok in self.iterate_from(self._offsets[-1]): pass return self._offsets[-1] def close(self): for piece in self._pieces: piece.close() def iterate_from(self, start_tok): piecenum = bisect.bisect_right(self._offsets, start_tok) - 1 while piecenum < len(self._pieces): offset = self._offsets[piecenum] piece = self._pieces[piecenum] # If we've got another piece open, close it first. if self._open_piece is not piece: if self._open_piece is not None: self._open_piece.close() self._open_piece = piece # Get everything we can from this piece. yield from piece.iterate_from(max(0, start_tok - offset)) # Update the offset table. if piecenum + 1 == len(self._offsets): self._offsets.append(self._offsets[-1] + len(piece)) # Move on to the next piece. piecenum += 1 def reduce(function: Callable[[_T, _S], _T], sequence: Iterable[_S], initial: _T) -> _T: ... def reduce(function: Callable[[_T, _T], _T], sequence: Iterable[_T]) -> _T: ... The provided code snippet includes necessary dependencies for implementing the `concat` function. Write a Python function `def concat(docs)` to solve the following problem: Concatenate together the contents of multiple documents from a single corpus, using an appropriate concatenation function. This utility function is used by corpus readers when the user requests more than one document at a time. 
Here is the function: def concat(docs): """ Concatenate together the contents of multiple documents from a single corpus, using an appropriate concatenation function. This utility function is used by corpus readers when the user requests more than one document at a time. """ if len(docs) == 1: return docs[0] if len(docs) == 0: raise ValueError("concat() expects at least one object!") types = {d.__class__ for d in docs} # If they're all strings, use string concatenation. if all(isinstance(doc, str) for doc in docs): return "".join(docs) # If they're all corpus views, then use ConcatenatedCorpusView. for typ in types: if not issubclass(typ, (StreamBackedCorpusView, ConcatenatedCorpusView)): break else: return ConcatenatedCorpusView(docs) # If they're all lazy sequences, use a lazy concatenation for typ in types: if not issubclass(typ, AbstractLazySequence): break else: return LazyConcatenation(docs) # Otherwise, see what we can do: if len(types) == 1: typ = list(types)[0] if issubclass(typ, list): return reduce((lambda a, b: a + b), docs, []) if issubclass(typ, tuple): return reduce((lambda a, b: a + b), docs, ()) if ElementTree.iselement(typ): xmltree = ElementTree.Element("documents") for doc in docs: xmltree.append(doc) return xmltree # No method found! raise ValueError("Don't know how to concatenate types: %r" % types)
Concatenate together the contents of multiple documents from a single corpus, using an appropriate concatenation function. This utility function is used by corpus readers when the user requests more than one document at a time.
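A minimal usage sketch for `concat` (one assumption worth flagging: the import path `nltk.corpus.reader.util` is where this helper lives in NLTK; plain strings and lists exercise its fallback branches without any corpus data):

from nltk.corpus.reader.util import concat

# All-string inputs take the string-concatenation branch.
print(concat(["Hello, ", "world"]))   # Hello, world

# A homogeneous list of lists is merged via reduce(add, ...).
print(concat([[1, 2], [3], [4, 5]]))  # [1, 2, 3, 4, 5]

# Mixed, unrelated element types fall through to the final raise.
try:
    concat([[1], (2,)])
except ValueError as err:
    print(err)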
170,727
import bisect import os import pickle import re import tempfile from functools import reduce from xml.etree import ElementTree from nltk.data import ( FileSystemPathPointer, PathPointer, SeekableUnicodeStreamReader, ZipFilePathPointer, ) from nltk.internals import slice_bounds from nltk.tokenize import wordpunct_tokenize from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence def read_whitespace_block(stream): toks = [] for i in range(20): # Read 20 lines at a time. toks.extend(stream.readline().split()) return toks
null
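A quick sanity check for `read_whitespace_block`, assuming the same `nltk.corpus.reader.util` import path; an in-memory `io.StringIO` stands in for a corpus file stream:

import io
from nltk.corpus.reader.util import read_whitespace_block

stream = io.StringIO("colorless green ideas\nsleep furiously\n")
# Up to 20 lines are consumed per call; past EOF, readline() returns ""
# and split() contributes nothing, so short files are handled cleanly.
print(read_whitespace_block(stream))
# ['colorless', 'green', 'ideas', 'sleep', 'furiously']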
170,728
import bisect import os import pickle import re import tempfile from functools import reduce from xml.etree import ElementTree from nltk.data import ( FileSystemPathPointer, PathPointer, SeekableUnicodeStreamReader, ZipFilePathPointer, ) from nltk.internals import slice_bounds from nltk.tokenize import wordpunct_tokenize from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence def read_wordpunct_block(stream): toks = [] for i in range(20): # Read 20 lines at a time. toks.extend(wordpunct_tokenize(stream.readline())) return toks
null
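The same pattern for `read_wordpunct_block` (import path assumed as above) shows how punctuation is split off by `wordpunct_tokenize`:

import io
from nltk.corpus.reader.util import read_wordpunct_block

stream = io.StringIO("Good muffins cost $3.88\nin New York.\n")
print(read_wordpunct_block(stream))
# ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.']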
170,729
import bisect import os import pickle import re import tempfile from functools import reduce from xml.etree import ElementTree from nltk.data import ( FileSystemPathPointer, PathPointer, SeekableUnicodeStreamReader, ZipFilePathPointer, ) from nltk.internals import slice_bounds from nltk.tokenize import wordpunct_tokenize from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence def read_line_block(stream): toks = [] for i in range(20): line = stream.readline() if not line: return toks toks.append(line.rstrip("\n")) return toks
null
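And for `read_line_block` (same assumed import path), each returned token is one line with its trailing newline stripped:

import io
from nltk.corpus.reader.util import read_line_block

stream = io.StringIO("first line\nsecond line\n")
print(read_line_block(stream))   # ['first line', 'second line']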
170,730
import bisect
import os
import pickle
import re
import tempfile
from functools import reduce
from xml.etree import ElementTree
from nltk.data import (
    FileSystemPathPointer,
    PathPointer,
    SeekableUnicodeStreamReader,
    ZipFilePathPointer,
)
from nltk.internals import slice_bounds
from nltk.tokenize import wordpunct_tokenize
from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence
def read_alignedsent_block(stream): s = "" while True: line = stream.readline() # End of file (checked before indexing, since an empty string cannot be indexed): if not line: if s: return [s] else: return [] # Divider or blank line: if line[0] == "=" or line[0] == "\n" or line[:2] == "\r\n": continue # Other line: s += line if re.match(r"^\d+-\d+", line) is not None: return [s]
null
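A hedged sketch of the expected input shape: a block ends at a word-alignment line matching ^\d+-\d+, so a source line, a target line, and an alignment line come back as one string (import path assumed as above):

import io
from nltk.corpus.reader.util import read_alignedsent_block

stream = io.StringIO("the house\nla maison\n0-0 1-1\n")
print(read_alignedsent_block(stream))
# ['the house\nla maison\n0-0 1-1\n']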
170,731
import bisect import os import pickle import re import tempfile from functools import reduce from xml.etree import ElementTree from nltk.data import ( FileSystemPathPointer, PathPointer, SeekableUnicodeStreamReader, ZipFilePathPointer, ) from nltk.internals import slice_bounds from nltk.tokenize import wordpunct_tokenize from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence The provided code snippet includes necessary dependencies for implementing the `read_regexp_block` function. Write a Python function `def read_regexp_block(stream, start_re, end_re=None)` to solve the following problem: Read a sequence of tokens from a stream, where tokens begin with lines that match ``start_re``. If ``end_re`` is specified, then tokens end with lines that match ``end_re``; otherwise, tokens end whenever the next line matching ``start_re`` or EOF is found. Here is the function: def read_regexp_block(stream, start_re, end_re=None): """ Read a sequence of tokens from a stream, where tokens begin with lines that match ``start_re``. If ``end_re`` is specified, then tokens end with lines that match ``end_re``; otherwise, tokens end whenever the next line matching ``start_re`` or EOF is found. """ # Scan until we find a line matching the start regexp. while True: line = stream.readline() if not line: return [] # end of file. if re.match(start_re, line): break # Scan until we find another line matching the regexp, or EOF. lines = [line] while True: oldpos = stream.tell() line = stream.readline() # End of file: if not line: return ["".join(lines)] # End of token: if end_re is not None and re.match(end_re, line): return ["".join(lines)] # Start of new token: backup to just before it starts, and # return the token we've already collected. if end_re is None and re.match(start_re, line): stream.seek(oldpos) return ["".join(lines)] # Anything else is part of the token. lines.append(line)
Read a sequence of tokens from a stream, where tokens begin with lines that match ``start_re``. If ``end_re`` is specified, then tokens end with lines that match ``end_re``; otherwise, tokens end whenever the next line matching ``start_re`` or EOF is found.
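A usage sketch for `read_regexp_block` (import path assumed as above); the `<rec>` marker is a made-up delimiter for illustration. With no end_re, each call stops just before the next start_re match or at EOF:

import io
from nltk.corpus.reader.util import read_regexp_block

stream = io.StringIO("# preamble\n<rec>\nalpha\n<rec>\nbeta\n")
print(read_regexp_block(stream, start_re=r"<rec>"))  # ['<rec>\nalpha\n']
print(read_regexp_block(stream, start_re=r"<rec>"))  # ['<rec>\nbeta\n']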
170,732
import bisect
import os
import pickle
import re
import tempfile
from functools import reduce
from xml.etree import ElementTree
from nltk.data import (
    FileSystemPathPointer,
    PathPointer,
    SeekableUnicodeStreamReader,
    ZipFilePathPointer,
)
from nltk.internals import slice_bounds
from nltk.tokenize import wordpunct_tokenize
from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence
def _sub_space(m): """Helper function: given a regexp match, return a string of spaces that's the same length as the matched string.""" return " " * (m.end() - m.start())
def _parse_sexpr_block(block): tokens = [] start = end = 0 while end < len(block): m = re.compile(r"\S").search(block, end) if not m: return tokens, end start = m.start() # Case 1: sexpr is not parenthesized. if m.group() != "(": m2 = re.compile(r"[\s(]").search(block, start) if m2: end = m2.start() else: if tokens: return tokens, end raise ValueError("Block too small") # Case 2: parenthesized sexpr. else: nesting = 0 for m in re.compile(r"[()]").finditer(block, start): if m.group() == "(": nesting += 1 else: nesting -= 1 if nesting == 0: end = m.end() break else: if tokens: return tokens, end raise ValueError("Block too small") tokens.append(block[start:end]) return tokens, end
The provided code snippet includes necessary dependencies for implementing the `read_sexpr_block` function. Write a Python function `def read_sexpr_block(stream, block_size=16384, comment_char=None)` to solve the following problem: Read a sequence of s-expressions from the stream, and leave the stream's file position at the end of the last complete s-expression read. This function will always return at least one s-expression, unless there are no more s-expressions in the file. If the file ends in the middle of an s-expression, then that incomplete s-expression is returned when the end of the file is reached. :param block_size: The default block size for reading. If an s-expression is longer than one block, then more than one block will be read. :param comment_char: A character that marks comments. Any lines that begin with this character will be stripped out. (If spaces or tabs precede the comment character, then the line will not be stripped.) Here is the function: def read_sexpr_block(stream, block_size=16384, comment_char=None): """ Read a sequence of s-expressions from the stream, and leave the stream's file position at the end of the last complete s-expression read. This function will always return at least one s-expression, unless there are no more s-expressions in the file. If the file ends in the middle of an s-expression, then that incomplete s-expression is returned when the end of the file is reached. :param block_size: The default block size for reading. If an s-expression is longer than one block, then more than one block will be read. :param comment_char: A character that marks comments. Any lines that begin with this character will be stripped out. (If spaces or tabs precede the comment character, then the line will not be stripped.) """ start = stream.tell() block = stream.read(block_size) encoding = getattr(stream, "encoding", None) assert encoding is not None or isinstance(block, str) if encoding not in (None, "utf-8"): import warnings warnings.warn( "Parsing may fail, depending on the properties " "of the %s encoding!" % encoding ) # (e.g., the utf-16 encoding does not work because it insists # on adding BOMs to the beginning of encoded strings.) 
if comment_char: COMMENT = re.compile("(?m)^%s.*$" % re.escape(comment_char)) while True: try: # If we're stripping comments, then make sure our block ends # on a line boundary; and then replace any comments with # space characters. (We can't just strip them out -- that # would make our offset wrong.) if comment_char: block += stream.readline() block = re.sub(COMMENT, _sub_space, block) # Read the block. tokens, offset = _parse_sexpr_block(block) # Skip whitespace offset = re.compile(r"\s*").search(block, offset).end() # Move to the end position. if encoding is None: stream.seek(start + offset) else: stream.seek(start + len(block[:offset].encode(encoding))) # Return the list of tokens we processed return tokens except ValueError as e: if e.args[0] == "Block too small": next_block = stream.read(block_size) if next_block: block += next_block continue else: # The file ended mid-sexpr -- return what we got. return [block.strip()] else: raise
Read a sequence of s-expressions from the stream, and leave the stream's file position at the end of the last complete s-expression read. This function will always return at least one s-expression, unless there are no more s-expressions in the file. If the file ends in the middle of an s-expression, then that incomplete s-expression is returned when the end of the file is reached. :param block_size: The default block size for reading. If an s-expression is longer than one block, then more than one block will be read. :param comment_char: A character that marks comments. Any lines that begin with this character will be stripped out. (If spaces or tabs precede the comment character, then the line will not be stripped.)
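A usage sketch for `read_sexpr_block` (import path assumed as above). A deliberately tiny block_size forces the re-read path, and the trailing unbalanced "(f" is returned as-is once EOF is hit:

import io
from nltk.corpus.reader.util import read_sexpr_block

stream = io.StringIO("(a (b c)) (d e) atom (f")
while True:
    toks = read_sexpr_block(stream, block_size=10)
    if not toks:
        break
    print(toks)
# ['(a (b c))']
# ['(d e)']
# ['atom']
# ['(f']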
170,733
import bisect import os import pickle import re import tempfile from functools import reduce from xml.etree import ElementTree from nltk.data import ( FileSystemPathPointer, PathPointer, SeekableUnicodeStreamReader, ZipFilePathPointer, ) from nltk.internals import slice_bounds from nltk.tokenize import wordpunct_tokenize from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence def tagged_treebank_para_block_reader(stream): # Read the next paragraph. para = "" while True: line = stream.readline() # End of paragraph: if re.match(r"======+\s*$", line): if para.strip(): return [para] # End of file: elif line == "": if para.strip(): return [para] else: return [] # Content line: else: para += line
null
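A sketch with a synthetic two-paragraph input for `tagged_treebank_para_block_reader` (import path assumed as above); the ====== divider closes the first paragraph, EOF closes the second:

import io
from nltk.corpus.reader.util import tagged_treebank_para_block_reader

stream = io.StringIO("The/DT dog/NN barked/VBD ./.\n======\nIt/PRP ran/VBD ./.\n")
print(tagged_treebank_para_block_reader(stream))  # ['The/DT dog/NN barked/VBD ./.\n']
print(tagged_treebank_para_block_reader(stream))  # ['It/PRP ran/VBD ./.\n']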
170,734
import re
from collections import defaultdict
from functools import reduce
from nltk.corpus.reader import CorpusReader
lin_thesaurus: LinThesaurusCorpusReader = LazyCorpusLoader( "lin_thesaurus", LinThesaurusCorpusReader, r".*\.lsp" )
def demo(): from nltk.corpus import lin_thesaurus as thes word1 = "business" word2 = "enterprise" print("Getting synonyms for " + word1) print(thes.synonyms(word1)) print("Getting scored synonyms for " + word1) print(thes.scored_synonyms(word1)) print("Getting synonyms from simN.lsp (noun subsection) for " + word1) print(thes.synonyms(word1, fileid="simN.lsp")) print(f"Similarity score for {word1} and {word2}:") print(thes.similarity(word1, word2))
null
170,735
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap def _pretty_longstring(defstr, prefix="", wrap_at=65): """ Helper function for pretty-printing a long string. :param defstr: The string to be printed. :type defstr: str :return: A nicely formatted string representation of the long string. :rtype: str """ outstr = "" for line in textwrap.fill(defstr, wrap_at).split("\n"): outstr += prefix + line + "\n" return outstr The provided code snippet includes necessary dependencies for implementing the `_pretty_any` function. Write a Python function `def _pretty_any(obj)` to solve the following problem: Helper function for pretty-printing any AttrDict object. :param obj: The obj to be printed. :type obj: AttrDict :return: A nicely formatted string representation of the AttrDict object. :rtype: str Here is the function: def _pretty_any(obj): """ Helper function for pretty-printing any AttrDict object. :param obj: The obj to be printed. :type obj: AttrDict :return: A nicely formatted string representation of the AttrDict object. :rtype: str """ outstr = "" for k in obj: if isinstance(obj[k], str) and len(obj[k]) > 65: outstr += f"[{k}]\n" outstr += "{}".format(_pretty_longstring(obj[k], prefix=" ")) outstr += "\n" else: outstr += f"[{k}] {obj[k]}\n" return outstr
Helper function for pretty-printing any AttrDict object. :param obj: The obj to be printed. :type obj: AttrDict :return: A nicely formatted string representation of the AttrDict object. :rtype: str
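Because `_pretty_any` only iterates keys and indexes the mapping, a plain dict is enough to exercise it; the import path assumes NLTK's framenet reader module, where this helper is defined, and the record below is made up:

from nltk.corpus.reader.framenet import _pretty_any

record = {"ID": 42, "name": "Example", "definition": "lorem ipsum " * 12}
print(_pretty_any(record))   # the long 'definition' is wrapped under a [definition] header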
170,736
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap def _pretty_longstring(defstr, prefix="", wrap_at=65): """ Helper function for pretty-printing a long string. :param defstr: The string to be printed. :type defstr: str :return: A nicely formatted string representation of the long string. :rtype: str """ outstr = "" for line in textwrap.fill(defstr, wrap_at).split("\n"): outstr += prefix + line + "\n" return outstr The provided code snippet includes necessary dependencies for implementing the `_pretty_semtype` function. Write a Python function `def _pretty_semtype(st)` to solve the following problem: Helper function for pretty-printing a semantic type. :param st: The semantic type to be printed. :type st: AttrDict :return: A nicely formatted string representation of the semantic type. :rtype: str Here is the function: def _pretty_semtype(st): """ Helper function for pretty-printing a semantic type. :param st: The semantic type to be printed. :type st: AttrDict :return: A nicely formatted string representation of the semantic type. :rtype: str """ semkeys = st.keys() if len(semkeys) == 1: return "<None>" outstr = "" outstr += "semantic type ({0.ID}): {0.name}\n".format(st) if "abbrev" in semkeys: outstr += f"[abbrev] {st.abbrev}\n" if "definition" in semkeys: outstr += "[definition]\n" outstr += _pretty_longstring(st.definition, " ") outstr += f"[rootType] {st.rootType.name}({st.rootType.ID})\n" if st.superType is None: outstr += "[superType] <None>\n" else: outstr += f"[superType] {st.superType.name}({st.superType.ID})\n" outstr += f"[subTypes] {len(st.subTypes)} subtypes\n" outstr += ( " " + ", ".join(f"{x.name}({x.ID})" for x in st.subTypes) + "\n" * (len(st.subTypes) > 0) ) return outstr
Helper function for pretty-printing a semantic type. :param st: The semantic type to be printed. :type st: AttrDict :return: A nicely formatted string representation of the semantic type. :rtype: str
170,737
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap The provided code snippet includes necessary dependencies for implementing the `_pretty_frame_relation_type` function. Write a Python function `def _pretty_frame_relation_type(freltyp)` to solve the following problem: Helper function for pretty-printing a frame relation type. :param freltyp: The frame relation type to be printed. :type freltyp: AttrDict :return: A nicely formatted string representation of the frame relation type. :rtype: str Here is the function: def _pretty_frame_relation_type(freltyp): """ Helper function for pretty-printing a frame relation type. :param freltyp: The frame relation type to be printed. :type freltyp: AttrDict :return: A nicely formatted string representation of the frame relation type. :rtype: str """ outstr = "<frame relation type ({0.ID}): {0.superFrameName} -- {0.name} -> {0.subFrameName}>".format( freltyp ) return outstr
Helper function for pretty-printing a frame relation type. :param freltyp: The frame relation type to be printed. :type freltyp: AttrDict :return: A nicely formatted string representation of the frame relation type. :rtype: str
170,738
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap The provided code snippet includes necessary dependencies for implementing the `_pretty_frame_relation` function. Write a Python function `def _pretty_frame_relation(frel)` to solve the following problem: Helper function for pretty-printing a frame relation. :param frel: The frame relation to be printed. :type frel: AttrDict :return: A nicely formatted string representation of the frame relation. :rtype: str Here is the function: def _pretty_frame_relation(frel): """ Helper function for pretty-printing a frame relation. :param frel: The frame relation to be printed. :type frel: AttrDict :return: A nicely formatted string representation of the frame relation. :rtype: str """ outstr = "<{0.type.superFrameName}={0.superFrameName} -- {0.type.name} -> {0.type.subFrameName}={0.subFrameName}>".format( frel ) return outstr
Helper function for pretty-printing a frame relation. :param frel: The frame relation to be printed. :type frel: AttrDict :return: A nicely formatted string representation of the frame relation. :rtype: str
170,739
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap The provided code snippet includes necessary dependencies for implementing the `_pretty_fe_relation` function. Write a Python function `def _pretty_fe_relation(ferel)` to solve the following problem: Helper function for pretty-printing an FE relation. :param ferel: The FE relation to be printed. :type ferel: AttrDict :return: A nicely formatted string representation of the FE relation. :rtype: str Here is the function: def _pretty_fe_relation(ferel): """ Helper function for pretty-printing an FE relation. :param ferel: The FE relation to be printed. :type ferel: AttrDict :return: A nicely formatted string representation of the FE relation. :rtype: str """ outstr = "<{0.type.superFrameName}={0.frameRelation.superFrameName}.{0.superFEName} -- {0.type.name} -> {0.type.subFrameName}={0.frameRelation.subFrameName}.{0.subFEName}>".format( ferel ) return outstr
Helper function for pretty-printing an FE relation. :param ferel: The FE relation to be printed. :type ferel: AttrDict :return: A nicely formatted string representation of the FE relation. :rtype: str
170,740
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap def _pretty_longstring(defstr, prefix="", wrap_at=65): """ Helper function for pretty-printing a long string. :param defstr: The string to be printed. :type defstr: str :return: A nicely formatted string representation of the long string. :rtype: str """ outstr = "" for line in textwrap.fill(defstr, wrap_at).split("\n"): outstr += prefix + line + "\n" return outstr The provided code snippet includes necessary dependencies for implementing the `_pretty_lu` function. Write a Python function `def _pretty_lu(lu)` to solve the following problem: Helper function for pretty-printing a lexical unit. :param lu: The lu to be printed. :type lu: AttrDict :return: A nicely formatted string representation of the lexical unit. :rtype: str Here is the function: def _pretty_lu(lu): """ Helper function for pretty-printing a lexical unit. :param lu: The lu to be printed. :type lu: AttrDict :return: A nicely formatted string representation of the lexical unit. :rtype: str """ lukeys = lu.keys() outstr = "" outstr += "lexical unit ({0.ID}): {0.name}\n\n".format(lu) if "definition" in lukeys: outstr += "[definition]\n" outstr += _pretty_longstring(lu.definition, " ") if "frame" in lukeys: outstr += f"\n[frame] {lu.frame.name}({lu.frame.ID})\n" if "incorporatedFE" in lukeys: outstr += f"\n[incorporatedFE] {lu.incorporatedFE}\n" if "POS" in lukeys: outstr += f"\n[POS] {lu.POS}\n" if "status" in lukeys: outstr += f"\n[status] {lu.status}\n" if "totalAnnotated" in lukeys: outstr += f"\n[totalAnnotated] {lu.totalAnnotated} annotated examples\n" if "lexemes" in lukeys: outstr += "\n[lexemes] {}\n".format( " ".join(f"{lex.name}/{lex.POS}" for lex in lu.lexemes) ) if "semTypes" in lukeys: outstr += f"\n[semTypes] {len(lu.semTypes)} semantic types\n" outstr += ( " " * (len(lu.semTypes) > 0) + ", ".join(f"{x.name}({x.ID})" for x in lu.semTypes) + "\n" * (len(lu.semTypes) > 0) ) if "URL" in lukeys: outstr += f"\n[URL] {lu.URL}\n" if "subCorpus" in lukeys: subc = [x.name for x in lu.subCorpus] outstr += f"\n[subCorpus] {len(lu.subCorpus)} subcorpora\n" for line in textwrap.fill(", ".join(sorted(subc)), 60).split("\n"): outstr += f" {line}\n" if "exemplars" in lukeys: outstr += "\n[exemplars] {} sentences across all subcorpora\n".format( len(lu.exemplars) ) return outstr
Helper function for pretty-printing a lexical unit. :param lu: The lu to be printed. :type lu: AttrDict :return: A nicely formatted string representation of the lexical unit. :rtype: str
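A hedged sketch, assuming the framenet_v17 data package is installed: fetching a lexical unit and printing it should render through this helper (the 'ailment.n' LU of frame 239 is the one used in the demo record further below):

from nltk.corpus import framenet as fn

lu_id = fn.frame(239).lexUnit["ailment.n"].ID
print(fn.lu(lu_id))   # str() of an LU record is produced by _pretty_lu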
170,741
import itertools
import os
import re
import sys
import textwrap
import types
from collections import OrderedDict, defaultdict
from itertools import zip_longest
from operator import itemgetter
from pprint import pprint
from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView
from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap
The provided code snippet includes necessary dependencies for implementing the `_pretty_exemplars` function. Write a Python function `def _pretty_exemplars(exemplars, lu)` to solve the following problem: Helper function for pretty-printing a list of exemplar sentences for a lexical unit. :param exemplars: The list of exemplar sentences to be printed. :type exemplars: list(AttrDict) :param lu: The lexical unit that the exemplars illustrate. :type lu: AttrDict :return: An index of the text of the exemplar sentences. :rtype: str Here is the function: def _pretty_exemplars(exemplars, lu): """ Helper function for pretty-printing a list of exemplar sentences for a lexical unit. :param exemplars: The list of exemplar sentences to be printed. :type exemplars: list(AttrDict) :param lu: The lexical unit that the exemplars illustrate. :type lu: AttrDict :return: An index of the text of the exemplar sentences. :rtype: str """ outstr = "" outstr += "exemplar sentences for {0.name} in {0.frame.name}:\n\n".format(lu) for i, sent in enumerate(exemplars): outstr += f"[{i}] {sent.text}\n" outstr += "\n" return outstr
Helper function for pretty-printing a list of exemplar sentences for a lexical unit. :param exemplars: The list of exemplar sentences to be printed. :type exemplars: list(AttrDict) :param lu: The lexical unit that the exemplars illustrate. :type lu: AttrDict :return: An index of the text of the exemplar sentences. :rtype: str
170,742
import itertools
import os
import re
import sys
import textwrap
import types
from collections import OrderedDict, defaultdict
from itertools import zip_longest
from operator import itemgetter
from pprint import pprint
from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView
from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap
The provided code snippet includes necessary dependencies for implementing the `_pretty_fulltext_sentences` function. Write a Python function `def _pretty_fulltext_sentences(sents)` to solve the following problem: Helper function for pretty-printing a list of annotated sentences for a full-text document. :param sents: The full-text document containing the sentences to be printed. :type sents: AttrDict :return: An index of the text of the sentences. :rtype: str Here is the function: def _pretty_fulltext_sentences(sents): """ Helper function for pretty-printing a list of annotated sentences for a full-text document. :param sents: The full-text document containing the sentences to be printed. :type sents: AttrDict :return: An index of the text of the sentences. :rtype: str """ outstr = "" outstr += "full-text document ({0.ID}) {0.name}:\n\n".format(sents) outstr += "[corpid] {0.corpid}\n[corpname] {0.corpname}\n[description] {0.description}\n[URL] {0.URL}\n\n".format( sents ) outstr += "[sentence]\n" for i, sent in enumerate(sents.sentence): outstr += f"[{i}] {sent.text}\n" outstr += "\n" return outstr
Helper function for pretty-printing a list of annotated sentences for a full-text document. :param sents: The full-text document containing the sentences to be printed. :type sents: AttrDict :return: An index of the text of the sentences. :rtype: str
170,743
import itertools
import os
import re
import sys
import textwrap
import types
from collections import OrderedDict, defaultdict
from itertools import zip_longest
from operator import itemgetter
from pprint import pprint
from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView
from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap
The provided code snippet includes necessary dependencies for implementing the `_pretty_fulltext_sentence` function. Write a Python function `def _pretty_fulltext_sentence(sent)` to solve the following problem: Helper function for pretty-printing an annotated sentence from a full-text document. :param sent: The sentence to be printed. :type sent: AttrDict :return: The text of the sentence with annotation set indices on frame targets. :rtype: str Here is the function: def _pretty_fulltext_sentence(sent): """ Helper function for pretty-printing an annotated sentence from a full-text document. :param sent: The sentence to be printed. :type sent: AttrDict :return: The text of the sentence with annotation set indices on frame targets. :rtype: str """ outstr = "" outstr += "full-text sentence ({0.ID}) in {1}:\n\n".format( sent, sent.doc.get("name", sent.doc.description) ) outstr += f"\n[POS] {len(sent.POS)} tags\n" outstr += f"\n[POS_tagset] {sent.POS_tagset}\n\n" outstr += "[text] + [annotationSet]\n\n" outstr += sent._ascii() # -> _annotation_ascii() outstr += "\n" return outstr
Helper function for pretty-printing an annotated sentence from a full-text document. :param sent: The sentence to be printed. :type sent: AttrDict :return: The text of the sentence with annotation set indices on frame targets. :rtype: str
170,744
import itertools
import os
import re
import sys
import textwrap
import types
from collections import OrderedDict, defaultdict
from itertools import zip_longest
from operator import itemgetter
from pprint import pprint
from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView
from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap
def mimic_wrap(lines, wrap_at=65, **kwargs): """ Wrap the first of 'lines' with textwrap and the remaining lines at exactly the same positions as the first. """ l0 = textwrap.fill(lines[0], wrap_at, drop_whitespace=False).split("\n") yield l0 def _(line): il0 = 0 while line and il0 < len(l0) - 1: yield line[: len(l0[il0])] line = line[len(l0[il0]) :] il0 += 1 if line: # Remaining stuff on this line past the end of the mimicked line. # So just textwrap this line. yield from textwrap.fill(line, wrap_at, drop_whitespace=False).split("\n") for l in lines[1:]: yield list(_(l))
def zip_longest(*p: Iterable[Any], fillvalue: Any = ...) -> Iterator[Any]: ...
The provided code snippet includes necessary dependencies for implementing the `_pretty_pos` function. Write a Python function `def _pretty_pos(aset)` to solve the following problem: Helper function for pretty-printing a sentence with its POS tags. :param aset: The POS annotation set of the sentence to be printed. :type aset: AttrDict :return: The text of the sentence and its POS tags. :rtype: str Here is the function: def _pretty_pos(aset): """ Helper function for pretty-printing a sentence with its POS tags. :param aset: The POS annotation set of the sentence to be printed. :type aset: AttrDict :return: The text of the sentence and its POS tags. :rtype: str """ outstr = "" outstr += "POS annotation set ({0.ID}) {0.POS_tagset} in sentence {0.sent.ID}:\n\n".format( aset ) # list the target spans and their associated aset index overt = sorted(aset.POS) sent = aset.sent s0 = sent.text s1 = "" s2 = "" i = 0 adjust = 0 for j, k, lbl in overt: assert j >= i, ("Overlapping targets?", (j, k, lbl)) s1 += " " * (j - i) + "-" * (k - j) if len(lbl) > (k - j): # add space in the sentence to make room for the annotation index amt = len(lbl) - (k - j) s0 = ( s0[: k + adjust] + "~" * amt + s0[k + adjust :] ) # '~' to prevent line wrapping s1 = s1[: k + adjust] + " " * amt + s1[k + adjust :] adjust += amt s2 += " " * (j - i) + lbl.ljust(k - j) i = k long_lines = [s0, s1, s2] outstr += "\n\n".join( map("\n".join, zip_longest(*mimic_wrap(long_lines), fillvalue=" ")) ).replace("~", " ") outstr += "\n" return outstr
Helper function for pretty-printing a sentence with its POS tags. :param aset: The POS annotation set of the sentence to be printed. :type aset: AttrDict :return: The text of the sentence and its POS tags. :rtype: str
170,745
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap The provided code snippet includes necessary dependencies for implementing the `_pretty_annotation` function. Write a Python function `def _pretty_annotation(sent, aset_level=False)` to solve the following problem: Helper function for pretty-printing an exemplar sentence for a lexical unit. :param sent: An annotation set or exemplar sentence to be printed. :param aset_level: If True, 'sent' is actually an annotation set within a sentence. :type sent: AttrDict :return: A nicely formatted string representation of the exemplar sentence with its target, frame, and FE annotations. :rtype: str Here is the function: def _pretty_annotation(sent, aset_level=False): """ Helper function for pretty-printing an exemplar sentence for a lexical unit. :param sent: An annotation set or exemplar sentence to be printed. :param aset_level: If True, 'sent' is actually an annotation set within a sentence. :type sent: AttrDict :return: A nicely formatted string representation of the exemplar sentence with its target, frame, and FE annotations. :rtype: str """ sentkeys = sent.keys() outstr = "annotation set" if aset_level else "exemplar sentence" outstr += f" ({sent.ID}):\n" if aset_level: # TODO: any UNANN exemplars? outstr += f"\n[status] {sent.status}\n" for k in ("corpID", "docID", "paragNo", "sentNo", "aPos"): if k in sentkeys: outstr += f"[{k}] {sent[k]}\n" outstr += ( "\n[LU] ({0.ID}) {0.name} in {0.frame.name}\n".format(sent.LU) if sent.LU else "\n[LU] Not found!" ) outstr += "\n[frame] ({0.ID}) {0.name}\n".format( sent.frame ) # redundant with above, but .frame is convenient if not aset_level: outstr += "\n[annotationSet] {} annotation sets\n".format( len(sent.annotationSet) ) outstr += f"\n[POS] {len(sent.POS)} tags\n" outstr += f"\n[POS_tagset] {sent.POS_tagset}\n" outstr += "\n[GF] {} relation{}\n".format( len(sent.GF), "s" if len(sent.GF) != 1 else "" ) outstr += "\n[PT] {} phrase{}\n".format( len(sent.PT), "s" if len(sent.PT) != 1 else "" ) """ Special Layers -------------- The 'NER' layer contains, for some of the data, named entity labels. The 'WSL' (word status layer) contains, for some of the data, spans which should not in principle be considered targets (NT). The 'Other' layer records relative clause constructions (Rel=relativizer, Ant=antecedent), pleonastic 'it' (Null), and existential 'there' (Exist). On occasion they are duplicated by accident (e.g., annotationSet 1467275 in lu6700.xml). The 'Sent' layer appears to contain labels that the annotator has flagged the sentence with for their convenience: values include 'sense1', 'sense2', 'sense3', etc.; 'Blend', 'Canonical', 'Idiom', 'Metaphor', 'Special-Sent', 'keepS', 'deleteS', 'reexamine' (sometimes they are duplicated for no apparent reason). The POS-specific layers may contain the following kinds of spans: Asp (aspectual particle), Non-Asp (non-aspectual particle), Cop (copula), Supp (support), Ctrlr (controller), Gov (governor), X. Gov and X always cooccur. >>> from nltk.corpus import framenet as fn >>> def f(luRE, lyr, ignore=set()): ... for i,ex in enumerate(fn.exemplars(luRE)): ... if lyr in ex and ex[lyr] and set(zip(*ex[lyr])[2]) - ignore: ... 
print(i,ex[lyr]) - Verb: Asp, Non-Asp - Noun: Cop, Supp, Ctrlr, Gov, X - Adj: Cop, Supp, Ctrlr, Gov, X - Prep: Cop, Supp, Ctrlr - Adv: Ctrlr - Scon: (none) - Art: (none) """ for lyr in ("NER", "WSL", "Other", "Sent"): if lyr in sent and sent[lyr]: outstr += "\n[{}] {} entr{}\n".format( lyr, len(sent[lyr]), "ies" if len(sent[lyr]) != 1 else "y" ) outstr += "\n[text] + [Target] + [FE]" # POS-specific layers: syntactically important words that are neither the target # nor the FEs. Include these along with the first FE layer but with '^' underlining. for lyr in ("Verb", "Noun", "Adj", "Adv", "Prep", "Scon", "Art"): if lyr in sent and sent[lyr]: outstr += f" + [{lyr}]" if "FE2" in sentkeys: outstr += " + [FE2]" if "FE3" in sentkeys: outstr += " + [FE3]" outstr += "\n\n" outstr += sent._ascii() # -> _annotation_ascii() outstr += "\n" return outstr
Helper function for pretty-printing an exemplar sentence for a lexical unit. :param sent: An annotation set or exemplar sentence to be printed. :param aset_level: If True, 'sent' is actually an annotation set within a sentence. :type sent: AttrDict :return: A nicely formatted string representation of the exemplar sentence with its target, frame, and FE annotations. :rtype: str
170,746
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap def _annotation_ascii_frames(sent): """ ASCII string rendering of the sentence along with its targets and frame names. Called for all full-text sentences, as well as the few LU sentences with multiple targets (e.g., fn.lu(6412).exemplars[82] has two want.v targets). Line-wrapped to limit the display width. """ # list the target spans and their associated aset index overt = [] for a, aset in enumerate(sent.annotationSet[1:]): for j, k in aset.Target: indexS = f"[{a + 1}]" if aset.status == "UNANN" or aset.LU.status == "Problem": indexS += " " if aset.status == "UNANN": indexS += "!" # warning indicator that there is a frame annotation but no FE annotation if aset.LU.status == "Problem": indexS += "?" # warning indicator that there is a missing LU definition (because the LU has Problem status) overt.append((j, k, aset.LU.frame.name, indexS)) overt = sorted(overt) duplicates = set() for o, (j, k, fname, asetIndex) in enumerate(overt): if o > 0 and j <= overt[o - 1][1]: # multiple annotation sets on the same target # (e.g. due to a coordination construction or multiple annotators) if ( overt[o - 1][:2] == (j, k) and overt[o - 1][2] == fname ): # same target, same frame # splice indices together combinedIndex = ( overt[o - 1][3] + asetIndex ) # e.g., '[1][2]', '[1]! [2]' combinedIndex = combinedIndex.replace(" !", "! ").replace(" ?", "? ") overt[o - 1] = overt[o - 1][:3] + (combinedIndex,) duplicates.add(o) else: # different frames, same or overlapping targets s = sent.text for j, k, fname, asetIndex in overt: s += "\n" + asetIndex + " " + sent.text[j:k] + " :: " + fname s += "\n(Unable to display sentence with targets marked inline due to overlap)" return s for o in reversed(sorted(duplicates)): del overt[o] s0 = sent.text s1 = "" s11 = "" s2 = "" i = 0 adjust = 0 fAbbrevs = OrderedDict() for j, k, fname, asetIndex in overt: if not j >= i: assert j >= i, ( "Overlapping targets?" + ( " UNANN" if any(aset.status == "UNANN" for aset in sent.annotationSet[1:]) else "" ), (j, k, asetIndex), ) s1 += " " * (j - i) + "*" * (k - j) short = fname[: k - j] if (k - j) < len(fname): r = 0 while short in fAbbrevs: if fAbbrevs[short] == fname: break r += 1 short = fname[: k - j - 1] + str(r) else: # short not in fAbbrevs fAbbrevs[short] = fname s11 += " " * (j - i) + short.ljust(k - j) if len(asetIndex) > (k - j): # add space in the sentence to make room for the annotation index amt = len(asetIndex) - (k - j) s0 = ( s0[: k + adjust] + "~" * amt + s0[k + adjust :] ) # '~' to prevent line wrapping s1 = s1[: k + adjust] + " " * amt + s1[k + adjust :] s11 = s11[: k + adjust] + " " * amt + s11[k + adjust :] adjust += amt s2 += " " * (j - i) + asetIndex.ljust(k - j) i = k long_lines = [s0, s1, s11, s2] outstr = "\n\n".join( map("\n".join, zip_longest(*mimic_wrap(long_lines), fillvalue=" ")) ).replace("~", " ") outstr += "\n" if fAbbrevs: outstr += " (" + ", ".join("=".join(pair) for pair in fAbbrevs.items()) + ")" assert len(fAbbrevs) == len(dict(fAbbrevs)), "Abbreviation clash" return outstr def _annotation_ascii_FEs(sent): """ ASCII string rendering of the sentence along with a single target and its FEs. Secondary and tertiary FE layers are included if present. 
'sent' can be an FE annotation set or an LU sentence with a single target. Line-wrapped to limit the display width. """ feAbbrevs = OrderedDict() posspec = [] # POS-specific layer spans (e.g., Supp[ort], Cop[ula]) posspec_separate = False for lyr in ("Verb", "Noun", "Adj", "Adv", "Prep", "Scon", "Art"): if lyr in sent and sent[lyr]: for a, b, lbl in sent[lyr]: if ( lbl == "X" ): # skip this, which covers an entire phrase typically containing the target and all its FEs # (but do display the Gov) continue if any(1 for x, y, felbl in sent.FE[0] if x <= a < y or a <= x < b): # overlap between one of the POS-specific layers and first FE layer posspec_separate = ( True # show POS-specific layers on a separate line ) posspec.append( (a, b, lbl.lower().replace("-", "")) ) # lowercase Cop=>cop, Non-Asp=>nonasp, etc. to distinguish from FE names if posspec_separate: POSSPEC = _annotation_ascii_FE_layer(posspec, {}, feAbbrevs) FE1 = _annotation_ascii_FE_layer( sorted(sent.FE[0] + (posspec if not posspec_separate else [])), sent.FE[1], feAbbrevs, ) FE2 = FE3 = None if "FE2" in sent: FE2 = _annotation_ascii_FE_layer(sent.FE2[0], sent.FE2[1], feAbbrevs) if "FE3" in sent: FE3 = _annotation_ascii_FE_layer(sent.FE3[0], sent.FE3[1], feAbbrevs) for i, j in sent.Target: FE1span, FE1name, FE1exp = FE1 if len(FE1span) < j: FE1span += " " * (j - len(FE1span)) if len(FE1name) < j: FE1name += " " * (j - len(FE1name)) FE1[1] = FE1name FE1[0] = ( FE1span[:i] + FE1span[i:j].replace(" ", "*").replace("-", "=") + FE1span[j:] ) long_lines = [sent.text] if posspec_separate: long_lines.extend(POSSPEC[:2]) long_lines.extend([FE1[0], FE1[1] + FE1[2]]) # lines with no length limit if FE2: long_lines.extend([FE2[0], FE2[1] + FE2[2]]) if FE3: long_lines.extend([FE3[0], FE3[1] + FE3[2]]) long_lines.append("") outstr = "\n".join( map("\n".join, zip_longest(*mimic_wrap(long_lines), fillvalue=" ")) ) if feAbbrevs: outstr += "(" + ", ".join("=".join(pair) for pair in feAbbrevs.items()) + ")" assert len(feAbbrevs) == len(dict(feAbbrevs)), "Abbreviation clash" outstr += "\n" return outstr The provided code snippet includes necessary dependencies for implementing the `_annotation_ascii` function. Write a Python function `def _annotation_ascii(sent)` to solve the following problem: Given a sentence or FE annotation set, construct the width-limited string showing an ASCII visualization of the sentence's annotations, calling either _annotation_ascii_frames() or _annotation_ascii_FEs() as appropriate. This will be attached as a method to appropriate AttrDict instances and called in the full pretty-printing of the instance. Here is the function: def _annotation_ascii(sent): """ Given a sentence or FE annotation set, construct the width-limited string showing an ASCII visualization of the sentence's annotations, calling either _annotation_ascii_frames() or _annotation_ascii_FEs() as appropriate. This will be attached as a method to appropriate AttrDict instances and called in the full pretty-printing of the instance. """ if sent._type == "fulltext_sentence" or ( "annotationSet" in sent and len(sent.annotationSet) > 2 ): # a full-text sentence OR sentence with multiple targets. # (multiple targets = >2 annotation sets, because the first annotation set is POS.) return _annotation_ascii_frames(sent) else: # an FE annotation set, or an LU sentence with 1 target return _annotation_ascii_FEs(sent)
Given a sentence or FE annotation set, construct the width-limited string showing an ASCII visualization of the sentence's annotations, calling either _annotation_ascii_frames() or _annotation_ascii_FEs() as appropriate. This will be attached as a method to appropriate AttrDict instances and called in the full pretty-printing of the instance.
170,747
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap def _pretty_longstring(defstr, prefix="", wrap_at=65): """ Helper function for pretty-printing a long string. :param defstr: The string to be printed. :type defstr: str :return: A nicely formatted string representation of the long string. :rtype: str """ outstr = "" for line in textwrap.fill(defstr, wrap_at).split("\n"): outstr += prefix + line + "\n" return outstr The provided code snippet includes necessary dependencies for implementing the `_pretty_fe` function. Write a Python function `def _pretty_fe(fe)` to solve the following problem: Helper function for pretty-printing a frame element. :param fe: The frame element to be printed. :type fe: AttrDict :return: A nicely formatted string representation of the frame element. :rtype: str Here is the function: def _pretty_fe(fe): """ Helper function for pretty-printing a frame element. :param fe: The frame element to be printed. :type fe: AttrDict :return: A nicely formatted string representation of the frame element. :rtype: str """ fekeys = fe.keys() outstr = "" outstr += "frame element ({0.ID}): {0.name}\n of {1.name}({1.ID})\n".format( fe, fe.frame ) if "definition" in fekeys: outstr += "[definition]\n" outstr += _pretty_longstring(fe.definition, " ") if "abbrev" in fekeys: outstr += f"[abbrev] {fe.abbrev}\n" if "coreType" in fekeys: outstr += f"[coreType] {fe.coreType}\n" if "requiresFE" in fekeys: outstr += "[requiresFE] " if fe.requiresFE is None: outstr += "<None>\n" else: outstr += f"{fe.requiresFE.name}({fe.requiresFE.ID})\n" if "excludesFE" in fekeys: outstr += "[excludesFE] " if fe.excludesFE is None: outstr += "<None>\n" else: outstr += f"{fe.excludesFE.name}({fe.excludesFE.ID})\n" if "semType" in fekeys: outstr += "[semType] " if fe.semType is None: outstr += "<None>\n" else: outstr += "\n " + f"{fe.semType.name}({fe.semType.ID})" + "\n" return outstr
Helper function for pretty-printing a frame element. :param fe: The frame element to be printed. :type fe: AttrDict :return: A nicely formatted string representation of the frame element. :rtype: str
170,748
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap def _pretty_longstring(defstr, prefix="", wrap_at=65): """ Helper function for pretty-printing a long string. :param defstr: The string to be printed. :type defstr: str :return: A nicely formatted string representation of the long string. :rtype: str """ outstr = "" for line in textwrap.fill(defstr, wrap_at).split("\n"): outstr += prefix + line + "\n" return outstr The provided code snippet includes necessary dependencies for implementing the `_pretty_frame` function. Write a Python function `def _pretty_frame(frame)` to solve the following problem: Helper function for pretty-printing a frame. :param frame: The frame to be printed. :type frame: AttrDict :return: A nicely formatted string representation of the frame. :rtype: str Here is the function: def _pretty_frame(frame): """ Helper function for pretty-printing a frame. :param frame: The frame to be printed. :type frame: AttrDict :return: A nicely formatted string representation of the frame. :rtype: str """ outstr = "" outstr += "frame ({0.ID}): {0.name}\n\n".format(frame) outstr += f"[URL] {frame.URL}\n\n" outstr += "[definition]\n" outstr += _pretty_longstring(frame.definition, " ") + "\n" outstr += f"[semTypes] {len(frame.semTypes)} semantic types\n" outstr += ( " " * (len(frame.semTypes) > 0) + ", ".join(f"{x.name}({x.ID})" for x in frame.semTypes) + "\n" * (len(frame.semTypes) > 0) ) outstr += "\n[frameRelations] {} frame relations\n".format( len(frame.frameRelations) ) outstr += " " + "\n ".join(repr(frel) for frel in frame.frameRelations) + "\n" outstr += f"\n[lexUnit] {len(frame.lexUnit)} lexical units\n" lustrs = [] for luName, lu in sorted(frame.lexUnit.items()): tmpstr = f"{luName} ({lu.ID})" lustrs.append(tmpstr) outstr += "{}\n".format(_pretty_longstring(", ".join(lustrs), prefix=" ")) outstr += f"\n[FE] {len(frame.FE)} frame elements\n" fes = {} for feName, fe in sorted(frame.FE.items()): try: fes[fe.coreType].append(f"{feName} ({fe.ID})") except KeyError: fes[fe.coreType] = [] fes[fe.coreType].append(f"{feName} ({fe.ID})") for ct in sorted( fes.keys(), key=lambda ct2: [ "Core", "Core-Unexpressed", "Peripheral", "Extra-Thematic", ].index(ct2), ): outstr += "{:>16}: {}\n".format(ct, ", ".join(sorted(fes[ct]))) outstr += "\n[FEcoreSets] {} frame element core sets\n".format( len(frame.FEcoreSets) ) outstr += ( " " + "\n ".join( ", ".join([x.name for x in coreSet]) for coreSet in frame.FEcoreSets ) + "\n" ) return outstr
Helper function for pretty-printing a frame. :param frame: The frame to be printed. :type frame: AttrDict :return: A nicely formatted string representation of the frame. :rtype: str
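Likewise for frames, printing a frame object should render through `_pretty_frame` (again assuming the framenet_v17 data is installed; frame 239, 'Medical_conditions', is taken from the demo record below):

from nltk.corpus import framenet as fn

frame = fn.frame(239)   # 'Medical_conditions'
print(frame)            # [definition], [lexUnit], [FE], [FEcoreSets], ...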
170,749
import itertools import os import re import sys import textwrap import types from collections import OrderedDict, defaultdict from itertools import zip_longest from operator import itemgetter from pprint import pprint from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap framenet: FramenetCorpusReader = LazyCorpusLoader( "framenet_v17", FramenetCorpusReader, [ "frRelation.xml", "frameIndex.xml", "fulltextIndex.xml", "luIndex.xml", "semTypes.xml", ], ) def demo(): from nltk.corpus import framenet as fn # # It is not necessary to explicitly build the indexes by calling # buildindexes(). We do this here just for demo purposes. If the # indexes are not built explicitly, they will be built as needed. # print("Building the indexes...") fn.buildindexes() # # Get some statistics about the corpus # print("Number of Frames:", len(fn.frames())) print("Number of Lexical Units:", len(fn.lus())) print("Number of annotated documents:", len(fn.docs())) print() # # Frames # print( 'getting frames whose name matches the (case insensitive) regex: "(?i)medical"' ) medframes = fn.frames(r"(?i)medical") print(f'Found {len(medframes)} Frames whose name matches "(?i)medical":') print([(f.name, f.ID) for f in medframes]) # # store the first frame in the list of frames # tmp_id = medframes[0].ID m_frame = fn.frame(tmp_id) # reads all info for the frame # # get the frame relations # print( '\nNumber of frame relations for the "{}" ({}) frame:'.format( m_frame.name, m_frame.ID ), len(m_frame.frameRelations), ) for fr in m_frame.frameRelations: print(" ", fr) # # get the names of the Frame Elements # print( f'\nNumber of Frame Elements in the "{m_frame.name}" frame:', len(m_frame.FE), ) print(" ", [x for x in m_frame.FE]) # # get the names of the "Core" Frame Elements # print(f'\nThe "core" Frame Elements in the "{m_frame.name}" frame:') print(" ", [x.name for x in m_frame.FE.values() if x.coreType == "Core"]) # # get all of the Lexical Units that are incorporated in the # 'Ailment' FE of the 'Medical_conditions' frame (id=239) # print('\nAll Lexical Units that are incorporated in the "Ailment" FE:') m_frame = fn.frame(239) ailment_lus = [ x for x in m_frame.lexUnit.values() if "incorporatedFE" in x and x.incorporatedFE == "Ailment" ] print(" ", [x.name for x in ailment_lus]) # # get all of the Lexical Units for the frame # print( f'\nNumber of Lexical Units in the "{m_frame.name}" frame:', len(m_frame.lexUnit), ) print(" ", [x.name for x in m_frame.lexUnit.values()][:5], "...") # # get basic info on the second LU in the frame # tmp_id = m_frame.lexUnit["ailment.n"].ID # grab the id of the specified LU luinfo = fn.lu_basic(tmp_id) # get basic info on the LU print(f"\nInformation on the LU: {luinfo.name}") pprint(luinfo) # # Get a list of all of the corpora used for fulltext annotation # print("\nNames of all of the corpora used for fulltext annotation:") allcorpora = {x.corpname for x in fn.docs_metadata()} pprint(list(allcorpora)) # # Get the names of the annotated documents in the first corpus # firstcorp = list(allcorpora)[0] firstcorp_docs = fn.docs(firstcorp) print(f'\nNames of the annotated documents in the "{firstcorp}" corpus:') pprint([x.filename for x in firstcorp_docs]) # # Search for frames containing LUs whose name attribute matches a # regexp pattern. 
# # Note: if you were going to be doing a lot of this type of # searching, you'd want to build an index that maps from # lemmas to frames because each time frames_by_lemma() is # called, it has to search through ALL of the frame XML files # in the db. print( '\nSearching for all Frames that have a lemma that matches the regexp: "^run.v$":' ) pprint(fn.frames_by_lemma(r"^run.v$"))
null
170,750
import re from xml.etree import ElementTree from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * from nltk.tokenize import * The provided code snippet includes necessary dependencies for implementing the `_fixXML` function. Write a Python function `def _fixXML(text)` to solve the following problem: Fix the various issues with Senseval pseudo-XML. Here is the function: def _fixXML(text): """ Fix the various issues with Senseval pseudo-XML. """ # <~> or <^> => ~ or ^ text = re.sub(r"<([~\^])>", r"\1", text) # fix lone & text = re.sub(r"(\s+)\&(\s+)", r"\1&amp;\2", text) # fix """ text = re.sub(r'"""', "'\"'", text) # fix <s snum=dd> => <s snum="dd"/> text = re.sub(r'(<[^<]*snum=)([^">]+)>', r'\1"\2"/>', text) # fix foreign word tag text = re.sub(r"<\&frasl>\s*<p[^>]*>", "FRASL", text) # remove <&I .> text = re.sub(r"<\&I[^>]*>", "", text) # fix <{word}> text = re.sub(r"<{([^}]+)}>", r"\1", text) # remove <@>, <p>, </p> text = re.sub(r"<(@|/?p)>", r"", text) # remove <&M .> and <&T .> and <&Ms .> text = re.sub(r"<&\w+ \.>", r"", text) # remove <!DOCTYPE... > lines text = re.sub(r"<!DOCTYPE[^>]*>", r"", text) # remove <[hi]> and <[/p]> etc text = re.sub(r"<\[\/?[^>]+\]*>", r"", text) # take the thing out of the brackets: <&hellip;> text = re.sub(r"<(\&\w+;)>", r"\1", text) # and remove the & for those patterns that aren't regular XML text = re.sub(r"&(?!amp|gt|lt|apos|quot)", r"", text) # fix 'abc <p="foo"/>' style tags - now <wf pos="foo">abc</wf> text = re.sub( r'[ \t]*([^<>\s]+?)[ \t]*<p="([^"]*"?)"/>', r' <wf pos="\2">\1</wf>', text ) text = re.sub(r'\s*"\s*<p=\'"\'/>', " <wf pos='\"'>\"</wf>", text) return text
Fix the various issues with Senseval pseudo-XML.
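A minimal sketch of _fixXML on a fabricated pseudo-XML fragment (the input string is invented for illustration, and the function is module-internal):

print(_fixXML('<s snum=42> the <{word}> & more'))
# -> '<s snum="42"/> the word &amp; more'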
170,751
import sys from nltk.corpus.reader import util from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * class ChasenCorpusReader(CorpusReader): def __init__(self, root, fileids, encoding="utf8", sent_splitter=None): self._sent_splitter = sent_splitter CorpusReader.__init__(self, root, fileids, encoding) def words(self, fileids=None): return concat( [ ChasenCorpusView(fileid, enc, False, False, False, self._sent_splitter) for (fileid, enc) in self.abspaths(fileids, True) ] ) def tagged_words(self, fileids=None): return concat( [ ChasenCorpusView(fileid, enc, True, False, False, self._sent_splitter) for (fileid, enc) in self.abspaths(fileids, True) ] ) def sents(self, fileids=None): return concat( [ ChasenCorpusView(fileid, enc, False, True, False, self._sent_splitter) for (fileid, enc) in self.abspaths(fileids, True) ] ) def tagged_sents(self, fileids=None): return concat( [ ChasenCorpusView(fileid, enc, True, True, False, self._sent_splitter) for (fileid, enc) in self.abspaths(fileids, True) ] ) def paras(self, fileids=None): return concat( [ ChasenCorpusView(fileid, enc, False, True, True, self._sent_splitter) for (fileid, enc) in self.abspaths(fileids, True) ] ) def tagged_paras(self, fileids=None): return concat( [ ChasenCorpusView(fileid, enc, True, True, True, self._sent_splitter) for (fileid, enc) in self.abspaths(fileids, True) ] ) class LazyCorpusLoader: """ To see the API documentation for this lazily loaded corpus, first run corpus.ensure_loaded(), and then run help(this_corpus). LazyCorpusLoader is a proxy object which is used to stand in for a corpus object before the corpus is loaded. This allows NLTK to create an object for each corpus, but defer the costs associated with loading those corpora until the first time that they're actually accessed. The first time this object is accessed in any way, it will load the corresponding corpus, and transform itself into that corpus (by modifying its own ``__class__`` and ``__dict__`` attributes). If the corpus can not be found, then accessing this object will raise an exception, displaying installation instructions for the NLTK data package. Once they've properly installed the data package (or modified ``nltk.data.path`` to point to its location), they can then use the corpus object without restarting python. :param name: The name of the corpus :type name: str :param reader_cls: The specific CorpusReader class, e.g. PlaintextCorpusReader, WordListCorpusReader :type reader: nltk.corpus.reader.api.CorpusReader :param nltk_data_subdir: The subdirectory where the corpus is stored. :type nltk_data_subdir: str :param `*args`: Any other non-keywords arguments that `reader_cls` might need. :param `**kwargs`: Any other keywords arguments that `reader_cls` might need. """ def __init__(self, name, reader_cls, *args, **kwargs): from nltk.corpus.reader.api import CorpusReader assert issubclass(reader_cls, CorpusReader) self.__name = self.__name__ = name self.__reader_cls = reader_cls # If nltk_data_subdir is set explicitly if "nltk_data_subdir" in kwargs: # Use the specified subdirectory path self.subdir = kwargs["nltk_data_subdir"] # Pops the `nltk_data_subdir` argument, we don't need it anymore. kwargs.pop("nltk_data_subdir", None) else: # Otherwise use 'nltk_data/corpora' self.subdir = "corpora" self.__args = args self.__kwargs = kwargs def __load(self): # Find the corpus root directory. 
zip_name = re.sub(r"(([^/]+)(/.*)?)", r"\2.zip/\1/", self.__name) if TRY_ZIPFILE_FIRST: try: root = nltk.data.find(f"{self.subdir}/{zip_name}") except LookupError as e: try: root = nltk.data.find(f"{self.subdir}/{self.__name}") except LookupError: raise e else: try: root = nltk.data.find(f"{self.subdir}/{self.__name}") except LookupError as e: try: root = nltk.data.find(f"{self.subdir}/{zip_name}") except LookupError: raise e # Load the corpus. corpus = self.__reader_cls(root, *self.__args, **self.__kwargs) # This is where the magic happens! Transform ourselves into # the corpus by modifying our own __dict__ and __class__ to # match that of the corpus. args, kwargs = self.__args, self.__kwargs name, reader_cls = self.__name, self.__reader_cls self.__dict__ = corpus.__dict__ self.__class__ = corpus.__class__ # _unload support: assign __dict__ and __class__ back, then do GC. # after reassigning __dict__ there shouldn't be any references to # corpus data so the memory should be deallocated after gc.collect() def _unload(self): lazy_reader = LazyCorpusLoader(name, reader_cls, *args, **kwargs) self.__dict__ = lazy_reader.__dict__ self.__class__ = lazy_reader.__class__ gc.collect() self._unload = _make_bound_method(_unload, self) def __getattr__(self, attr): # Fix for inspect.isclass under Python 2.6 # (see https://bugs.python.org/issue1225107). # Without this fix tests may take extra 1.5GB RAM # because all corpora gets loaded during test collection. if attr == "__bases__": raise AttributeError("LazyCorpusLoader object has no attribute '__bases__'") self.__load() # This looks circular, but its not, since __load() changes our # __class__ to something new: return getattr(self, attr) def __repr__(self): return "<{} in {!r} (not loaded yet)>".format( self.__reader_cls.__name__, ".../corpora/" + self.__name, ) def _unload(self): # If an exception occurs during corpus loading then # '_unload' method may be unattached, so __getattr__ can be called; # we shouldn't trigger corpus loading again in this case. pass def demo(): import nltk from nltk.corpus.util import LazyCorpusLoader jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8") print("/".join(jeita.words()[22100:22140])) print( "\nEOS\n".join( "\n".join("{}/{}".format(w[0], w[1].split("\t")[2]) for w in sent) for sent in jeita.tagged_sents()[2170:2173] ) )
null
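The deferred loading that LazyCorpusLoader provides is easy to observe directly; this sketch assumes the standard stopwords data package is installed:

from nltk.corpus.reader import WordListCorpusReader
from nltk.corpus.util import LazyCorpusLoader

stops = LazyCorpusLoader("stopwords", WordListCorpusReader, r"[a-z]+")
print(repr(stops))                 # still a proxy: "... (not loaded yet)"
print(stops.words("english")[:3])  # the first real access loads the corpus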
170,752
import functools
import os
import re
import tempfile

from nltk.corpus.reader.util import concat
from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView

The provided code snippet includes necessary dependencies for implementing the `_parse_args` function.

Write a Python function `def _parse_args(fun)` to solve the following problem:
Wraps function arguments: if fileids are not specified, the function falls back to the NKJPCorpusReader paths.

Here is the function:
def _parse_args(fun):
    """
    Wraps function arguments: if fileids are not specified, the function
    falls back to the NKJPCorpusReader paths.
    """

    @functools.wraps(fun)
    def decorator(self, fileids=None, **kwargs):
        if not fileids:
            fileids = self._paths
        return fun(self, fileids, **kwargs)

    return decorator
Wraps function arguments: if fileids are not specified, the function falls back to the NKJPCorpusReader paths.
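A sketch of the decorator's fallback behaviour on a minimal stand-in for the reader (the class and file names below are hypothetical; only the _paths attribute matters):

class FakeReader:
    def __init__(self):
        self._paths = ["ann_words.xml", "ann_segmentation.xml"]

    @_parse_args
    def words(self, fileids=None, **kwargs):
        return fileids

r = FakeReader()
print(r.words())                       # falls back to r._paths
print(r.words(fileids=["other.xml"]))  # an explicit value is passed through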
170,753
from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * from nltk.toolbox import ToolboxData def demo(): pass
null
170,754
import os import re from functools import reduce from nltk.corpus.reader import TaggedCorpusReader, concat from nltk.corpus.reader.xmldocs import XMLCorpusView def xpath(root, path, ns): return root.findall(path, ns)
null
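A tiny illustration of the namespace-aware lookup that xpath wraps (the namespace URI is made up):

from xml.etree import ElementTree

ns = {"x": "http://example.org/x"}
root = ElementTree.fromstring('<doc xmlns:x="http://example.org/x"><x:w>hi</x:w></doc>')
print([e.text for e in xpath(root, "x:w", ns)])  # ['hi']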
170,755
import re from nltk.corpus.reader.api import CorpusReader, SyntaxCorpusReader from nltk.corpus.reader.util import ( FileSystemPathPointer, find_corpus_fileids, read_blankline_block, ) from nltk.parse import DependencyGraph class KNBCorpusReader(SyntaxCorpusReader): """ This class implements: - ``__init__``, which specifies the location of the corpus and a method for detecting the sentence blocks in corpus files. - ``_read_block``, which reads a block from the input stream. - ``_word``, which takes a block and returns a list of list of words. - ``_tag``, which takes a block and returns a list of list of tagged words. - ``_parse``, which takes a block and returns a list of parsed sentences. The structure of tagged words: tagged_word = (word(str), tags(tuple)) tags = (surface, reading, lemma, pos1, posid1, pos2, posid2, pos3, posid3, others ...) Usage example >>> from nltk.corpus.util import LazyCorpusLoader >>> knbc = LazyCorpusLoader( ... 'knbc/corpus1', ... KNBCorpusReader, ... r'.*/KN.*', ... encoding='euc-jp', ... ) >>> len(knbc.sents()[0]) 9 """ def __init__(self, root, fileids, encoding="utf8", morphs2str=_morphs2str_default): """ Initialize KNBCorpusReader morphs2str is a function to convert morphlist to str for tree representation for _parse() """ SyntaxCorpusReader.__init__(self, root, fileids, encoding) self.morphs2str = morphs2str def _read_block(self, stream): # blocks are split by blankline (or EOF) - default return read_blankline_block(stream) def _word(self, t): res = [] for line in t.splitlines(): # ignore the Bunsets headers if not re.match(r"EOS|\*|\#|\+", line): cells = line.strip().split(" ") res.append(cells[0]) return res # ignores tagset argument def _tag(self, t, tagset=None): res = [] for line in t.splitlines(): # ignore the Bunsets headers if not re.match(r"EOS|\*|\#|\+", line): cells = line.strip().split(" ") # convert cells to morph tuples res.append((cells[0], " ".join(cells[1:]))) return res def _parse(self, t): dg = DependencyGraph() i = 0 for line in t.splitlines(): if line[0] in "*+": # start of bunsetsu or tag cells = line.strip().split(" ", 3) m = re.match(r"([\-0-9]*)([ADIP])", cells[1]) assert m is not None node = dg.nodes[i] node.update({"address": i, "rel": m.group(2), "word": []}) dep_parent = int(m.group(1)) if dep_parent == -1: dg.root = node else: dg.nodes[dep_parent]["deps"].append(i) i += 1 elif line[0] != "#": # normal morph cells = line.strip().split(" ") # convert cells to morph tuples morph = cells[0], " ".join(cells[1:]) dg.nodes[i - 1]["word"].append(morph) if self.morphs2str: for node in dg.nodes.values(): node["word"] = self.morphs2str(node["word"]) return dg.tree() def find_corpus_fileids(root, regexp): if not isinstance(root, PathPointer): raise TypeError("find_corpus_fileids: expected a PathPointer") regexp += "$" # Find fileids in a zipfile: scan the zipfile's namelist. Filter # out entries that end in '/' -- they're directories. if isinstance(root, ZipFilePathPointer): fileids = [ name[len(root.entry) :] for name in root.zipfile.namelist() if not name.endswith("/") ] items = [name for name in fileids if re.match(regexp, name)] return sorted(items) # Find fileids in a directory: use os.walk to search all (proper # or symlinked) subdirectories, and match paths against the regexp. 
elif isinstance(root, FileSystemPathPointer): items = [] for dirname, subdirs, fileids in os.walk(root.path): prefix = "".join("%s/" % p for p in _path_from(root.path, dirname)) items += [ prefix + fileid for fileid in fileids if re.match(regexp, prefix + fileid) ] # Don't visit svn directories: if ".svn" in subdirs: subdirs.remove(".svn") return sorted(items) else: raise AssertionError("Don't know how to handle %r" % root) class LazyCorpusLoader: """ To see the API documentation for this lazily loaded corpus, first run corpus.ensure_loaded(), and then run help(this_corpus). LazyCorpusLoader is a proxy object which is used to stand in for a corpus object before the corpus is loaded. This allows NLTK to create an object for each corpus, but defer the costs associated with loading those corpora until the first time that they're actually accessed. The first time this object is accessed in any way, it will load the corresponding corpus, and transform itself into that corpus (by modifying its own ``__class__`` and ``__dict__`` attributes). If the corpus can not be found, then accessing this object will raise an exception, displaying installation instructions for the NLTK data package. Once they've properly installed the data package (or modified ``nltk.data.path`` to point to its location), they can then use the corpus object without restarting python. :param name: The name of the corpus :type name: str :param reader_cls: The specific CorpusReader class, e.g. PlaintextCorpusReader, WordListCorpusReader :type reader: nltk.corpus.reader.api.CorpusReader :param nltk_data_subdir: The subdirectory where the corpus is stored. :type nltk_data_subdir: str :param `*args`: Any other non-keywords arguments that `reader_cls` might need. :param `**kwargs`: Any other keywords arguments that `reader_cls` might need. """ def __init__(self, name, reader_cls, *args, **kwargs): from nltk.corpus.reader.api import CorpusReader assert issubclass(reader_cls, CorpusReader) self.__name = self.__name__ = name self.__reader_cls = reader_cls # If nltk_data_subdir is set explicitly if "nltk_data_subdir" in kwargs: # Use the specified subdirectory path self.subdir = kwargs["nltk_data_subdir"] # Pops the `nltk_data_subdir` argument, we don't need it anymore. kwargs.pop("nltk_data_subdir", None) else: # Otherwise use 'nltk_data/corpora' self.subdir = "corpora" self.__args = args self.__kwargs = kwargs def __load(self): # Find the corpus root directory. zip_name = re.sub(r"(([^/]+)(/.*)?)", r"\2.zip/\1/", self.__name) if TRY_ZIPFILE_FIRST: try: root = nltk.data.find(f"{self.subdir}/{zip_name}") except LookupError as e: try: root = nltk.data.find(f"{self.subdir}/{self.__name}") except LookupError: raise e else: try: root = nltk.data.find(f"{self.subdir}/{self.__name}") except LookupError as e: try: root = nltk.data.find(f"{self.subdir}/{zip_name}") except LookupError: raise e # Load the corpus. corpus = self.__reader_cls(root, *self.__args, **self.__kwargs) # This is where the magic happens! Transform ourselves into # the corpus by modifying our own __dict__ and __class__ to # match that of the corpus. args, kwargs = self.__args, self.__kwargs name, reader_cls = self.__name, self.__reader_cls self.__dict__ = corpus.__dict__ self.__class__ = corpus.__class__ # _unload support: assign __dict__ and __class__ back, then do GC. 
# after reassigning __dict__ there shouldn't be any references to # corpus data so the memory should be deallocated after gc.collect() def _unload(self): lazy_reader = LazyCorpusLoader(name, reader_cls, *args, **kwargs) self.__dict__ = lazy_reader.__dict__ self.__class__ = lazy_reader.__class__ gc.collect() self._unload = _make_bound_method(_unload, self) def __getattr__(self, attr): # Fix for inspect.isclass under Python 2.6 # (see https://bugs.python.org/issue1225107). # Without this fix tests may take extra 1.5GB RAM # because all corpora gets loaded during test collection. if attr == "__bases__": raise AttributeError("LazyCorpusLoader object has no attribute '__bases__'") self.__load() # This looks circular, but its not, since __load() changes our # __class__ to something new: return getattr(self, attr) def __repr__(self): return "<{} in {!r} (not loaded yet)>".format( self.__reader_cls.__name__, ".../corpora/" + self.__name, ) def _unload(self): # If an exception occurs during corpus loading then # '_unload' method may be unattached, so __getattr__ can be called; # we shouldn't trigger corpus loading again in this case. pass def demo(): import nltk from nltk.corpus.util import LazyCorpusLoader root = nltk.data.find("corpora/knbc/corpus1") fileids = [ f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*") if re.search(r"\d\-\d\-[\d]+\-[\d]+", f) ] def _knbc_fileids_sort(x): cells = x.split("-") return (cells[0], int(cells[1]), int(cells[2]), int(cells[3])) knbc = LazyCorpusLoader( "knbc/corpus1", KNBCorpusReader, sorted(fileids, key=_knbc_fileids_sort), encoding="euc-jp", ) print(knbc.fileids()[:10]) print("".join(knbc.words()[:100])) print("\n\n".join(str(tree) for tree in knbc.parsed_sents()[:2])) knbc.morphs2str = lambda morphs: "/".join( "{}({})".format(m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS" ).encode("utf-8") print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2])) print( "\n".join( " ".join("{}/{}".format(w[0], w[1].split(" ")[2]) for w in sent) for sent in knbc.tagged_sents()[0:2] ) )
null
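Two notes on this snippet. First, the _morphs2str_default referenced in __init__ is omitted above; a compatible stand-in, consistent with how demo() formats morphs, would be:

def _morphs2str_default(morphs):
    # assumed stand-in for the omitted helper: join surface forms, drop EOS
    return "/".join(m[0] for m in morphs if m[0] != "EOS")

Second, since _word and _tag never touch self, the block format they expect can be probed without constructing a reader (the morph line below is fabricated and only approximates real KNBC fields):

block = "* -1D\n猫 ねこ 猫 名詞 6 普通名詞 1 * 0 * 0\nEOS\n"
print(KNBCorpusReader._word(None, block))  # ['猫']
print(KNBCorpusReader._tag(None, block))   # [('猫', 'ねこ 猫 名詞 6 普通名詞 1 * 0 * 0')]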
170,756
from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * from nltk.util import Index def read_cmudict_block(stream): entries = [] while len(entries) < 100: # Read 100 at a time. line = stream.readline() if line == "": return entries # end of file. pieces = line.split() entries.append((pieces[0].lower(), pieces[2:])) return entries
null
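The pieces[2:] slicing shows that the reader expects a pronunciation-variant number in the second column of each line, so a minimal in-memory check looks like this:

import io

stream = io.StringIO("fire 1 F AY1 ER0\nfire 2 F AY1 R\n")
print(read_cmudict_block(stream))
# [('fire', ['F', 'AY1', 'ER0']), ('fire', ['F', 'AY1', 'R'])]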
170,757
import sys import time from nltk.corpus.reader.api import * from nltk.internals import import_from_stdlib from nltk.tree import Tree The provided code snippet includes necessary dependencies for implementing the `read_timit_block` function. Write a Python function `def read_timit_block(stream)` to solve the following problem: Block reader for timit tagged sentences, which are preceded by a sentence number that will be ignored. Here is the function: def read_timit_block(stream): """ Block reader for timit tagged sentences, which are preceded by a sentence number that will be ignored. """ line = stream.readline() if not line: return [] n, sent = line.split(" ", 1) return [sent]
Block reader for timit tagged sentences, which are preceded by a sentence number that will be ignored.
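A quick in-memory check (note that the sentence keeps its trailing newline):

import io

stream = io.StringIO("0 she had your dark suit in greasy wash water\n")
print(read_timit_block(stream))  # ['she had your dark suit in greasy wash water\n']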
170,758
import math import os import re import warnings from collections import defaultdict, deque from functools import total_ordering from itertools import chain, islice from operator import itemgetter from nltk.corpus.reader import CorpusReader from nltk.internals import deprecated from nltk.probability import FreqDist from nltk.util import binary_search_file as _binary_search_file def path_similarity(synset1, synset2, verbose=False, simulate_root=True): return synset1.path_similarity( synset2, verbose=verbose, simulate_root=simulate_root )
null
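Assuming the WordNet data package is installed, the wrapper simply forwards to Synset.path_similarity; the same pattern holds for the other similarity wrappers below:

from nltk.corpus import wordnet as wn

dog, cat = wn.synset("dog.n.01"), wn.synset("cat.n.01")
print(path_similarity(dog, cat))  # 0.2, i.e. 1 / (shortest_path_distance + 1)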
170,759
import math import os import re import warnings from collections import defaultdict, deque from functools import total_ordering from itertools import chain, islice from operator import itemgetter from nltk.corpus.reader import CorpusReader from nltk.internals import deprecated from nltk.probability import FreqDist from nltk.util import binary_search_file as _binary_search_file def lch_similarity(synset1, synset2, verbose=False, simulate_root=True): return synset1.lch_similarity(synset2, verbose=verbose, simulate_root=simulate_root)
null
170,760
import math import os import re import warnings from collections import defaultdict, deque from functools import total_ordering from itertools import chain, islice from operator import itemgetter from nltk.corpus.reader import CorpusReader from nltk.internals import deprecated from nltk.probability import FreqDist from nltk.util import binary_search_file as _binary_search_file def wup_similarity(synset1, synset2, verbose=False, simulate_root=True): return synset1.wup_similarity(synset2, verbose=verbose, simulate_root=simulate_root)
null
170,761
import math import os import re import warnings from collections import defaultdict, deque from functools import total_ordering from itertools import chain, islice from operator import itemgetter from nltk.corpus.reader import CorpusReader from nltk.internals import deprecated from nltk.probability import FreqDist from nltk.util import binary_search_file as _binary_search_file def res_similarity(synset1, synset2, ic, verbose=False): return synset1.res_similarity(synset2, ic, verbose=verbose)
null
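The information-content measures additionally take an IC dictionary, which NLTK ships via the wordnet_ic corpus (the exact score depends on the IC file used):

from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic

brown_ic = wordnet_ic.ic("ic-brown.dat")
dog, cat = wn.synset("dog.n.01"), wn.synset("cat.n.01")
print(res_similarity(dog, cat, brown_ic))  # roughly 7.9 with the Brown IC file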
170,762
import math import os import re import warnings from collections import defaultdict, deque from functools import total_ordering from itertools import chain, islice from operator import itemgetter from nltk.corpus.reader import CorpusReader from nltk.internals import deprecated from nltk.probability import FreqDist from nltk.util import binary_search_file as _binary_search_file def jcn_similarity(synset1, synset2, ic, verbose=False): return synset1.jcn_similarity(synset2, ic, verbose=verbose)
null
170,763
import math import os import re import warnings from collections import defaultdict, deque from functools import total_ordering from itertools import chain, islice from operator import itemgetter from nltk.corpus.reader import CorpusReader from nltk.internals import deprecated from nltk.probability import FreqDist from nltk.util import binary_search_file as _binary_search_file def lin_similarity(synset1, synset2, ic, verbose=False): return synset1.lin_similarity(synset2, ic, verbose=verbose)
null
170,764
import math import os import re import warnings from collections import defaultdict, deque from functools import total_ordering from itertools import chain, islice from operator import itemgetter from nltk.corpus.reader import CorpusReader from nltk.internals import deprecated from nltk.probability import FreqDist from nltk.util import binary_search_file as _binary_search_file class WordNetError(Exception): """An exception class for wordnet-related errors.""" def information_content(synset, ic): pos = synset._pos if pos == ADJ_SAT: pos = ADJ try: icpos = ic[pos] except KeyError as e: msg = "Information content file has no entries for part-of-speech: %s" raise WordNetError(msg % pos) from e counts = icpos[synset._offset] if counts == 0: return _INF else: return -math.log(counts / icpos[0]) The provided code snippet includes necessary dependencies for implementing the `_lcs_ic` function. Write a Python function `def _lcs_ic(synset1, synset2, ic, verbose=False)` to solve the following problem: Get the information content of the least common subsumer that has the highest information content value. If two nodes have no explicit common subsumer, assume that they share an artificial root node that is the hypernym of all explicit roots. :type synset1: Synset :param synset1: First input synset. :type synset2: Synset :param synset2: Second input synset. Must be the same part of speech as the first synset. :type ic: dict :param ic: an information content object (as returned by ``load_ic()``). :return: The information content of the two synsets and their most informative subsumer Here is the function: def _lcs_ic(synset1, synset2, ic, verbose=False): """ Get the information content of the least common subsumer that has the highest information content value. If two nodes have no explicit common subsumer, assume that they share an artificial root node that is the hypernym of all explicit roots. :type synset1: Synset :param synset1: First input synset. :type synset2: Synset :param synset2: Second input synset. Must be the same part of speech as the first synset. :type ic: dict :param ic: an information content object (as returned by ``load_ic()``). :return: The information content of the two synsets and their most informative subsumer """ if synset1._pos != synset2._pos: raise WordNetError( "Computing the least common subsumer requires " "%s and %s to have the same part of speech." % (synset1, synset2) ) ic1 = information_content(synset1, ic) ic2 = information_content(synset2, ic) subsumers = synset1.common_hypernyms(synset2) if len(subsumers) == 0: subsumer_ic = 0 else: subsumer_ic = max(information_content(s, ic) for s in subsumers) if verbose: print("> LCS Subsumer by content:", subsumer_ic) return ic1, ic2, subsumer_ic
Get the information content of the least common subsumer that has the highest information content value. If two nodes have no explicit common subsumer, assume that they share an artificial root node that is the hypernym of all explicit roots. :type synset1: Synset :param synset1: First input synset. :type synset2: Synset :param synset2: Second input synset. Must be the same part of speech as the first synset. :type ic: dict :param ic: an information content object (as returned by ``load_ic()``). :return: The information content of the two synsets and their most informative subsumer
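Unlike the public wrappers, _lcs_ic returns the raw triple those measures are built from; again assuming WordNet and the Brown IC file are available:

from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic

brown_ic = wordnet_ic.ic("ic-brown.dat")
ic1, ic2, lcs_ic = _lcs_ic(wn.synset("dog.n.01"), wn.synset("cat.n.01"), brown_ic)
print(lcs_ic)  # information content of the most informative common hypernym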
170,765
import math import os import re import warnings from collections import defaultdict, deque from functools import total_ordering from itertools import chain, islice from operator import itemgetter from nltk.corpus.reader import CorpusReader from nltk.internals import deprecated from nltk.probability import FreqDist from nltk.util import binary_search_file as _binary_search_file def _get_pos(field): if field[-1] == "n": return NOUN elif field[-1] == "v": return VERB else: msg = ( "Unidentified part of speech in WordNet Information Content file " "for field %s" % field ) raise ValueError(msg)
null
170,766
from nltk.corpus.reader.api import * from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView from nltk.tree import Tree def _all_xmlwords_in(elt, result=None): if result is None: result = [] for child in elt: if child.tag in ("wf", "punc"): result.append(child) else: _all_xmlwords_in(child, result) return result
null
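The helper flattens a parsed element down to its word and punctuation leaves, as a small ElementTree example shows:

from xml.etree import ElementTree

elt = ElementTree.fromstring("<s><wf>Hello</wf><punc>.</punc></s>")
print([w.text for w in _all_xmlwords_in(elt)])  # ['Hello', '.']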
170,767
from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * from nltk.corpus.reader.xmldocs import * The provided code snippet includes necessary dependencies for implementing the `norm` function. Write a Python function `def norm(value_string)` to solve the following problem: Normalize the string value in an RTE pair's ``value`` or ``entailment`` attribute as an integer (1, 0). :param value_string: the label used to classify a text/hypothesis pair :type value_string: str :rtype: int Here is the function: def norm(value_string): """ Normalize the string value in an RTE pair's ``value`` or ``entailment`` attribute as an integer (1, 0). :param value_string: the label used to classify a text/hypothesis pair :type value_string: str :rtype: int """ valdict = {"TRUE": 1, "FALSE": 0, "YES": 1, "NO": 0} return valdict[value_string.upper()]
Normalize the string value in an RTE pair's ``value`` or ``entailment`` attribute as an integer (1, 0). :param value_string: the label used to classify a text/hypothesis pair :type value_string: str :rtype: int
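For instance (the mapping is case-insensitive because of the upper() call):

print(norm("TRUE"), norm("no"))  # 1 0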
170,768
import re from collections import defaultdict from nltk.corpus.reader.util import concat from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader from nltk.util import LazyConcatenation, LazyMap, flatten class CHILDESCorpusReader(XMLCorpusReader): """ Corpus reader for the XML version of the CHILDES corpus. The CHILDES corpus is available at ``https://childes.talkbank.org/``. The XML version of CHILDES is located at ``https://childes.talkbank.org/data-xml/``. Copy the needed parts of the CHILDES XML corpus into the NLTK data directory (``nltk_data/corpora/CHILDES/``). For access to the file text use the usual nltk functions, ``words()``, ``sents()``, ``tagged_words()`` and ``tagged_sents()``. """ def __init__(self, root, fileids, lazy=True): XMLCorpusReader.__init__(self, root, fileids) self._lazy = lazy def words( self, fileids=None, speaker="ALL", stem=False, relation=False, strip_space=True, replace=False, ): """ :return: the given file(s) as a list of words :rtype: list(str) :param speaker: If specified, select specific speaker(s) defined in the corpus. Default is 'ALL' (all participants). Common choices are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude researchers) :param stem: If true, then use word stems instead of word strings. :param relation: If true, then return tuples of (stem, index, dependent_index) :param strip_space: If true, then strip trailing spaces from word tokens. Otherwise, leave the spaces on the tokens. :param replace: If true, then use the replaced (intended) word instead of the original word (e.g., 'wat' will be replaced with 'watch') """ sent = None pos = False if not self._lazy: return [ self._get_words( fileid, speaker, sent, stem, relation, pos, strip_space, replace ) for fileid in self.abspaths(fileids) ] get_words = lambda fileid: self._get_words( fileid, speaker, sent, stem, relation, pos, strip_space, replace ) return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) def tagged_words( self, fileids=None, speaker="ALL", stem=False, relation=False, strip_space=True, replace=False, ): """ :return: the given file(s) as a list of tagged words and punctuation symbols, encoded as tuples ``(word,tag)``. :rtype: list(tuple(str,str)) :param speaker: If specified, select specific speaker(s) defined in the corpus. Default is 'ALL' (all participants). Common choices are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude researchers) :param stem: If true, then use word stems instead of word strings. :param relation: If true, then return tuples of (stem, index, dependent_index) :param strip_space: If true, then strip trailing spaces from word tokens. Otherwise, leave the spaces on the tokens. :param replace: If true, then use the replaced (intended) word instead of the original word (e.g., 'wat' will be replaced with 'watch') """ sent = None pos = True if not self._lazy: return [ self._get_words( fileid, speaker, sent, stem, relation, pos, strip_space, replace ) for fileid in self.abspaths(fileids) ] get_words = lambda fileid: self._get_words( fileid, speaker, sent, stem, relation, pos, strip_space, replace ) return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) def sents( self, fileids=None, speaker="ALL", stem=False, relation=None, strip_space=True, replace=False, ): """ :return: the given file(s) as a list of sentences or utterances, each encoded as a list of word strings. :rtype: list(list(str)) :param speaker: If specified, select specific speaker(s) defined in the corpus. Default is 'ALL' (all participants). 
Common choices are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude researchers) :param stem: If true, then use word stems instead of word strings. :param relation: If true, then return tuples of ``(str,pos,relation_list)``. If there is manually-annotated relation info, it will return tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)`` :param strip_space: If true, then strip trailing spaces from word tokens. Otherwise, leave the spaces on the tokens. :param replace: If true, then use the replaced (intended) word instead of the original word (e.g., 'wat' will be replaced with 'watch') """ sent = True pos = False if not self._lazy: return [ self._get_words( fileid, speaker, sent, stem, relation, pos, strip_space, replace ) for fileid in self.abspaths(fileids) ] get_words = lambda fileid: self._get_words( fileid, speaker, sent, stem, relation, pos, strip_space, replace ) return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) def tagged_sents( self, fileids=None, speaker="ALL", stem=False, relation=None, strip_space=True, replace=False, ): """ :return: the given file(s) as a list of sentences, each encoded as a list of ``(word,tag)`` tuples. :rtype: list(list(tuple(str,str))) :param speaker: If specified, select specific speaker(s) defined in the corpus. Default is 'ALL' (all participants). Common choices are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude researchers) :param stem: If true, then use word stems instead of word strings. :param relation: If true, then return tuples of ``(str,pos,relation_list)``. If there is manually-annotated relation info, it will return tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)`` :param strip_space: If true, then strip trailing spaces from word tokens. Otherwise, leave the spaces on the tokens. 
:param replace: If true, then use the replaced (intended) word instead
            of the original word (e.g., 'wat' will be replaced with 'watch')
        """
        sent = True
        pos = True
        if not self._lazy:
            return [
                self._get_words(
                    fileid, speaker, sent, stem, relation, pos, strip_space, replace
                )
                for fileid in self.abspaths(fileids)
            ]
        get_words = lambda fileid: self._get_words(
            fileid, speaker, sent, stem, relation, pos, strip_space, replace
        )
        return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids)))

    def corpus(self, fileids=None):
        """
        :return: the given file(s) as a dict of ``(corpus_property_key, value)``
        :rtype: list(dict)
        """
        if not self._lazy:
            return [self._get_corpus(fileid) for fileid in self.abspaths(fileids)]
        return LazyMap(self._get_corpus, self.abspaths(fileids))

    def _get_corpus(self, fileid):
        results = dict()
        xmldoc = ElementTree.parse(fileid).getroot()
        for key, value in xmldoc.items():
            results[key] = value
        return results

    def participants(self, fileids=None):
        """
        :return: the given file(s) as a dict of
            ``(participant_property_key, value)``
        :rtype: list(dict)
        """
        if not self._lazy:
            return [self._get_participants(fileid) for fileid in self.abspaths(fileids)]
        return LazyMap(self._get_participants, self.abspaths(fileids))

    def _get_participants(self, fileid):
        # multidimensional dicts
        def dictOfDicts():
            return defaultdict(dictOfDicts)

        xmldoc = ElementTree.parse(fileid).getroot()
        # getting participants' data
        pat = dictOfDicts()
        for participant in xmldoc.findall(
            f".//{{{NS}}}Participants/{{{NS}}}participant"
        ):
            for (key, value) in participant.items():
                pat[participant.get("id")][key] = value
        return pat

    def age(self, fileids=None, speaker="CHI", month=False):
        """
        :return: the given file(s) as string or int
        :rtype: list or int
        :param month: If true, return months instead of year-month-date
        """
        if not self._lazy:
            return [
                self._get_age(fileid, speaker, month)
                for fileid in self.abspaths(fileids)
            ]
        get_age = lambda fileid: self._get_age(fileid, speaker, month)
        return LazyMap(get_age, self.abspaths(fileids))

    def _get_age(self, fileid, speaker, month):
        xmldoc = ElementTree.parse(fileid).getroot()
        for pat in xmldoc.findall(f".//{{{NS}}}Participants/{{{NS}}}participant"):
            try:
                if pat.get("id") == speaker:
                    age = pat.get("age")
                    if month:
                        age = self.convert_age(age)
                    return age
            # some files don't have age data
            except (TypeError, AttributeError) as e:
                return None

    def convert_age(self, age_year):
        "Calculate age in months from a string in CHILDES format"
        m = re.match(r"P(\d+)Y(\d+)M?(\d?\d?)D?", age_year)
        age_month = int(m.group(1)) * 12 + int(m.group(2))
        try:
            if int(m.group(3)) > 15:
                age_month += 1
        # some corpora don't have age information?
        except ValueError as e:
            pass
        return age_month

    def MLU(self, fileids=None, speaker="CHI"):
        """
        :return: the given file(s) as a floating number
        :rtype: list(float)
        """
        if not self._lazy:
            return [
                self._getMLU(fileid, speaker=speaker)
                for fileid in self.abspaths(fileids)
            ]
        get_MLU = lambda fileid: self._getMLU(fileid, speaker=speaker)
        return LazyMap(get_MLU, self.abspaths(fileids))

    def _getMLU(self, fileid, speaker):
        sents = self._get_words(
            fileid,
            speaker=speaker,
            sent=True,
            stem=True,
            relation=False,
            pos=True,
            strip_space=True,
            replace=True,
        )
        results = []
        lastSent = []
        numFillers = 0
        sentDiscount = 0
        for sent in sents:
            posList = [pos for (word, pos) in sent]
            # if any part of the sentence is unintelligible
            if any(pos == "unk" for pos in posList):
                continue
            # if the sentence is null
            elif sent == []:
                continue
            # if the sentence is the same as the last sent
            elif sent == lastSent:
                continue
            else:
                results.append([word for (word, pos) in sent])
                # count number of fillers
                if len({"co", None}.intersection(posList)) > 0:
                    numFillers += posList.count("co")
                    numFillers += posList.count(None)
                    sentDiscount += 1
            lastSent = sent
        try:
            thisWordList = flatten(results)
            # count number of morphemes
            # (e.g., 'read' = 1 morpheme but 'read-PAST' is 2 morphemes)
            numWords = (
                len(flatten([word.split("-") for word in thisWordList])) - numFillers
            )
            numSents = len(results) - sentDiscount
            mlu = numWords / numSents
        except ZeroDivisionError:
            mlu = 0
        # return {'mlu':mlu,'wordNum':numWords,'sentNum':numSents}
        return mlu

    def _get_words(
        self, fileid, speaker, sent, stem, relation, pos, strip_space, replace
    ):
        if (
            isinstance(speaker, str) and speaker != "ALL"
        ):  # ensure we have a list of speakers
            speaker = [speaker]
        xmldoc = ElementTree.parse(fileid).getroot()
        # processing each xml doc
        results = []
        for xmlsent in xmldoc.findall(".//{%s}u" % NS):
            sents = []
            # select speakers
            if speaker == "ALL" or xmlsent.get("who") in speaker:
                for xmlword in xmlsent.findall(".//{%s}w" % NS):
                    infl = None
                    suffixStem = None
                    suffixTag = None
                    # getting replaced words
                    if replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}replacement"):
                        xmlword = xmlsent.find(
                            f".//{{{NS}}}w/{{{NS}}}replacement/{{{NS}}}w"
                        )
                    elif replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk"):
                        xmlword = xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk")
                    # get text
                    if xmlword.text:
                        word = xmlword.text
                    else:
                        word = ""
                    # strip trailing space
                    if strip_space:
                        word = word.strip()
                    # stem
                    if relation or stem:
                        try:
                            xmlstem = xmlword.find(".//{%s}stem" % NS)
                            word = xmlstem.text
                        except AttributeError as e:
                            pass
                        # if there is an inflection
                        try:
                            xmlinfl = xmlword.find(
                                f".//{{{NS}}}mor/{{{NS}}}mw/{{{NS}}}mk"
                            )
                            word += "-" + xmlinfl.text
                        except:
                            pass
                        # if there is a suffix
                        try:
                            xmlsuffix = xmlword.find(
                                ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}stem"
                                % (NS, NS, NS, NS)
                            )
                            suffixStem = xmlsuffix.text
                        except AttributeError:
                            suffixStem = ""
                        if suffixStem:
                            word += "~" + suffixStem
                    # pos
                    if relation or pos:
                        try:
                            xmlpos = xmlword.findall(".//{%s}c" % NS)
                            xmlpos2 = xmlword.findall(".//{%s}s" % NS)
                            if xmlpos2 != []:
                                tag = xmlpos[0].text + ":" + xmlpos2[0].text
                            else:
                                tag = xmlpos[0].text
                        except (AttributeError, IndexError) as e:
                            tag = ""
                        try:
                            xmlsuffixpos = xmlword.findall(
                                ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}c"
                                % (NS, NS, NS, NS, NS)
                            )
                            xmlsuffixpos2 = xmlword.findall(
                                ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}s"
                                % (NS, NS, NS, NS, NS)
                            )
                            if xmlsuffixpos2:
                                suffixTag = (
                                    xmlsuffixpos[0].text + ":" + xmlsuffixpos2[0].text
                                )
                            else:
                                suffixTag = xmlsuffixpos[0].text
                        except:
                            pass
                        if suffixTag:
                            tag += "~" + suffixTag
                        word = (word, tag)
                    # relational
                    # the gold standard is stored in
                    # <mor></mor><mor type="trn"><gra type="grt">
                    if relation == True:
                        for xmlstem_rel in xmlword.findall(
                            f".//{{{NS}}}mor/{{{NS}}}gra"
                        ):
                            if not xmlstem_rel.get("type") == "grt":
                                word = (
                                    word[0],
                                    word[1],
                                    xmlstem_rel.get("index")
                                    + "|"
                                    + xmlstem_rel.get("head")
                                    + "|"
                                    + xmlstem_rel.get("relation"),
                                )
                            else:
                                word = (
                                    word[0],
                                    word[1],
                                    word[2],
                                    word[0],
                                    word[1],
                                    xmlstem_rel.get("index")
                                    + "|"
                                    + xmlstem_rel.get("head")
                                    + "|"
                                    + xmlstem_rel.get("relation"),
                                )
                        try:
                            for xmlpost_rel in xmlword.findall(
                                f".//{{{NS}}}mor/{{{NS}}}mor-post/{{{NS}}}gra"
                            ):
                                if not xmlpost_rel.get("type") == "grt":
                                    suffixStem = (
                                        suffixStem[0],
                                        suffixStem[1],
                                        xmlpost_rel.get("index")
                                        + "|"
                                        + xmlpost_rel.get("head")
                                        + "|"
                                        + xmlpost_rel.get("relation"),
                                    )
                                else:
                                    suffixStem = (
                                        suffixStem[0],
                                        suffixStem[1],
                                        suffixStem[2],
                                        suffixStem[0],
                                        suffixStem[1],
                                        xmlpost_rel.get("index")
                                        + "|"
                                        + xmlpost_rel.get("head")
                                        + "|"
                                        + xmlpost_rel.get("relation"),
                                    )
                        except:
                            pass
                    sents.append(word)
                if sent or relation:
                    results.append(sents)
                else:
                    results.extend(sents)
        return LazyMap(lambda x: x, results)

    # Ready-to-use browser opener
    """
    The base URL for viewing files on the childes website. This
    shouldn't need to be changed, unless CHILDES changes the configuration
    of their server or unless the user sets up their own corpus webserver.
    """
    childes_url_base = r"https://childes.talkbank.org/browser/index.php?url="

    def webview_file(self, fileid, urlbase=None):
        """Map a corpus file to its web version on the CHILDES website,
        and open it in a web browser.

        The complete URL to be used is:
        childes.childes_url_base + urlbase + fileid.replace('.xml', '.cha')

        If no urlbase is passed, we try to calculate it.  This
        requires that the childes corpus was set up to mirror the
        folder hierarchy under childes.psy.cmu.edu/data-xml/, e.g.:
        nltk_data/corpora/childes/Eng-USA/Cornell/??? or
        nltk_data/corpora/childes/Romance/Spanish/Aguirre/???

        The function first looks (as a special case) if "Eng-USA" is
        on the path consisting of <corpus root>+fileid; then if
        "childes", possibly followed by "data-xml", appears. If neither
        one is found, we use the unmodified fileid and hope for the best.
        If this is not right, specify urlbase explicitly, e.g., if the
        corpus root points to the Cornell folder, urlbase='Eng-USA/Cornell'.
        """
        import webbrowser

        if urlbase:
            path = urlbase + "/" + fileid
        else:
            full = self.root + "/" + fileid
            full = re.sub(r"\\", "/", full)
            if "/childes/" in full.lower():
                # Discard /data-xml/ if present
                path = re.findall(r"(?i)/childes(?:/data-xml)?/(.*)\.xml", full)[0]
            elif "eng-usa" in full.lower():
                path = "Eng-USA/" + re.findall(r"/(?i)Eng-USA/(.*)\.xml", full)[0]
            else:
                path = fileid

        # Strip ".xml" and add ".cha", as necessary:
        if path.endswith(".xml"):
            path = path[:-4]
        if not path.endswith(".cha"):
            path = path + ".cha"

        url = self.childes_url_base + path

        webbrowser.open_new_tab(url)
        print("Opening in browser:", url)
        # Pausing is a good idea, but it's up to the user...
        # raw_input("Hit Return to continue")


def find(resource_name, paths=None):
    """
    Find the given resource by searching through the directories and
    zip files in paths, where a None or empty string specifies an absolute path.
    Returns a corresponding path name.  If the given resource is not
    found, raise a ``LookupError``, whose message gives a pointer to
    the installation instructions for the NLTK downloader.
Zip File Handling: - If ``resource_name`` contains a component with a ``.zip`` extension, then it is assumed to be a zipfile; and the remaining path components are used to look inside the zipfile. - If any element of ``nltk.data.path`` has a ``.zip`` extension, then it is assumed to be a zipfile. - If a given resource name that does not contain any zipfile component is not found initially, then ``find()`` will make a second attempt to find that resource, by replacing each component *p* in the path with *p.zip/p*. For example, this allows ``find()`` to map the resource name ``corpora/chat80/cities.pl`` to a zip file path pointer to ``corpora/chat80.zip/chat80/cities.pl``. - When using ``find()`` to locate a directory contained in a zipfile, the resource name must end with the forward slash character. Otherwise, ``find()`` will not locate the directory. :type resource_name: str or unicode :param resource_name: The name of the resource to search for. Resource names are posix-style relative path names, such as ``corpora/brown``. Directory names will be automatically converted to a platform-appropriate path separator. :rtype: str """ resource_name = normalize_resource_name(resource_name, True) # Resolve default paths at runtime in-case the user overrides # nltk.data.path if paths is None: paths = path # Check if the resource name includes a zipfile name m = re.match(r"(.*\.zip)/?(.*)$|", resource_name) zipfile, zipentry = m.groups() # Check each item in our path for path_ in paths: # Is the path item a zipfile? if path_ and (os.path.isfile(path_) and path_.endswith(".zip")): try: return ZipFilePathPointer(path_, resource_name) except OSError: # resource not in zipfile continue # Is the path item a directory or is resource_name an absolute path? elif not path_ or os.path.isdir(path_): if zipfile is None: p = os.path.join(path_, url2pathname(resource_name)) if os.path.exists(p): if p.endswith(".gz"): return GzipFileSystemPathPointer(p) else: return FileSystemPathPointer(p) else: p = os.path.join(path_, url2pathname(zipfile)) if os.path.exists(p): try: return ZipFilePathPointer(p, zipentry) except OSError: # resource not in zipfile continue # Fallback: if the path doesn't include a zip file, then try # again, assuming that one of the path components is inside a # zipfile of the same name. if zipfile is None: pieces = resource_name.split("/") for i in range(len(pieces)): modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:]) try: return find(modified_name, paths) except LookupError: pass # Identify the package (i.e. the .zip file) to download. resource_zipname = resource_name.split("/")[1] if resource_zipname.endswith(".zip"): resource_zipname = resource_zipname.rpartition(".")[0] # Display a friendly error message if the resource wasn't found: msg = str( "Resource \33[93m{resource}\033[0m not found.\n" "Please use the NLTK Downloader to obtain the resource:\n\n" "\33[31m" # To display red text in terminal. ">>> import nltk\n" ">>> nltk.download('{resource}')\n" "\033[0m" ).format(resource=resource_zipname) msg = textwrap_indent(msg) msg += "\n For more information see: https://www.nltk.org/data.html\n" msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format( resource_name=resource_name ) msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths) sep = "*" * 70 resource_not_found = f"\n{sep}\n{msg}\n{sep}\n" raise LookupError(resource_not_found) The provided code snippet includes necessary dependencies for implementing the `demo` function. 
Write a Python function `def demo(corpus_root=None)` to solve the following problem: The CHILDES corpus should be manually downloaded and saved to ``[NLTK_Data_Dir]/corpora/childes/`` Here is the function: def demo(corpus_root=None): """ The CHILDES corpus should be manually downloaded and saved to ``[NLTK_Data_Dir]/corpora/childes/`` """ if not corpus_root: from nltk.data import find corpus_root = find("corpora/childes/data-xml/Eng-USA/") try: childes = CHILDESCorpusReader(corpus_root, ".*.xml") # describe all corpus for file in childes.fileids()[:5]: corpus = "" corpus_id = "" for (key, value) in childes.corpus(file)[0].items(): if key == "Corpus": corpus = value if key == "Id": corpus_id = value print("Reading", corpus, corpus_id, " .....") print("words:", childes.words(file)[:7], "...") print( "words with replaced words:", childes.words(file, replace=True)[:7], " ...", ) print("words with pos tags:", childes.tagged_words(file)[:7], " ...") print("words (only MOT):", childes.words(file, speaker="MOT")[:7], "...") print("words (only CHI):", childes.words(file, speaker="CHI")[:7], "...") print("stemmed words:", childes.words(file, stem=True)[:7], " ...") print( "words with relations and pos-tag:", childes.words(file, relation=True)[:5], " ...", ) print("sentence:", childes.sents(file)[:2], " ...") for (participant, values) in childes.participants(file)[0].items(): for (key, value) in values.items(): print("\tparticipant", participant, key, ":", value) print("num of sent:", len(childes.sents(file))) print("num of morphemes:", len(childes.words(file, stem=True))) print("age:", childes.age(file)) print("age in month:", childes.age(file, month=True)) print("MLU:", childes.MLU(file)) print() except LookupError as e: print( """The CHILDES corpus, or the parts you need, should be manually downloaded from https://childes.talkbank.org/data-xml/ and saved at [NLTK_Data_Dir]/corpora/childes/ Alternately, you can call the demo with the path to a portion of the CHILDES corpus, e.g.: demo('/path/to/childes/data-xml/Eng-USA/") """ ) # corpus_root_http = urllib2.urlopen('https://childes.talkbank.org/data-xml/Eng-USA/Bates.zip') # corpus_root_http_bates = zipfile.ZipFile(cStringIO.StringIO(corpus_root_http.read())) ##this fails # childes = CHILDESCorpusReader(corpus_root_http_bates,corpus_root_http_bates.namelist())
The CHILDES corpus should be manually downloaded and saved to ``[NLTK_Data_Dir]/corpora/childes/``
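convert_age needs no reader state, so its month-rounding rule is easy to check in isolation (bypassing __init__ purely for illustration):

r = CHILDESCorpusReader.__new__(CHILDESCorpusReader)
print(r.convert_age("P2Y6M14D"))  # 30 months
print(r.convert_age("P2Y6M20D"))  # 31 -- a day count above 15 rounds the month up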
170,769
from nltk.corpus.reader.util import concat from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader, XMLCorpusView def _all_xmlwords_in(elt, result=None): if result is None: result = [] for child in elt: if child.tag in ("c", "w"): result.append(child) else: _all_xmlwords_in(child, result) return result
null
170,770
import sys import inspect def getinfo(func): """ Returns an info dictionary containing: - name (the name of the function : str) - argnames (the names of the arguments : list) - defaults (the values of the default arguments : tuple) - signature (the signature : str) - fullsignature (the full signature : Signature) - doc (the docstring : str) - module (the module name : str) - dict (the function __dict__ : str) >>> def f(self, x=1, y=2, *args, **kw): pass >>> info = getinfo(f) >>> info["name"] 'f' >>> info["argnames"] ['self', 'x', 'y', 'args', 'kw'] >>> info["defaults"] (1, 2) >>> info["signature"] 'self, x, y, *args, **kw' >>> info["fullsignature"] <Signature (self, x=1, y=2, *args, **kw)> """ assert inspect.ismethod(func) or inspect.isfunction(func) argspec = inspect.getfullargspec(func) regargs, varargs, varkwargs = argspec[:3] argnames = list(regargs) if varargs: argnames.append(varargs) if varkwargs: argnames.append(varkwargs) fullsignature = inspect.signature(func) # Convert Signature to str signature = __legacysignature(fullsignature) # pypy compatibility if hasattr(func, "__closure__"): _closure = func.__closure__ _globals = func.__globals__ else: _closure = func.func_closure _globals = func.func_globals return dict( name=func.__name__, argnames=argnames, signature=signature, fullsignature=fullsignature, defaults=func.__defaults__, doc=func.__doc__, module=func.__module__, dict=func.__dict__, globals=_globals, closure=_closure, ) def update_wrapper(wrapper, model, infodict=None): "akin to functools.update_wrapper" infodict = infodict or getinfo(model) wrapper.__name__ = infodict["name"] wrapper.__doc__ = infodict["doc"] wrapper.__module__ = infodict["module"] wrapper.__dict__.update(infodict["dict"]) wrapper.__defaults__ = infodict["defaults"] wrapper.undecorated = model return wrapper def decorator_factory(cls): """ Take a class with a ``.caller`` method and return a callable decorator object. It works by adding a suitable __call__ method to the class; it raises a TypeError if the class already has a nontrivial __call__ method. """ attrs = set(dir(cls)) if "__call__" in attrs: raise TypeError( "You cannot decorate a class with a nontrivial " "__call__ method" ) if "call" not in attrs: raise TypeError("You cannot decorate a class without a " ".call method") cls.__call__ = __call__ return cls The provided code snippet includes necessary dependencies for implementing the `decorator` function. Write a Python function `def decorator(caller)` to solve the following problem: General purpose decorator factory: takes a caller function as input and returns a decorator with the same attributes. A caller function is any function like this:: def caller(func, *args, **kw): # do something return func(*args, **kw) Here is an example of usage: >>> @decorator ... def chatty(f, *args, **kw): ... print("Calling %r" % f.__name__) ... return f(*args, **kw) >>> chatty.__name__ 'chatty' >>> @chatty ... def f(): pass ... >>> f() Calling 'f' decorator can also take in input a class with a .caller method; in this case it converts the class into a factory of callable decorator objects. See the documentation for an example. Here is the function: def decorator(caller): """ General purpose decorator factory: takes a caller function as input and returns a decorator with the same attributes. A caller function is any function like this:: def caller(func, *args, **kw): # do something return func(*args, **kw) Here is an example of usage: >>> @decorator ... def chatty(f, *args, **kw): ... 
print("Calling %r" % f.__name__) ... return f(*args, **kw) >>> chatty.__name__ 'chatty' >>> @chatty ... def f(): pass ... >>> f() Calling 'f' decorator can also take in input a class with a .caller method; in this case it converts the class into a factory of callable decorator objects. See the documentation for an example. """ if inspect.isclass(caller): return decorator_factory(caller) def _decorator(func): # the real meat is here infodict = getinfo(func) argnames = infodict["argnames"] assert not ( "_call_" in argnames or "_func_" in argnames ), "You cannot use _call_ or _func_ as argument names!" src = "lambda %(signature)s: _call_(_func_, %(signature)s)" % infodict # import sys; print >> sys.stderr, src # for debugging purposes dec_func = eval(src, dict(_func_=func, _call_=caller)) return update_wrapper(dec_func, func, infodict) return update_wrapper(_decorator, caller)
General purpose decorator factory: takes a caller function as input and returns a decorator with the same attributes. A caller function is any function like this:: def caller(func, *args, **kw): # do something return func(*args, **kw) Here is an example of usage: >>> @decorator ... def chatty(f, *args, **kw): ... print("Calling %r" % f.__name__) ... return f(*args, **kw) >>> chatty.__name__ 'chatty' >>> @chatty ... def f(): pass ... >>> f() Calling 'f' decorator can also take in input a class with a .caller method; in this case it converts the class into a factory of callable decorator objects. See the documentation for an example.
170,771
import sys import inspect def getattr_(obj, name, default_thunk): "Similar to .setdefault in dictionaries." try: return getattr(obj, name) except AttributeError: default = default_thunk() setattr(obj, name, default) return default def memoize(func, *args): dic = getattr_(func, "memoize_dic", dict) # memoize_dic is created at the first call if args in dic: return dic[args] result = func(*args) dic[args] = result return result
null
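A quick demonstration that the cache lives on the function object itself:

def square(x):
    print("computing", x)
    return x * x

print(memoize(square, 4))  # prints "computing 4", then 16
print(memoize(square, 4))  # served from square.memoize_dic; prints only 16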
170,772
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree _java_bin = None _java_options = [] def config_java(bin=None, options=None, verbose=False): """ Configure nltk's java interface, by letting nltk know where it can find the Java binary, and what extra options (if any) should be passed to Java when it is run. :param bin: The full path to the Java binary. If not specified, then nltk will search the system for a Java binary; and if one is not found, it will raise a ``LookupError`` exception. :type bin: str :param options: A list of options that should be passed to the Java binary when it is called. A common value is ``'-Xmx512m'``, which tells Java binary to increase the maximum heap size to 512 megabytes. If no options are specified, then do not modify the options list. :type options: list(str) """ global _java_bin, _java_options _java_bin = find_binary( "java", bin, env_vars=["JAVAHOME", "JAVA_HOME"], verbose=verbose, binary_names=["java.exe"], ) if options is not None: if isinstance(options, str): options = options.split() _java_options = list(options) def _decode_stdoutdata(stdoutdata): """Convert data read from stdout/stderr to unicode""" if not isinstance(stdoutdata, bytes): return stdoutdata encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding()) if encoding is None: return stdoutdata.decode() return stdoutdata.decode(encoding) The provided code snippet includes necessary dependencies for implementing the `java` function. Write a Python function `def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True)` to solve the following problem: Execute the given java command, by opening a subprocess that calls Java. If java has not yet been configured, it will be configured by calling ``config_java()`` with no arguments. :param cmd: The java command that should be called, formatted as a list of strings. Typically, the first string will be the name of the java class; and the remaining strings will be arguments for that java class. :type cmd: list(str) :param classpath: A ``':'`` separated list of directories, JAR archives, and ZIP archives to search for class files. :type classpath: str :param stdin: Specify the executed program's standard input file handles, respectively. Valid values are ``subprocess.PIPE``, an existing file descriptor (a positive integer), an existing file object, 'pipe', 'stdout', 'devnull' and None. ``subprocess.PIPE`` indicates that a new pipe to the child should be created. With None, no redirection will occur; the child's file handles will be inherited from the parent. Additionally, stderr can be ``subprocess.STDOUT``, which indicates that the stderr data from the applications should be captured into the same file handle as for stdout. :param stdout: Specify the executed program's standard output file handle. See ``stdin`` for valid values. :param stderr: Specify the executed program's standard error file handle. See ``stdin`` for valid values. :param blocking: If ``false``, then return immediately after spawning the subprocess. In this case, the return value is the ``Popen`` object, and not a ``(stdout, stderr)`` tuple. :return: If ``blocking=True``, then return a tuple ``(stdout, stderr)``, containing the stdout and stderr outputs generated by the java command if the ``stdout`` and ``stderr`` parameters were set to ``subprocess.PIPE``; or None otherwise. 
If ``blocking=False``, then return a ``subprocess.Popen`` object. :raise OSError: If the java command returns a nonzero return code. Here is the function: def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True): """ Execute the given java command, by opening a subprocess that calls Java. If java has not yet been configured, it will be configured by calling ``config_java()`` with no arguments. :param cmd: The java command that should be called, formatted as a list of strings. Typically, the first string will be the name of the java class; and the remaining strings will be arguments for that java class. :type cmd: list(str) :param classpath: A ``':'`` separated list of directories, JAR archives, and ZIP archives to search for class files. :type classpath: str :param stdin: Specify the executed program's standard input file handles, respectively. Valid values are ``subprocess.PIPE``, an existing file descriptor (a positive integer), an existing file object, 'pipe', 'stdout', 'devnull' and None. ``subprocess.PIPE`` indicates that a new pipe to the child should be created. With None, no redirection will occur; the child's file handles will be inherited from the parent. Additionally, stderr can be ``subprocess.STDOUT``, which indicates that the stderr data from the applications should be captured into the same file handle as for stdout. :param stdout: Specify the executed program's standard output file handle. See ``stdin`` for valid values. :param stderr: Specify the executed program's standard error file handle. See ``stdin`` for valid values. :param blocking: If ``false``, then return immediately after spawning the subprocess. In this case, the return value is the ``Popen`` object, and not a ``(stdout, stderr)`` tuple. :return: If ``blocking=True``, then return a tuple ``(stdout, stderr)``, containing the stdout and stderr outputs generated by the java command if the ``stdout`` and ``stderr`` parameters were set to ``subprocess.PIPE``; or None otherwise. If ``blocking=False``, then return a ``subprocess.Popen`` object. :raise OSError: If the java command returns a nonzero return code. """ subprocess_output_dict = { "pipe": subprocess.PIPE, "stdout": subprocess.STDOUT, "devnull": subprocess.DEVNULL, } stdin = subprocess_output_dict.get(stdin, stdin) stdout = subprocess_output_dict.get(stdout, stdout) stderr = subprocess_output_dict.get(stderr, stderr) if isinstance(cmd, str): raise TypeError("cmd should be a list of strings") # Make sure we know where a java binary is. if _java_bin is None: config_java() # Set up the classpath. if isinstance(classpath, str): classpaths = [classpath] else: classpaths = list(classpath) classpath = os.path.pathsep.join(classpaths) # Construct the full command string. cmd = list(cmd) cmd = ["-cp", classpath] + cmd cmd = [_java_bin] + _java_options + cmd # Call java via a subprocess p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr) if not blocking: return p (stdout, stderr) = p.communicate() # Check the return code. if p.returncode != 0: print(_decode_stdoutdata(stderr)) raise OSError("Java command failed : " + str(cmd)) return (stdout, stderr)
Execute the given java command by opening a subprocess that calls Java. If java has not yet been configured, it will be configured by calling ``config_java()`` with no arguments.
:param cmd: The java command that should be called, formatted as a list of strings. Typically, the first string will be the name of the java class, and the remaining strings will be arguments for that java class.
:type cmd: list(str)
:param classpath: A ``':'`` separated list of directories, JAR archives, and ZIP archives to search for class files.
:type classpath: str
:param stdin: Specify the executed program's standard input file handle. Valid values are ``subprocess.PIPE``, an existing file descriptor (a positive integer), an existing file object, 'pipe', 'stdout', 'devnull' and None. ``subprocess.PIPE`` indicates that a new pipe to the child should be created. With None, no redirection will occur; the child's file handles will be inherited from the parent. Additionally, stderr can be ``subprocess.STDOUT``, which indicates that the stderr data from the application should be captured into the same file handle as for stdout.
:param stdout: Specify the executed program's standard output file handle. See ``stdin`` for valid values.
:param stderr: Specify the executed program's standard error file handle. See ``stdin`` for valid values.
:param blocking: If ``False``, then return immediately after spawning the subprocess. In this case, the return value is the ``Popen`` object, and not a ``(stdout, stderr)`` tuple.
:return: If ``blocking=True``, then return a tuple ``(stdout, stderr)``, containing the stdout and stderr outputs generated by the java command if the ``stdout`` and ``stderr`` parameters were set to ``subprocess.PIPE``; or None otherwise. If ``blocking=False``, then return a ``subprocess.Popen`` object.
:raise OSError: If the java command returns a nonzero return code.
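A minimal usage sketch of this interface; the class name org.example.Tokenizer, the input file, and the jar path are hypothetical placeholders, and a working JVM is assumed:

from nltk.internals import config_java, java

# Optionally raise the JVM heap limit before the first call.
config_java(options="-Xmx512m")

# Run a (hypothetical) Java class, capturing both output streams.
stdout, stderr = java(
    ["org.example.Tokenizer", "input.txt"],
    classpath="/opt/jars/tokenizer.jar",
    stdout="pipe",
    stderr="pipe",
)
print(stdout.decode())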
170,773
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree class ReadError(ValueError): """ Exception raised by read_* functions when they fail. :param position: The index in the input string where an error occurred. :param expected: What was expected when an error occurred. """ def __init__(self, expected, position): ValueError.__init__(self, expected, position) self.expected = expected self.position = position def __str__(self): return f"Expected {self.expected} at {self.position}" _STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')") The provided code snippet includes necessary dependencies for implementing the `read_str` function. Write a Python function `def read_str(s, start_position)` to solve the following problem: If a Python string literal begins at the specified position in the given string, then return a tuple ``(val, end_position)`` containing the value of the string literal and the position where it ends. Otherwise, raise a ``ReadError``. :param s: A string that will be checked to see if within which a Python string literal exists. :type s: str :param start_position: The specified beginning position of the string ``s`` to begin regex matching. :type start_position: int :return: A tuple containing the matched string literal evaluated as a string and the end position of the string literal. :rtype: tuple(str, int) :raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a match in ``s`` at ``start_position``, i.e., open quote. If the ``_STRING_END_RE`` regex doesn't return a match in ``s`` at the end of the first match, i.e., close quote. :raise ValueError: If an invalid string (i.e., contains an invalid escape sequence) is passed into the ``eval``. :Example: >>> from nltk.internals import read_str >>> read_str('"Hello", World!', 0) ('Hello', 7) Here is the function: def read_str(s, start_position): """ If a Python string literal begins at the specified position in the given string, then return a tuple ``(val, end_position)`` containing the value of the string literal and the position where it ends. Otherwise, raise a ``ReadError``. :param s: A string that will be checked to see if within which a Python string literal exists. :type s: str :param start_position: The specified beginning position of the string ``s`` to begin regex matching. :type start_position: int :return: A tuple containing the matched string literal evaluated as a string and the end position of the string literal. :rtype: tuple(str, int) :raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a match in ``s`` at ``start_position``, i.e., open quote. If the ``_STRING_END_RE`` regex doesn't return a match in ``s`` at the end of the first match, i.e., close quote. :raise ValueError: If an invalid string (i.e., contains an invalid escape sequence) is passed into the ``eval``. :Example: >>> from nltk.internals import read_str >>> read_str('"Hello", World!', 0) ('Hello', 7) """ # Read the open quote, and any modifiers. m = _STRING_START_RE.match(s, start_position) if not m: raise ReadError("open quote", start_position) quotemark = m.group(1) # Find the close quote. _STRING_END_RE = re.compile(r"\\|%s" % quotemark) position = m.end() while True: match = _STRING_END_RE.search(s, position) if not match: raise ReadError("close quote", position) if match.group(0) == "\\": position = match.end() + 1 else: break # Process it, using eval. 
Strings with invalid escape sequences # might raise ValueError. try: return eval(s[start_position : match.end()]), match.end() except ValueError as e: raise ReadError("valid escape sequence", start_position) from e
If a Python string literal begins at the specified position in the given string, then return a tuple ``(val, end_position)`` containing the value of the string literal and the position where it ends. Otherwise, raise a ``ReadError``.
:param s: The string to check for a Python string literal.
:type s: str
:param start_position: The position in ``s`` at which regex matching should begin.
:type start_position: int
:return: A tuple containing the matched string literal evaluated as a string and the end position of the string literal.
:rtype: tuple(str, int)
:raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a match in ``s`` at ``start_position``, i.e., open quote. If the ``_STRING_END_RE`` regex doesn't return a match in ``s`` at the end of the first match, i.e., close quote.
:raise ValueError: If an invalid string (i.e., one containing an invalid escape sequence) is passed into the ``eval``.
:Example:
>>> from nltk.internals import read_str
>>> read_str('"Hello", World!', 0)
('Hello', 7)
170,774
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree class ReadError(ValueError): """ Exception raised by read_* functions when they fail. :param position: The index in the input string where an error occurred. :param expected: What was expected when an error occurred. """ def __init__(self, expected, position): ValueError.__init__(self, expected, position) self.expected = expected self.position = position def __str__(self): return f"Expected {self.expected} at {self.position}" _READ_INT_RE = re.compile(r"-?\d+") The provided code snippet includes necessary dependencies for implementing the `read_int` function. Write a Python function `def read_int(s, start_position)` to solve the following problem: If an integer begins at the specified position in the given string, then return a tuple ``(val, end_position)`` containing the value of the integer and the position where it ends. Otherwise, raise a ``ReadError``. :param s: A string that will be checked to see if within which a Python integer exists. :type s: str :param start_position: The specified beginning position of the string ``s`` to begin regex matching. :type start_position: int :return: A tuple containing the matched integer casted to an int, and the end position of the int in ``s``. :rtype: tuple(int, int) :raise ReadError: If the ``_READ_INT_RE`` regex doesn't return a match in ``s`` at ``start_position``. :Example: >>> from nltk.internals import read_int >>> read_int('42 is the answer', 0) (42, 2) Here is the function: def read_int(s, start_position): """ If an integer begins at the specified position in the given string, then return a tuple ``(val, end_position)`` containing the value of the integer and the position where it ends. Otherwise, raise a ``ReadError``. :param s: A string that will be checked to see if within which a Python integer exists. :type s: str :param start_position: The specified beginning position of the string ``s`` to begin regex matching. :type start_position: int :return: A tuple containing the matched integer casted to an int, and the end position of the int in ``s``. :rtype: tuple(int, int) :raise ReadError: If the ``_READ_INT_RE`` regex doesn't return a match in ``s`` at ``start_position``. :Example: >>> from nltk.internals import read_int >>> read_int('42 is the answer', 0) (42, 2) """ m = _READ_INT_RE.match(s, start_position) if not m: raise ReadError("integer", start_position) return int(m.group()), m.end()
If an integer begins at the specified position in the given string, then return a tuple ``(val, end_position)`` containing the value of the integer and the position where it ends. Otherwise, raise a ``ReadError``.
:param s: The string to check for an integer.
:type s: str
:param start_position: The position in ``s`` at which regex matching should begin.
:type start_position: int
:return: A tuple containing the matched integer cast to an int, and the end position of the int in ``s``.
:rtype: tuple(int, int)
:raise ReadError: If the ``_READ_INT_RE`` regex doesn't return a match in ``s`` at ``start_position``.
:Example:
>>> from nltk.internals import read_int
>>> read_int('42 is the answer', 0)
(42, 2)
170,775
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree class ReadError(ValueError): """ Exception raised by read_* functions when they fail. :param position: The index in the input string where an error occurred. :param expected: What was expected when an error occurred. """ def __init__(self, expected, position): ValueError.__init__(self, expected, position) self.expected = expected self.position = position def __str__(self): return f"Expected {self.expected} at {self.position}" _READ_NUMBER_VALUE = re.compile(r"-?(\d*)([.]?\d*)?") The provided code snippet includes necessary dependencies for implementing the `read_number` function. Write a Python function `def read_number(s, start_position)` to solve the following problem: If an integer or float begins at the specified position in the given string, then return a tuple ``(val, end_position)`` containing the value of the number and the position where it ends. Otherwise, raise a ``ReadError``. :param s: A string that will be checked to see if within which a Python number exists. :type s: str :param start_position: The specified beginning position of the string ``s`` to begin regex matching. :type start_position: int :return: A tuple containing the matched number casted to a ``float``, and the end position of the number in ``s``. :rtype: tuple(float, int) :raise ReadError: If the ``_READ_NUMBER_VALUE`` regex doesn't return a match in ``s`` at ``start_position``. :Example: >>> from nltk.internals import read_number >>> read_number('Pi is 3.14159', 6) (3.14159, 13) Here is the function: def read_number(s, start_position): """ If an integer or float begins at the specified position in the given string, then return a tuple ``(val, end_position)`` containing the value of the number and the position where it ends. Otherwise, raise a ``ReadError``. :param s: A string that will be checked to see if within which a Python number exists. :type s: str :param start_position: The specified beginning position of the string ``s`` to begin regex matching. :type start_position: int :return: A tuple containing the matched number casted to a ``float``, and the end position of the number in ``s``. :rtype: tuple(float, int) :raise ReadError: If the ``_READ_NUMBER_VALUE`` regex doesn't return a match in ``s`` at ``start_position``. :Example: >>> from nltk.internals import read_number >>> read_number('Pi is 3.14159', 6) (3.14159, 13) """ m = _READ_NUMBER_VALUE.match(s, start_position) if not m or not (m.group(1) or m.group(2)): raise ReadError("number", start_position) if m.group(2): return float(m.group()), m.end() else: return int(m.group()), m.end()
If an integer or float begins at the specified position in the given string, then return a tuple ``(val, end_position)`` containing the value of the number and the position where it ends. Otherwise, raise a ``ReadError``.
:param s: The string to check for a number.
:type s: str
:param start_position: The position in ``s`` at which regex matching should begin.
:type start_position: int
:return: A tuple containing the matched number cast to a ``float`` (or an ``int`` if it has no fractional part), and the end position of the number in ``s``.
:rtype: tuple(float, int)
:raise ReadError: If the ``_READ_NUMBER_VALUE`` regex doesn't return a match in ``s`` at ``start_position``.
:Example:
>>> from nltk.internals import read_number
>>> read_number('Pi is 3.14159', 6)
(3.14159, 13)
170,776
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree def _mro(cls): """ Return the method resolution order for ``cls`` -- i.e., a list containing ``cls`` and all its base classes, in the order in which they would be checked by ``getattr``. For new-style classes, this is just cls.__mro__. For classic classes, this can be obtained by a depth-first left-to-right traversal of ``__bases__``. """ if isinstance(cls, type): return cls.__mro__ else: mro = [cls] for base in cls.__bases__: mro.extend(_mro(base)) return mro The provided code snippet includes necessary dependencies for implementing the `overridden` function. Write a Python function `def overridden(method)` to solve the following problem: :return: True if ``method`` overrides some method with the same name in a base class. This is typically used when defining abstract base classes or interfaces, to allow subclasses to define either of two related methods: >>> class EaterI: ... '''Subclass must define eat() or batch_eat().''' ... def eat(self, food): ... if overridden(self.batch_eat): ... return self.batch_eat([food])[0] ... else: ... raise NotImplementedError() ... def batch_eat(self, foods): ... return [self.eat(food) for food in foods] :type method: instance method Here is the function: def overridden(method): """ :return: True if ``method`` overrides some method with the same name in a base class. This is typically used when defining abstract base classes or interfaces, to allow subclasses to define either of two related methods: >>> class EaterI: ... '''Subclass must define eat() or batch_eat().''' ... def eat(self, food): ... if overridden(self.batch_eat): ... return self.batch_eat([food])[0] ... else: ... raise NotImplementedError() ... def batch_eat(self, foods): ... return [self.eat(food) for food in foods] :type method: instance method """ if isinstance(method, types.MethodType) and method.__self__.__class__ is not None: name = method.__name__ funcs = [ cls.__dict__[name] for cls in _mro(method.__self__.__class__) if name in cls.__dict__ ] return len(funcs) > 1 else: raise TypeError("Expected an instance method.")
:return: True if ``method`` overrides some method with the same name in a base class. This is typically used when defining abstract base classes or interfaces, to allow subclasses to define either of two related methods: >>> class EaterI: ... '''Subclass must define eat() or batch_eat().''' ... def eat(self, food): ... if overridden(self.batch_eat): ... return self.batch_eat([food])[0] ... else: ... raise NotImplementedError() ... def batch_eat(self, foods): ... return [self.eat(food) for food in foods] :type method: instance method
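A quick check of overridden() on a toy hierarchy: Cow redefines eat(), while Pig inherits it unchanged:

from nltk.internals import overridden

class EaterI:
    def eat(self, food):
        raise NotImplementedError()

class Cow(EaterI):
    def eat(self, food):
        return "moo"

class Pig(EaterI):
    pass

print(overridden(Cow().eat))  # True: Cow.eat shadows EaterI.eat
print(overridden(Pig().eat))  # False: only EaterI defines eat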
170,777
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree def _add_epytext_field(obj, field, message): """Add an epytext @field to a given object's docstring.""" indent = "" # If we already have a docstring, then add a blank line to separate # it from the new field, and check its indentation. if obj.__doc__: obj.__doc__ = obj.__doc__.rstrip() + "\n\n" indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs()) if indents: indent = min(indents) # If we don't have a docstring, add an empty one. else: obj.__doc__ = "" obj.__doc__ += textwrap.fill( f"@{field}: {message}", initial_indent=indent, subsequent_indent=indent + " ", ) The provided code snippet includes necessary dependencies for implementing the `deprecated` function. Write a Python function `def deprecated(message)` to solve the following problem: A decorator used to mark functions as deprecated. This will cause a warning to be printed the when the function is used. Usage: >>> from nltk.internals import deprecated >>> @deprecated('Use foo() instead') ... def bar(x): ... print(x/10) Here is the function: def deprecated(message): """ A decorator used to mark functions as deprecated. This will cause a warning to be printed the when the function is used. Usage: >>> from nltk.internals import deprecated >>> @deprecated('Use foo() instead') ... def bar(x): ... print(x/10) """ def decorator(func): msg = f"Function {func.__name__}() has been deprecated. {message}" msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ") def newFunc(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning, stacklevel=2) return func(*args, **kwargs) # Copy the old function's name, docstring, & dict newFunc.__dict__.update(func.__dict__) newFunc.__name__ = func.__name__ newFunc.__doc__ = func.__doc__ newFunc.__deprecated__ = True # Add a @deprecated field to the docstring. _add_epytext_field(newFunc, "deprecated", message) return newFunc return decorator
A decorator used to mark functions as deprecated. This will cause a warning to be printed when the function is used. Usage:
>>> from nltk.internals import deprecated
>>> @deprecated('Use foo() instead')
... def bar(x):
...     print(x/10)
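Exercising a decorated function and capturing the DeprecationWarning it emits; bar() here mirrors the docstring's example:

import warnings

from nltk.internals import deprecated

@deprecated("Use foo() instead")
def bar(x):
    return x / 10

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    bar(100)

print(caught[0].category)  # <class 'DeprecationWarning'>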
170,778
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree def find_jar_iter( name_pattern, path_to_jar=None, env_vars=(), searchpath=(), url=None, verbose=False, is_regex=False, ): """ Search for a jar that is used by nltk. :param name_pattern: The name of the jar file :param path_to_jar: The user-supplied jar location, or None. :param env_vars: A list of environment variable names to check in addition to the CLASSPATH variable which is checked by default. :param searchpath: List of directories to search. :param is_regex: Whether name is a regular expression. """ assert isinstance(name_pattern, str) assert not isinstance(searchpath, str) if isinstance(env_vars, str): env_vars = env_vars.split() yielded = False # Make sure we check the CLASSPATH first env_vars = ["CLASSPATH"] + list(env_vars) # If an explicit location was given, then check it, and yield it if # it's present; otherwise, complain. if path_to_jar is not None: if os.path.isfile(path_to_jar): yielded = True yield path_to_jar else: raise LookupError( f"Could not find {name_pattern} jar file at {path_to_jar}" ) # Check environment variables for env_var in env_vars: if env_var in os.environ: if env_var == "CLASSPATH": classpath = os.environ["CLASSPATH"] for cp in classpath.split(os.path.pathsep): cp = os.path.expanduser(cp) if os.path.isfile(cp): filename = os.path.basename(cp) if ( is_regex and re.match(name_pattern, filename) or (not is_regex and filename == name_pattern) ): if verbose: print(f"[Found {name_pattern}: {cp}]") yielded = True yield cp # The case where user put directory containing the jar file in the classpath if os.path.isdir(cp): if not is_regex: if os.path.isfile(os.path.join(cp, name_pattern)): if verbose: print(f"[Found {name_pattern}: {cp}]") yielded = True yield os.path.join(cp, name_pattern) else: # Look for file using regular expression for file_name in os.listdir(cp): if re.match(name_pattern, file_name): if verbose: print( "[Found %s: %s]" % ( name_pattern, os.path.join(cp, file_name), ) ) yielded = True yield os.path.join(cp, file_name) else: jar_env = os.path.expanduser(os.environ[env_var]) jar_iter = ( ( os.path.join(jar_env, path_to_jar) for path_to_jar in os.listdir(jar_env) ) if os.path.isdir(jar_env) else (jar_env,) ) for path_to_jar in jar_iter: if os.path.isfile(path_to_jar): filename = os.path.basename(path_to_jar) if ( is_regex and re.match(name_pattern, filename) or (not is_regex and filename == name_pattern) ): if verbose: print(f"[Found {name_pattern}: {path_to_jar}]") yielded = True yield path_to_jar # Check the path list. for directory in searchpath: if is_regex: for filename in os.listdir(directory): path_to_jar = os.path.join(directory, filename) if os.path.isfile(path_to_jar): if re.match(name_pattern, filename): if verbose: print(f"[Found {filename}: {path_to_jar}]") yielded = True yield path_to_jar else: path_to_jar = os.path.join(directory, name_pattern) if os.path.isfile(path_to_jar): if verbose: print(f"[Found {name_pattern}: {path_to_jar}]") yielded = True yield path_to_jar if not yielded: # If nothing was found, raise an error msg = "NLTK was unable to find %s!" 
% name_pattern
        if env_vars:
            msg += " Set the %s environment variable" % env_vars[0]
        msg = textwrap.fill(msg + ".", initial_indent=" ", subsequent_indent=" ")
        if searchpath:
            msg += "\n\n Searched in:"
            msg += "".join("\n - %s" % d for d in searchpath)
        if url:
            msg += "\n\n For more information on {}, see:\n <{}>".format(
                name_pattern,
                url,
            )
        div = "=" * 75
        raise LookupError(f"\n\n{div}\n{msg}\n{div}")


def find_jar(
    name_pattern,
    path_to_jar=None,
    env_vars=(),
    searchpath=(),
    url=None,
    verbose=False,
    is_regex=False,
):
    return next(
        find_jar_iter(
            name_pattern, path_to_jar, env_vars, searchpath, url, verbose, is_regex
        )
    )
null
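A hedged sketch of resolving a jar with find_jar(); the jar name, environment variable, search path, and URL below are illustrative values, not anything shipped with nltk:

from nltk.internals import find_jar

path_to_jar = find_jar(
    "stanford-postagger.jar",
    env_vars=("STANFORD_POSTAGGER",),
    searchpath=("/usr/local/lib",),
    url="https://nlp.stanford.edu/software/tagger.shtml",
    verbose=True,
)
# Raises LookupError with a detailed message if nothing matches.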
170,779
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree The provided code snippet includes necessary dependencies for implementing the `import_from_stdlib` function. Write a Python function `def import_from_stdlib(module)` to solve the following problem: When python is run from within the nltk/ directory tree, the current directory is included at the beginning of the search path. Unfortunately, that means that modules within nltk can sometimes shadow standard library modules. As an example, the stdlib 'inspect' module will attempt to import the stdlib 'tokenize' module, but will instead end up importing NLTK's 'tokenize' module instead (causing the import to fail). Here is the function: def import_from_stdlib(module): """ When python is run from within the nltk/ directory tree, the current directory is included at the beginning of the search path. Unfortunately, that means that modules within nltk can sometimes shadow standard library modules. As an example, the stdlib 'inspect' module will attempt to import the stdlib 'tokenize' module, but will instead end up importing NLTK's 'tokenize' module instead (causing the import to fail). """ old_path = sys.path sys.path = [d for d in sys.path if d not in ("", ".")] m = __import__(module) sys.path = old_path return m
When python is run from within the nltk/ directory tree, the current directory is included at the beginning of the search path. Unfortunately, that means that modules within nltk can sometimes shadow standard library modules. As an example, the stdlib 'inspect' module will attempt to import the stdlib 'tokenize' module, but will instead end up importing NLTK's 'tokenize' module (causing the import to fail).
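For example, fetching the standard-library tokenize module even when nltk's own tokenize package would otherwise shadow it:

from nltk.internals import import_from_stdlib

tokenize = import_from_stdlib("tokenize")
print(tokenize.__file__)  # points at the standard-library copy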
170,780
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree The provided code snippet includes necessary dependencies for implementing the `slice_bounds` function. Write a Python function `def slice_bounds(sequence, slice_obj, allow_step=False)` to solve the following problem: Given a slice, return the corresponding (start, stop) bounds, taking into account None indices and negative indices. The following guarantees are made for the returned start and stop values: - 0 <= start <= len(sequence) - 0 <= stop <= len(sequence) - start <= stop :raise ValueError: If ``slice_obj.step`` is not None. :param allow_step: If true, then the slice object may have a non-None step. If it does, then return a tuple (start, stop, step). Here is the function: def slice_bounds(sequence, slice_obj, allow_step=False): """ Given a slice, return the corresponding (start, stop) bounds, taking into account None indices and negative indices. The following guarantees are made for the returned start and stop values: - 0 <= start <= len(sequence) - 0 <= stop <= len(sequence) - start <= stop :raise ValueError: If ``slice_obj.step`` is not None. :param allow_step: If true, then the slice object may have a non-None step. If it does, then return a tuple (start, stop, step). """ start, stop = (slice_obj.start, slice_obj.stop) # If allow_step is true, then include the step in our return # value tuple. if allow_step: step = slice_obj.step if step is None: step = 1 # Use a recursive call without allow_step to find the slice # bounds. If step is negative, then the roles of start and # stop (in terms of default values, etc), are swapped. if step < 0: start, stop = slice_bounds(sequence, slice(stop, start)) else: start, stop = slice_bounds(sequence, slice(start, stop)) return start, stop, step # Otherwise, make sure that no non-default step value is used. elif slice_obj.step not in (None, 1): raise ValueError( "slices with steps are not supported by %s" % sequence.__class__.__name__ ) # Supply default offsets. if start is None: start = 0 if stop is None: stop = len(sequence) # Handle negative indices. if start < 0: start = max(0, len(sequence) + start) if stop < 0: stop = max(0, len(sequence) + stop) # Make sure stop doesn't go past the end of the list. Note that # we avoid calculating len(sequence) if possible, because for lazy # sequences, calculating the length of a sequence can be expensive. if stop > 0: try: sequence[stop - 1] except IndexError: stop = len(sequence) # Make sure start isn't past stop. start = min(start, stop) # That's all folks! return start, stop
Given a slice, return the corresponding (start, stop) bounds, taking into account None indices and negative indices. The following guarantees are made for the returned start and stop values:
- 0 <= start <= len(sequence)
- 0 <= stop <= len(sequence)
- start <= stop
:raise ValueError: If ``allow_step`` is false and ``slice_obj.step`` is neither None nor 1.
:param allow_step: If true, then the slice object may have a non-None step. If it does, then return a tuple (start, stop, step).
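A few calls showing how slice_bounds() normalizes None, negative, and out-of-range indices:

from nltk.internals import slice_bounds

seq = list(range(10))
print(slice_bounds(seq, slice(None, None)))  # (0, 10)
print(slice_bounds(seq, slice(-3, None)))    # (7, 10): negative start resolved
print(slice_bounds(seq, slice(2, 100)))      # (2, 10): stop clamped to len(seq)
print(slice_bounds(seq, slice(None, None, 2), allow_step=True))  # (0, 10, 2)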
170,781
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree def is_writable(path): # Ensure that it exists. if not os.path.exists(path): return False # If we're on a posix system, check its permissions. if hasattr(os, "getuid"): statdata = os.stat(path) perm = stat.S_IMODE(statdata.st_mode) # is it world-writable? if perm & 0o002: return True # do we own it? elif statdata.st_uid == os.getuid() and (perm & 0o200): return True # are we in a group that can write to it? elif (statdata.st_gid in [os.getgid()] + os.getgroups()) and (perm & 0o020): return True # otherwise, we can't write to it. else: return False # Otherwise, we'll assume it's writable. # [xx] should we do other checks on other platforms? return True
null
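Probing a couple of paths; on POSIX systems this inspects ownership and mode bits rather than attempting a write:

from nltk.internals import is_writable

print(is_writable("/tmp"))           # typically True
print(is_writable("/no/such/path"))  # False: the path must exist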
170,782
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree def raise_unorderable_types(ordering, a, b): raise TypeError( "unorderable types: %s() %s %s()" % (type(a).__name__, ordering, type(b).__name__) )
null
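The helper standardizes the TypeError raised for comparisons between incompatible types:

from nltk.internals import raise_unorderable_types

try:
    raise_unorderable_types("<", 1, "one")
except TypeError as e:
    print(e)  # unorderable types: int() < str()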
170,783
import functools import re import nltk.tree def _build_tgrep_parser(set_parse_actions=True): """ Builds a pyparsing-based parser object for tokenizing and interpreting tgrep search strings. """ tgrep_op = pyparsing.Optional("!") + pyparsing.Regex("[$%,.<>][%,.<>0-9-':]*") tgrep_qstring = pyparsing.QuotedString( quoteChar='"', escChar="\\", unquoteResults=False ) tgrep_node_regex = pyparsing.QuotedString( quoteChar="/", escChar="\\", unquoteResults=False ) tgrep_qstring_icase = pyparsing.Regex('i@\\"(?:[^"\\n\\r\\\\]|(?:\\\\.))*\\"') tgrep_node_regex_icase = pyparsing.Regex("i@\\/(?:[^/\\n\\r\\\\]|(?:\\\\.))*\\/") tgrep_node_literal = pyparsing.Regex("[^][ \r\t\n;:.,&|<>()$!@%'^=]+") tgrep_expr = pyparsing.Forward() tgrep_relations = pyparsing.Forward() tgrep_parens = pyparsing.Literal("(") + tgrep_expr + ")" tgrep_nltk_tree_pos = ( pyparsing.Literal("N(") + pyparsing.Optional( pyparsing.Word(pyparsing.nums) + "," + pyparsing.Optional( pyparsing.delimitedList(pyparsing.Word(pyparsing.nums), delim=",") + pyparsing.Optional(",") ) ) + ")" ) tgrep_node_label = pyparsing.Regex("[A-Za-z0-9]+") tgrep_node_label_use = pyparsing.Combine("=" + tgrep_node_label) # see _tgrep_segmented_pattern_action tgrep_node_label_use_pred = tgrep_node_label_use.copy() macro_name = pyparsing.Regex("[^];:.,&|<>()[$!@%'^=\r\t\n ]+") macro_name.setWhitespaceChars("") macro_use = pyparsing.Combine("@" + macro_name) tgrep_node_expr = ( tgrep_node_label_use_pred | macro_use | tgrep_nltk_tree_pos | tgrep_qstring_icase | tgrep_node_regex_icase | tgrep_qstring | tgrep_node_regex | "*" | tgrep_node_literal ) tgrep_node_expr2 = ( tgrep_node_expr + pyparsing.Literal("=").setWhitespaceChars("") + tgrep_node_label.copy().setWhitespaceChars("") ) | tgrep_node_expr tgrep_node = tgrep_parens | ( pyparsing.Optional("'") + tgrep_node_expr2 + pyparsing.ZeroOrMore("|" + tgrep_node_expr) ) tgrep_brackets = pyparsing.Optional("!") + "[" + tgrep_relations + "]" tgrep_relation = tgrep_brackets | (tgrep_op + tgrep_node) tgrep_rel_conjunction = pyparsing.Forward() tgrep_rel_conjunction << ( tgrep_relation + pyparsing.ZeroOrMore(pyparsing.Optional("&") + tgrep_rel_conjunction) ) tgrep_relations << tgrep_rel_conjunction + pyparsing.ZeroOrMore( "|" + tgrep_relations ) tgrep_expr << tgrep_node + pyparsing.Optional(tgrep_relations) tgrep_expr_labeled = tgrep_node_label_use + pyparsing.Optional(tgrep_relations) tgrep_expr2 = tgrep_expr + pyparsing.ZeroOrMore(":" + tgrep_expr_labeled) macro_defn = ( pyparsing.Literal("@") + pyparsing.White().suppress() + macro_name + tgrep_expr2 ) tgrep_exprs = ( pyparsing.Optional(macro_defn + pyparsing.ZeroOrMore(";" + macro_defn) + ";") + tgrep_expr2 + pyparsing.ZeroOrMore(";" + (macro_defn | tgrep_expr2)) + pyparsing.ZeroOrMore(";").suppress() ) if set_parse_actions: tgrep_node_label_use.setParseAction(_tgrep_node_label_use_action) tgrep_node_label_use_pred.setParseAction(_tgrep_node_label_pred_use_action) macro_use.setParseAction(_tgrep_macro_use_action) tgrep_node.setParseAction(_tgrep_node_action) tgrep_node_expr2.setParseAction(_tgrep_bind_node_label_action) tgrep_parens.setParseAction(_tgrep_parens_action) tgrep_nltk_tree_pos.setParseAction(_tgrep_nltk_tree_pos_action) tgrep_relation.setParseAction(_tgrep_relation_action) tgrep_rel_conjunction.setParseAction(_tgrep_conjunction_action) tgrep_relations.setParseAction(_tgrep_rel_disjunction_action) macro_defn.setParseAction(_macro_defn_action) # the whole expression is also the conjunction of two # predicates: the first node predicate, and the remaining # 
relation predicates tgrep_expr.setParseAction(_tgrep_conjunction_action) tgrep_expr_labeled.setParseAction(_tgrep_segmented_pattern_action) tgrep_expr2.setParseAction( functools.partial(_tgrep_conjunction_action, join_char=":") ) tgrep_exprs.setParseAction(_tgrep_exprs_action) return tgrep_exprs.ignore("#" + pyparsing.restOfLine) The provided code snippet includes necessary dependencies for implementing the `tgrep_tokenize` function. Write a Python function `def tgrep_tokenize(tgrep_string)` to solve the following problem: Tokenizes a TGrep search string into separate tokens. Here is the function: def tgrep_tokenize(tgrep_string): """ Tokenizes a TGrep search string into separate tokens. """ parser = _build_tgrep_parser(False) if isinstance(tgrep_string, bytes): tgrep_string = tgrep_string.decode() return list(parser.parseString(tgrep_string))
Tokenizes a TGrep search string into separate tokens.
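Tokenizing a simple pattern (pyparsing must be installed); the pattern below is an illustrative node-relation-node query:

from nltk.tgrep import tgrep_tokenize

print(tgrep_tokenize("NP < PP"))
# ['NP', '<', 'PP']: node, relation operator, node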
170,784
import functools import re import nltk.tree def tgrep_compile(tgrep_string): """ Parses (and tokenizes, if necessary) a TGrep search string into a lambda function. """ parser = _build_tgrep_parser(True) if isinstance(tgrep_string, bytes): tgrep_string = tgrep_string.decode() return list(parser.parseString(tgrep_string, parseAll=True))[0] def treepositions_no_leaves(tree): """ Returns all the tree positions in the given tree which are not leaf nodes. """ treepositions = tree.treepositions() # leaves are treeposition tuples that are not prefixes of any # other treeposition prefixes = set() for pos in treepositions: for length in range(len(pos)): prefixes.add(pos[:length]) return [pos for pos in treepositions if pos in prefixes] The provided code snippet includes necessary dependencies for implementing the `tgrep_positions` function. Write a Python function `def tgrep_positions(pattern, trees, search_leaves=True)` to solve the following problem: Return the tree positions in the trees which match the given pattern. :param pattern: a tgrep search pattern :type pattern: str or output of tgrep_compile() :param trees: a sequence of NLTK trees (usually ParentedTrees) :type trees: iter(ParentedTree) or iter(Tree) :param search_leaves: whether to return matching leaf nodes :type search_leaves: bool :rtype: iter(tree positions) Here is the function: def tgrep_positions(pattern, trees, search_leaves=True): """ Return the tree positions in the trees which match the given pattern. :param pattern: a tgrep search pattern :type pattern: str or output of tgrep_compile() :param trees: a sequence of NLTK trees (usually ParentedTrees) :type trees: iter(ParentedTree) or iter(Tree) :param search_leaves: whether to return matching leaf nodes :type search_leaves: bool :rtype: iter(tree positions) """ if isinstance(pattern, (bytes, str)): pattern = tgrep_compile(pattern) for tree in trees: try: if search_leaves: positions = tree.treepositions() else: positions = treepositions_no_leaves(tree) yield [position for position in positions if pattern(tree[position])] except AttributeError: yield []
Return the tree positions in the trees which match the given pattern. :param pattern: a tgrep search pattern :type pattern: str or output of tgrep_compile() :param trees: a sequence of NLTK trees (usually ParentedTrees) :type trees: iter(ParentedTree) or iter(Tree) :param search_leaves: whether to return matching leaf nodes :type search_leaves: bool :rtype: iter(tree positions)
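Matching positions in a single ParentedTree built from a bracketed string (the sentence is made up for illustration):

from nltk.tgrep import tgrep_positions
from nltk.tree import ParentedTree

tree = ParentedTree.fromstring(
    "(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))"
)
print(list(tgrep_positions("NN", [tree])))
# [[(0, 2), (2, 1)]]: one list of tree positions per input tree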
170,785
import functools import re import nltk.tree def tgrep_compile(tgrep_string): """ Parses (and tokenizes, if necessary) a TGrep search string into a lambda function. """ parser = _build_tgrep_parser(True) if isinstance(tgrep_string, bytes): tgrep_string = tgrep_string.decode() return list(parser.parseString(tgrep_string, parseAll=True))[0] def treepositions_no_leaves(tree): """ Returns all the tree positions in the given tree which are not leaf nodes. """ treepositions = tree.treepositions() # leaves are treeposition tuples that are not prefixes of any # other treeposition prefixes = set() for pos in treepositions: for length in range(len(pos)): prefixes.add(pos[:length]) return [pos for pos in treepositions if pos in prefixes] The provided code snippet includes necessary dependencies for implementing the `tgrep_nodes` function. Write a Python function `def tgrep_nodes(pattern, trees, search_leaves=True)` to solve the following problem: Return the tree nodes in the trees which match the given pattern. :param pattern: a tgrep search pattern :type pattern: str or output of tgrep_compile() :param trees: a sequence of NLTK trees (usually ParentedTrees) :type trees: iter(ParentedTree) or iter(Tree) :param search_leaves: whether to return matching leaf nodes :type search_leaves: bool :rtype: iter(tree nodes) Here is the function: def tgrep_nodes(pattern, trees, search_leaves=True): """ Return the tree nodes in the trees which match the given pattern. :param pattern: a tgrep search pattern :type pattern: str or output of tgrep_compile() :param trees: a sequence of NLTK trees (usually ParentedTrees) :type trees: iter(ParentedTree) or iter(Tree) :param search_leaves: whether to return matching leaf nodes :type search_leaves: bool :rtype: iter(tree nodes) """ if isinstance(pattern, (bytes, str)): pattern = tgrep_compile(pattern) for tree in trees: try: if search_leaves: positions = tree.treepositions() else: positions = treepositions_no_leaves(tree) yield [tree[position] for position in positions if pattern(tree[position])] except AttributeError: yield []
Return the tree nodes in the trees which match the given pattern. :param pattern: a tgrep search pattern :type pattern: str or output of tgrep_compile() :param trees: a sequence of NLTK trees (usually ParentedTrees) :type trees: iter(ParentedTree) or iter(Tree) :param search_leaves: whether to return matching leaf nodes :type search_leaves: bool :rtype: iter(tree nodes)
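The same search, yielding the matching subtrees instead of their positions:

from nltk.tgrep import tgrep_nodes
from nltk.tree import ParentedTree

tree = ParentedTree.fromstring(
    "(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))"
)
print(list(tgrep_nodes("NN", [tree])))
# [[ParentedTree('NN', ['dog']), ParentedTree('NN', ['cat'])]]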
170,786
from nltk.corpus import ( genesis, gutenberg, inaugural, nps_chat, treebank, webtext, wordnet, ) from nltk.probability import FreqDist from nltk.text import Text from nltk.util import bigrams print("*** Introductory Examples for the NLTK Book ***") print("Loading text1, ..., text9 and sent1, ..., sent9") print("Type the name of the text or sentence to view it.") print("Type: 'texts()' or 'sents()' to list the materials.") text1 = Text(gutenberg.words("melville-moby_dick.txt")) print("text1:", text1.name) text2 = Text(gutenberg.words("austen-sense.txt")) print("text2:", text2.name) text3 = Text(genesis.words("english-kjv.txt"), name="The Book of Genesis") print("text3:", text3.name) text4 = Text(inaugural.words(), name="Inaugural Address Corpus") print("text4:", text4.name) text5 = Text(nps_chat.words(), name="Chat Corpus") print("text5:", text5.name) text6 = Text(webtext.words("grail.txt"), name="Monty Python and the Holy Grail") print("text6:", text6.name) text7 = Text(treebank.words(), name="Wall Street Journal") print("text7:", text7.name) text8 = Text(webtext.words("singles.txt"), name="Personals Corpus") print("text8:", text8.name) text9 = Text(gutenberg.words("chesterton-thursday.txt")) print("text9:", text9.name) def texts(): print("text1:", text1.name) print("text2:", text2.name) print("text3:", text3.name) print("text4:", text4.name) print("text5:", text5.name) print("text6:", text6.name) print("text7:", text7.name) print("text8:", text8.name) print("text9:", text9.name)
null
170,787
from nltk.corpus import ( genesis, gutenberg, inaugural, nps_chat, treebank, webtext, wordnet, ) from nltk.probability import FreqDist from nltk.text import Text from nltk.util import bigrams print("*** Introductory Examples for the NLTK Book ***") print("Loading text1, ..., text9 and sent1, ..., sent9") print("Type the name of the text or sentence to view it.") print("Type: 'texts()' or 'sents()' to list the materials.") print("text1:", text1.name) print("text2:", text2.name) print("text3:", text3.name) print("text4:", text4.name) print("text5:", text5.name) print("text6:", text6.name) print("text7:", text7.name) print("text8:", text8.name) print("text9:", text9.name) sent1 = ["Call", "me", "Ishmael", "."] sent2 = [ "The", "family", "of", "Dashwood", "had", "long", "been", "settled", "in", "Sussex", ".", ] sent3 = [ "In", "the", "beginning", "God", "created", "the", "heaven", "and", "the", "earth", ".", ] sent4 = [ "Fellow", "-", "Citizens", "of", "the", "Senate", "and", "of", "the", "House", "of", "Representatives", ":", ] sent5 = [ "I", "have", "a", "problem", "with", "people", "PMing", "me", "to", "lol", "JOIN", ] sent6 = [ "SCENE", "1", ":", "[", "wind", "]", "[", "clop", "clop", "clop", "]", "KING", "ARTHUR", ":", "Whoa", "there", "!", ] sent7 = [ "Pierre", "Vinken", ",", "61", "years", "old", ",", "will", "join", "the", "board", "as", "a", "nonexecutive", "director", "Nov.", "29", ".", ] sent8 = [ "25", "SEXY", "MALE", ",", "seeks", "attrac", "older", "single", "lady", ",", "for", "discreet", "encounters", ".", ] sent9 = [ "THE", "suburb", "of", "Saffron", "Park", "lay", "on", "the", "sunset", "side", "of", "London", ",", "as", "red", "and", "ragged", "as", "a", "cloud", "of", "sunset", ".", ] def sents(): print("sent1:", " ".join(sent1)) print("sent2:", " ".join(sent2)) print("sent3:", " ".join(sent3)) print("sent4:", " ".join(sent4)) print("sent5:", " ".join(sent5)) print("sent6:", " ".join(sent6)) print("sent7:", " ".join(sent7)) print("sent8:", " ".join(sent8)) print("sent9:", " ".join(sent9))
null
170,788
from nltk.corpus import wordnet wordnet: WordNetCorpusReader = LazyCorpusLoader( "wordnet", WordNetCorpusReader, LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"), ) The provided code snippet includes necessary dependencies for implementing the `lesk` function. Write a Python function `def lesk(context_sentence, ambiguous_word, pos=None, synsets=None)` to solve the following problem: Return a synset for an ambiguous word in a context. :param iter context_sentence: The context sentence where the ambiguous word occurs, passed as an iterable of words. :param str ambiguous_word: The ambiguous word that requires WSD. :param str pos: A specified Part-of-Speech (POS). :param iter synsets: Possible synsets of the ambiguous word. :return: ``lesk_sense`` The Synset() object with the highest signature overlaps. This function is an implementation of the original Lesk algorithm (1986) [1]. Usage example:: >>> lesk(['I', 'went', 'to', 'the', 'bank', 'to', 'deposit', 'money', '.'], 'bank', 'n') Synset('savings_bank.n.02') [1] Lesk, Michael. "Automatic sense disambiguation using machine readable dictionaries: how to tell a pine cone from an ice cream cone." Proceedings of the 5th Annual International Conference on Systems Documentation. ACM, 1986. https://dl.acm.org/citation.cfm?id=318728 Here is the function: def lesk(context_sentence, ambiguous_word, pos=None, synsets=None): """Return a synset for an ambiguous word in a context. :param iter context_sentence: The context sentence where the ambiguous word occurs, passed as an iterable of words. :param str ambiguous_word: The ambiguous word that requires WSD. :param str pos: A specified Part-of-Speech (POS). :param iter synsets: Possible synsets of the ambiguous word. :return: ``lesk_sense`` The Synset() object with the highest signature overlaps. This function is an implementation of the original Lesk algorithm (1986) [1]. Usage example:: >>> lesk(['I', 'went', 'to', 'the', 'bank', 'to', 'deposit', 'money', '.'], 'bank', 'n') Synset('savings_bank.n.02') [1] Lesk, Michael. "Automatic sense disambiguation using machine readable dictionaries: how to tell a pine cone from an ice cream cone." Proceedings of the 5th Annual International Conference on Systems Documentation. ACM, 1986. https://dl.acm.org/citation.cfm?id=318728 """ context = set(context_sentence) if synsets is None: synsets = wordnet.synsets(ambiguous_word) if pos: synsets = [ss for ss in synsets if str(ss.pos()) == pos] if not synsets: return None _, sense = max( (len(context.intersection(ss.definition().split())), ss) for ss in synsets ) return sense
Return a synset for an ambiguous word in a context. :param iter context_sentence: The context sentence where the ambiguous word occurs, passed as an iterable of words. :param str ambiguous_word: The ambiguous word that requires WSD. :param str pos: A specified Part-of-Speech (POS). :param iter synsets: Possible synsets of the ambiguous word. :return: ``lesk_sense`` The Synset() object with the highest signature overlaps. This function is an implementation of the original Lesk algorithm (1986) [1]. Usage example:: >>> lesk(['I', 'went', 'to', 'the', 'bank', 'to', 'deposit', 'money', '.'], 'bank', 'n') Synset('savings_bank.n.02') [1] Lesk, Michael. "Automatic sense disambiguation using machine readable dictionaries: how to tell a pine cone from an ice cream cone." Proceedings of the 5th Annual International Conference on Systems Documentation. ACM, 1986. https://dl.acm.org/citation.cfm?id=318728
170,789
import re import sys from collections import Counter, defaultdict, namedtuple from functools import reduce from math import log from nltk.collocations import BigramCollocationFinder from nltk.lm import MLE from nltk.lm.preprocessing import padded_everygram_pipeline from nltk.metrics import BigramAssocMeasures, f_measure from nltk.probability import ConditionalFreqDist as CFD from nltk.probability import FreqDist from nltk.tokenize import sent_tokenize from nltk.util import LazyConcatenation, tokenwrap class Text: """ A wrapper around a sequence of simple (string) tokens, which is intended to support initial exploration of texts (via the interactive console). Its methods perform a variety of analyses on the text's contexts (e.g., counting, concordancing, collocation discovery), and display the results. If you wish to write a program which makes use of these analyses, then you should bypass the ``Text`` class, and use the appropriate analysis function or class directly instead. A ``Text`` is typically initialized from a given document or corpus. E.g.: >>> import nltk.corpus >>> from nltk.text import Text >>> moby = Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt')) """ # This defeats lazy loading, but makes things faster. This # *shouldn't* be necessary because the corpus view *should* be # doing intelligent caching, but without this it's running slow. # Look into whether the caching is working correctly. _COPY_TOKENS = True def __init__(self, tokens, name=None): """ Create a Text object. :param tokens: The source text. :type tokens: sequence of str """ if self._COPY_TOKENS: tokens = list(tokens) self.tokens = tokens if name: self.name = name elif "]" in tokens[:20]: end = tokens[:20].index("]") self.name = " ".join(str(tok) for tok in tokens[1:end]) else: self.name = " ".join(str(tok) for tok in tokens[:8]) + "..." # //////////////////////////////////////////////////////////// # Support item & slice access # //////////////////////////////////////////////////////////// def __getitem__(self, i): return self.tokens[i] def __len__(self): return len(self.tokens) # //////////////////////////////////////////////////////////// # Interactive console methods # //////////////////////////////////////////////////////////// def concordance(self, word, width=79, lines=25): """ Prints a concordance for ``word`` with the specified context window. Word matching is not case-sensitive. :param word: The target word or phrase (a list of strings) :type word: str or list :param width: The width of each line, in characters (default=80) :type width: int :param lines: The number of lines to display (default=25) :type lines: int :seealso: ``ConcordanceIndex`` """ if "_concordance_index" not in self.__dict__: self._concordance_index = ConcordanceIndex( self.tokens, key=lambda s: s.lower() ) return self._concordance_index.print_concordance(word, width, lines) def concordance_list(self, word, width=79, lines=25): """ Generate a concordance for ``word`` with the specified context window. Word matching is not case-sensitive. 
:param word: The target word or phrase (a list of strings) :type word: str or list :param width: The width of each line, in characters (default=80) :type width: int :param lines: The number of lines to display (default=25) :type lines: int :seealso: ``ConcordanceIndex`` """ if "_concordance_index" not in self.__dict__: self._concordance_index = ConcordanceIndex( self.tokens, key=lambda s: s.lower() ) return self._concordance_index.find_concordance(word, width)[:lines] def collocation_list(self, num=20, window_size=2): """ Return collocations derived from the text, ignoring stopwords. >>> from nltk.book import text4 >>> text4.collocation_list()[:2] [('United', 'States'), ('fellow', 'citizens')] :param num: The maximum number of collocations to return. :type num: int :param window_size: The number of tokens spanned by a collocation (default=2) :type window_size: int :rtype: list(tuple(str, str)) """ if not ( "_collocations" in self.__dict__ and self._num == num and self._window_size == window_size ): self._num = num self._window_size = window_size # print("Building collocations list") from nltk.corpus import stopwords ignored_words = stopwords.words("english") finder = BigramCollocationFinder.from_words(self.tokens, window_size) finder.apply_freq_filter(2) finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words) bigram_measures = BigramAssocMeasures() self._collocations = list( finder.nbest(bigram_measures.likelihood_ratio, num) ) return self._collocations def collocations(self, num=20, window_size=2): """ Print collocations derived from the text, ignoring stopwords. >>> from nltk.book import text4 >>> text4.collocations() # doctest: +NORMALIZE_WHITESPACE United States; fellow citizens; years ago; four years; Federal Government; General Government; American people; Vice President; God bless; Chief Justice; one another; fellow Americans; Old World; Almighty God; Fellow citizens; Chief Magistrate; every citizen; Indian tribes; public debt; foreign nations :param num: The maximum number of collocations to print. :type num: int :param window_size: The number of tokens spanned by a collocation (default=2) :type window_size: int """ collocation_strings = [ w1 + " " + w2 for w1, w2 in self.collocation_list(num, window_size) ] print(tokenwrap(collocation_strings, separator="; ")) def count(self, word): """ Count the number of times this word appears in the text. """ return self.tokens.count(word) def index(self, word): """ Find the index of the first occurrence of the word in the text. """ return self.tokens.index(word) def readability(self, method): # code from nltk_contrib.readability raise NotImplementedError def similar(self, word, num=20): """ Distributional similarity: find other words which appear in the same contexts as the specified word; list most similar words first. 
:param word: The word used to seed the similarity search :type word: str :param num: The number of words to generate (default=20) :type num: int :seealso: ContextIndex.similar_words() """ if "_word_context_index" not in self.__dict__: # print('Building word-context index...') self._word_context_index = ContextIndex( self.tokens, filter=lambda x: x.isalpha(), key=lambda s: s.lower() ) # words = self._word_context_index.similar_words(word, num) word = word.lower() wci = self._word_context_index._word_to_contexts if word in wci.conditions(): contexts = set(wci[word]) fd = Counter( w for w in wci.conditions() for c in wci[w] if c in contexts and not w == word ) words = [w for w, _ in fd.most_common(num)] print(tokenwrap(words)) else: print("No matches") def common_contexts(self, words, num=20): """ Find contexts where the specified words appear; list most frequent common contexts first. :param words: The words used to seed the similarity search :type words: str :param num: The number of words to generate (default=20) :type num: int :seealso: ContextIndex.common_contexts() """ if "_word_context_index" not in self.__dict__: # print('Building word-context index...') self._word_context_index = ContextIndex( self.tokens, key=lambda s: s.lower() ) try: fd = self._word_context_index.common_contexts(words, True) if not fd: print("No common contexts were found") else: ranked_contexts = [w for w, _ in fd.most_common(num)] print(tokenwrap(w1 + "_" + w2 for w1, w2 in ranked_contexts)) except ValueError as e: print(e) def dispersion_plot(self, words): """ Produce a plot showing the distribution of the words through the text. Requires pylab to be installed. :param words: The words to be plotted :type words: list(str) :seealso: nltk.draw.dispersion_plot() """ from nltk.draw import dispersion_plot dispersion_plot(self, words) def _train_default_ngram_lm(self, tokenized_sents, n=3): train_data, padded_sents = padded_everygram_pipeline(n, tokenized_sents) model = MLE(order=n) model.fit(train_data, padded_sents) return model def generate(self, length=100, text_seed=None, random_seed=42): """ Print random text, generated using a trigram language model. See also `help(nltk.lm)`. :param length: The length of text to generate (default=100) :type length: int :param text_seed: Generation can be conditioned on preceding context. :type text_seed: list(str) :param random_seed: A random seed or an instance of `random.Random`. If provided, makes the random sampling part of generation reproducible. (default=42) :type random_seed: int """ # Create the model when using it the first time. self._tokenized_sents = [ sent.split(" ") for sent in sent_tokenize(" ".join(self.tokens)) ] if not hasattr(self, "_trigram_model"): print("Building ngram index...", file=sys.stderr) self._trigram_model = self._train_default_ngram_lm( self._tokenized_sents, n=3 ) generated_tokens = [] assert length > 0, "The `length` must be more than 0." 
while len(generated_tokens) < length: for idx, token in enumerate( self._trigram_model.generate( length, text_seed=text_seed, random_seed=random_seed ) ): if token == "<s>": continue if token == "</s>": break generated_tokens.append(token) random_seed += 1 prefix = " ".join(text_seed) + " " if text_seed else "" output_str = prefix + tokenwrap(generated_tokens[:length]) print(output_str) return output_str def plot(self, *args): """ See documentation for FreqDist.plot() :seealso: nltk.prob.FreqDist.plot() """ return self.vocab().plot(*args) def vocab(self): """ :seealso: nltk.prob.FreqDist """ if "_vocab" not in self.__dict__: # print("Building vocabulary index...") self._vocab = FreqDist(self) return self._vocab def findall(self, regexp): """ Find instances of the regular expression in the text. The text is a list of tokens, and a regexp pattern to match a single token must be surrounded by angle brackets. E.g. >>> from nltk.book import text1, text5, text9 >>> text5.findall("<.*><.*><bro>") you rule bro; telling you bro; u twizted bro >>> text1.findall("<a>(<.*>)<man>") monied; nervous; dangerous; white; white; white; pious; queer; good; mature; white; Cape; great; wise; wise; butterless; white; fiendish; pale; furious; better; certain; complete; dismasted; younger; brave; brave; brave; brave >>> text9.findall("<th.*>{3,}") thread through those; the thought that; that the thing; the thing that; that that thing; through these than through; them that the; through the thick; them that they; thought that the :param regexp: A regular expression :type regexp: str """ if "_token_searcher" not in self.__dict__: self._token_searcher = TokenSearcher(self) hits = self._token_searcher.findall(regexp) hits = [" ".join(h) for h in hits] print(tokenwrap(hits, "; ")) # //////////////////////////////////////////////////////////// # Helper Methods # //////////////////////////////////////////////////////////// _CONTEXT_RE = re.compile(r"\w+|[\.\!\?]") def _context(self, tokens, i): """ One left & one right token, both case-normalized. Skip over non-sentence-final punctuation. Used by the ``ContextIndex`` that is created for ``similar()`` and ``common_contexts()``. """ # Left context j = i - 1 while j >= 0 and not self._CONTEXT_RE.match(tokens[j]): j -= 1 left = tokens[j] if j != 0 else "*START*" # Right context j = i + 1 while j < len(tokens) and not self._CONTEXT_RE.match(tokens[j]): j += 1 right = tokens[j] if j != len(tokens) else "*END*" return (left, right) # //////////////////////////////////////////////////////////// # String Display # //////////////////////////////////////////////////////////// def __str__(self): return "<Text: %s>" % self.name def __repr__(self): return "<Text: %s>" % self.name brown: CategorizedTaggedCorpusReader = LazyCorpusLoader( "brown", CategorizedTaggedCorpusReader, r"c[a-z]\d\d", cat_file="cats.txt", tagset="brown", encoding="ascii", ) def demo(): from nltk.corpus import brown text = Text(brown.words(categories="news")) print(text) print() print("Concordance:") text.concordance("news") print() print("Distributionally similar words:") text.similar("news") print() print("Collocations:") text.collocations() print() # print("Automatically generated text:") # text.generate() # print() print("Dispersion plot:") text.dispersion_plot(["news", "report", "said", "announced"]) print() print("Vocabulary plot:") text.plot(50) print() print("Indexing:") print("text[3]:", text[3]) print("text[3:5]:", text[3:5]) print("text.vocab()['news']:", text.vocab()["news"])
null
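The Text methods above are easiest to see end to end on a real corpus. A minimal, hedged sketch follows; nltk.text.Text and the Gutenberg corpus are standard NLTK, but the word choices and the generated output here are illustrative and will vary with the trained model:

# Minimal usage sketch of the Text API shown above.
from nltk.corpus import gutenberg
from nltk.text import Text

emma = Text(gutenberg.words("austen-emma.txt"))
emma.similar("surprise")               # distributionally similar words
emma.common_contexts(["very", "so"])   # shared left_right contexts, if any
emma.generate(length=20, text_seed=["Emma", "was"], random_seed=3)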
170,790
The provided code snippet includes necessary dependencies for implementing the `error_list` function. Write a Python function `def error_list(train_sents, test_sents)` to solve the following problem: Returns a list of human-readable strings indicating the errors in the given tagging of the corpus. :param train_sents: The correct tagging of the corpus :type train_sents: list(tuple) :param test_sents: The tagged corpus :type test_sents: list(tuple) Here is the function:

def error_list(train_sents, test_sents):
    """
    Returns a list of human-readable strings indicating the errors
    in the given tagging of the corpus.

    :param train_sents: The correct tagging of the corpus
    :type train_sents: list(tuple)
    :param test_sents: The tagged corpus
    :type test_sents: list(tuple)
    """
    hdr = ("%25s | %s | %s\n" + "-" * 26 + "+" + "-" * 24 + "+" + "-" * 26) % (
        "left context",
        "word/test->gold".center(22),
        "right context",
    )
    errors = [hdr]
    for (train_sent, test_sent) in zip(train_sents, test_sents):
        for wordnum, (word, train_pos) in enumerate(train_sent):
            test_pos = test_sent[wordnum][1]
            if train_pos != test_pos:
                left = " ".join("%s/%s" % w for w in train_sent[:wordnum])
                right = " ".join("%s/%s" % w for w in train_sent[wordnum + 1 :])
                mid = f"{word}/{test_pos}->{train_pos}"
                errors.append(f"{left[-25:]:>25} | {mid.center(22)} | {right[:25]}")

    return errors
Returns a list of human-readable strings indicating the errors in the given tagging of the corpus. :param train_sents: The correct tagging of the corpus :type train_sents: list(tuple) :param test_sents: The tagged corpus :type test_sents: list(tuple)
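Because error_list just zips the gold tagging against the test tagging and formats the mismatches, a toy pair of taggings is enough to see the output. The two sentences below are invented for illustration; the import path comes from the snippet's own header (from nltk.tbl import ... error_list):

# Hedged sketch: error_list on a one-sentence toy corpus.
from nltk.tbl import error_list

gold = [[("the", "DT"), ("dog", "NN"), ("barks", "VBZ")]]
test = [[("the", "DT"), ("dog", "VB"), ("barks", "VBZ")]]

for line in error_list(gold, test):
    print(line)
# Prints the column header plus one row: the/DT | dog/VB->NN | barks/VBZ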
170,791
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list

def postag(
    templates=None,
    tagged_data=None,
    num_sents=1000,
    max_rules=300,
    min_score=3,
    min_acc=None,
    train=0.8,
    trace=3,
    randomize=False,
    ruleformat="str",
    incremental_stats=False,
    template_stats=False,
    error_output=None,
    serialize_output=None,
    learning_curve_output=None,
    learning_curve_take=300,
    baseline_backoff_tagger=None,
    separate_baseline_data=False,
    cache_baseline_tagger=None,
):
    """
    Brill Tagger Demonstration

    :param templates: the templates to be used in training
    :type templates: list of Template
    :param tagged_data: the tagged corpus to train and test on (defaults to a slice of the treebank)
    :type tagged_data: list(list(tuple))
    :param num_sents: how many sentences of training and testing data to use
    :type num_sents: C{int}
    :param max_rules: maximum number of rule instances to create
    :type max_rules: C{int}
    :param min_score: the minimum score for a rule in order for it to be considered
    :type min_score: C{int}
    :param min_acc: the minimum accuracy for a rule in order for it to be considered
    :type min_acc: C{float}
    :param train: the fraction of the corpus to be used for training (1=all)
    :type train: C{float}
    :param trace: the level of diagnostic tracing output to produce (0-4)
    :type trace: C{int}
    :param randomize: whether the training data should be a random subset of the corpus
    :type randomize: C{bool}
    :param ruleformat: rule output format, one of "str", "repr", "verbose"
    :type ruleformat: C{str}
    :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
    :type incremental_stats: C{bool}
    :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
    :type template_stats: C{bool}
    :param error_output: the file where errors will be saved
    :type error_output: C{string}
    :param serialize_output: the file where the learned tbl tagger will be saved
    :type serialize_output: C{string}
    :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
    :type learning_curve_output: C{string}
    :param learning_curve_take: how many rules are plotted
    :type learning_curve_take: C{int}
    :param baseline_backoff_tagger: the backoff tagger used by the unigram baseline
    :type baseline_backoff_tagger: tagger
    :param separate_baseline_data: use a fraction of the training data exclusively for training the baseline
    :type separate_baseline_data: C{bool}
    :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary
        workaround to get deterministic output from the baseline unigram tagger between python versions)
    :type cache_baseline_tagger: C{string}

    Note on separate_baseline_data: if False, the training data is reused both for the baseline
    and the rule learner. This is fast and fine for a demo, but is likely to generalize worse on
    unseen data. It also cannot sensibly be used for learning curves on training data (the
    baseline will be artificially high).
    """

    # defaults (REGEXP_TAGGER, _demo_prepare_data and _demo_plot are
    # module-level helpers defined alongside this demo)
    baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
    if templates is None:
        from nltk.tag.brill import brill24, describe_template_sets

        # some pre-built template sets taken from typical systems or publications
        # are available; print a list with describe_template_sets()
        # for instance:
        templates = brill24()

    (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
        tagged_data, train, num_sents, randomize, separate_baseline_data
    )

    # creating (or reloading from cache) a baseline tagger (unigram tagger)
    # this is just a mechanism for getting deterministic output from the baseline
    # between python versions
    if cache_baseline_tagger:
        if not os.path.exists(cache_baseline_tagger):
            baseline_tagger = UnigramTagger(
                baseline_data, backoff=baseline_backoff_tagger
            )
            # pickle needs binary file handles ("wb"/"rb"), not text mode
            with open(cache_baseline_tagger, "wb") as print_rules:
                pickle.dump(baseline_tagger, print_rules)
            print(
                "Trained baseline tagger, pickled it to {}".format(
                    cache_baseline_tagger
                )
            )
        with open(cache_baseline_tagger, "rb") as print_rules:
            baseline_tagger = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
    else:
        baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
        print("Trained baseline tagger")
    if gold_data:
        print(
            "    Accuracy on test set: {:0.4f}".format(
                baseline_tagger.accuracy(gold_data)
            )
        )

    # creating a Brill tagger
    tbrill = time.time()
    trainer = BrillTaggerTrainer(
        baseline_tagger, templates, trace, ruleformat=ruleformat
    )
    print("Training tbl tagger...")
    brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
    print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
    if gold_data:
        print("    Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))

    # printing the learned rules, if learned silently
    if trace == 1:
        print("\nLearned rules: ")
        for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
            print(f"{ruleno:4d} {rule.format(ruleformat):s}")

    # printing template statistics (optionally including comparison with the training data)
    # note: if not separate_baseline_data, then baseline accuracy will be artificially high
    if incremental_stats:
        print(
            "Incrementally tagging the test data, collecting individual rule statistics"
        )
        (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
            testing_data, gold_data
        )
        print("    Rule statistics collected")
        if not separate_baseline_data:
            print(
                "WARNING: incremental stats requested with separate_baseline_data=False; "
                "the baseline will be artificially high"
            )
        trainstats = brill_tagger.train_stats()
        if template_stats:
            brill_tagger.print_template_statistics(teststats)
        if learning_curve_output:
            _demo_plot(
                learning_curve_output, teststats, trainstats, take=learning_curve_take
            )
            print(f"Wrote plot of learning curve to {learning_curve_output}")
    else:
        print("Tagging the test data")
        taggedtest = brill_tagger.tag_sents(testing_data)
        if template_stats:
            brill_tagger.print_template_statistics()

    # writing error analysis to file
    if error_output is not None:
        with open(error_output, "w") as f:
            f.write("Errors for Brill Tagger %r\n\n" % serialize_output)
            # error_list returns strings; no bytes encoding needed for a text file
            f.write("\n".join(error_list(gold_data, taggedtest)) + "\n")
        print(f"Wrote tagger errors including context to {error_output}")

    # serializing the tagger to a pickle file and reloading (just to see it works)
    if serialize_output is not None:
        taggedtest = brill_tagger.tag_sents(testing_data)
        with open(serialize_output, "wb") as print_rules:
            pickle.dump(brill_tagger, print_rules)
        print(f"Wrote pickled tagger to {serialize_output}")
        with open(serialize_output, "rb") as print_rules:
            brill_tagger_reloaded = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {serialize_output}")
        # use the reloaded tagger so the round-trip is actually tested
        taggedtest_reloaded = brill_tagger_reloaded.tag_sents(testing_data)
        if taggedtest == taggedtest_reloaded:
            print("Reloaded tagger tried on test set, results identical")
        else:
            print("PROBLEM: Reloaded tagger gave different results on test set")

The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem: Run a demo with defaults. See source comments for details, or docstrings of any of the more specific demo_* functions. Here is the function:

def demo():
    """
    Run a demo with defaults. See source comments for details,
    or docstrings of any of the more specific demo_* functions.
    """
    postag()
Run a demo with defaults. See source comments for details, or docstrings of any of the more specific demo_* functions.
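Since demo() is just postag() with defaults, the more interesting runs pass explicit arguments. A hedged sketch exercising the file outputs follows; the nltk.tbl.demo import path is an assumption about where this demo code lives, and the file names are illustrative:

# Hedged sketch: a small postag run that writes the error analysis and the
# pickled tagger to disk.
from nltk.tbl.demo import postag  # assumed module path

postag(
    num_sents=200,                      # keep the demo quick
    max_rules=20,
    error_output="errors.txt",          # human-readable error analysis
    serialize_output="tagger.pickle",   # pickled trained tagger (round-tripped)
)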
170,792
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list

# postag(...) is repeated verbatim here in the source; see entry 170,791 above
# for the full definition.

The provided code snippet includes necessary dependencies for implementing the `demo_repr_rule_format` function. Write a Python function `def demo_repr_rule_format()` to solve the following problem: Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose")) Here is the function:

def demo_repr_rule_format():
    """
    Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
    """
    postag(ruleformat="repr")
Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
170,793
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list

# postag(...) is repeated verbatim here in the source; see entry 170,791 above
# for the full definition.

The provided code snippet includes necessary dependencies for implementing the `demo_str_rule_format` function. Write a Python function `def demo_str_rule_format()` to solve the following problem: Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose")) Here is the function:

def demo_str_rule_format():
    """
    Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose"))
    """
    postag(ruleformat="str")
Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose"))
170,794
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list

# postag(...) is repeated verbatim here in the source; see entry 170,791 above
# for the full definition.

The provided code snippet includes necessary dependencies for implementing the `demo_verbose_rule_format` function. Write a Python function `def demo_verbose_rule_format()` to solve the following problem: Exemplify Rule.format("verbose") Here is the function:

def demo_verbose_rule_format():
    """
    Exemplify Rule.format("verbose")
    """
    postag(ruleformat="verbose")
Exemplify Rule.format("verbose")
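The three demos above differ only in how learned rules are printed. A quick way to compare all three renderings in one session, assuming (as above) that postag is importable from nltk.tbl.demo; the reduced num_sents/max_rules just keep each run short:

# Hedged sketch: run the same kind of training under each rule output format.
from nltk.tbl.demo import postag  # assumed module path

for fmt in ("str", "repr", "verbose"):
    print(f"=== ruleformat={fmt} ===")
    postag(num_sents=100, max_rules=10, ruleformat=fmt)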
170,795
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list

# postag(...) is repeated verbatim here in the source; see entry 170,791 above
# for the full definition.

class Pos(Feature):
    """
    Feature which examines the tags of nearby tokens.
    """

    json_tag = "nltk.tag.brill.Pos"

    @staticmethod
    def extract_property(tokens, index):
        """@return: The given token's tag."""
        return tokens[index][1]

The provided code snippet includes necessary dependencies for implementing the `demo_multiposition_feature` function. Write a Python function `def demo_multiposition_feature()` to solve the following problem: Each feature of a template takes a list of positions relative to the current word where the feature should be looked for, conceptually joined by logical OR. For instance, Pos([-1, 1]), given a value V, will hold whenever V is found one step to the left and/or one step to the right. For contiguous ranges, a 2-arg form giving inclusive end points can also be used: Pos(-3, -1) is the same as the arg below. Here is the function:

def demo_multiposition_feature():
    """
    Each feature of a template takes a list of positions relative to the
    current word where the feature should be looked for, conceptually joined
    by logical OR. For instance, Pos([-1, 1]), given a value V, will hold
    whenever V is found one step to the left and/or one step to the right.

    For contiguous ranges, a 2-arg form giving inclusive end points can
    also be used: Pos(-3, -1) is the same as the arg below.
    """
    postag(templates=[Template(Pos([-3, -2, -1]))])
Each feature of a template takes a list of positions relative to the current word where the feature should be looked for, conceptually joined by logical OR. For instance, Pos([-1, 1]), given a value V, will hold whenever V is found one step to the left and/or one step to the right. For contiguous ranges, a 2-arg form giving inclusive end points can also be used: Pos(-3, -1) is the same as the arg below.
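The equivalence described above is easy to write down directly; both constructions come straight from the docstring, with Template and Pos imported exactly as in the snippet's header:

# Sketch: two equivalent ways to ask for positions -3..-1, per the text above.
from nltk.tag.brill import Pos
from nltk.tbl import Template

t_list = Template(Pos([-3, -2, -1]))  # explicit list of positions
t_range = Template(Pos(-3, -1))       # 2-arg form with inclusive end points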
170,796
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list

# postag(...) is repeated verbatim here in the source; see entry 170,791 above
# for the full definition.

class Word(Feature):
    """
    Feature which examines the text (word) of nearby tokens.
    """

    json_tag = "nltk.tag.brill.Word"

    @staticmethod
    def extract_property(tokens, index):
        """@return: The given token's text."""
        return tokens[index][0]

class Pos(Feature):
    """
    Feature which examines the tags of nearby tokens.
    """

    json_tag = "nltk.tag.brill.Pos"

    @staticmethod
    def extract_property(tokens, index):
        """@return: The given token's tag."""
        return tokens[index][1]

The provided code snippet includes necessary dependencies for implementing the `demo_multifeature_template` function. Write a Python function `def demo_multifeature_template()` to solve the following problem: Templates can have more than a single feature. Here is the function:

def demo_multifeature_template():
    """
    Templates can have more than a single feature.
    """
    postag(templates=[Template(Word([0]), Pos([-2, -1]))])
Templates can have more than a single feature.
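A template with several features generates rules whose conditions must all hold, so the example below conditions a rule on the current word and the two preceding tags together; imports are exactly as in the snippet's header:

# Sketch: one template combining a Word feature and a Pos feature.
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template

template = Template(Word([0]), Pos([-2, -1]))
# pass as templates=[template] to postag(), as in the demo above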
170,797
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list

# postag(...) is repeated verbatim here in the source; see entry 170,791 above
# for the full definition.

The provided code snippet includes necessary dependencies for implementing the `demo_template_statistics` function. Write a Python function `def demo_template_statistics()` to solve the following problem: Show aggregate statistics per template. Little used templates are candidates for deletion, much used templates may possibly be refined. Deleting unused templates is mostly about saving time and/or space: training is basically O(T) in the number of templates T (also in terms of memory usage, which often will be the limiting factor). Here is the function:

def demo_template_statistics():
    """
    Show aggregate statistics per template. Little used templates are
    candidates for deletion, much used templates may possibly be refined.

    Deleting unused templates is mostly about saving time and/or space:
    training is basically O(T) in the number of templates T
    (also in terms of memory usage, which often will be the limiting factor).
    """
    postag(incremental_stats=True, template_stats=True)
Show aggregate statistics per template. Little used templates are candidates for deletion, much used templates may possibly be refined. Deleting unused templates is mostly about saving time and/or space: training is basically O(T) in the number of templates T (also in terms of memory usage, which often will be the limiting factor).
170,798
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list


def postag(
    templates=None,
    tagged_data=None,
    num_sents=1000,
    max_rules=300,
    min_score=3,
    min_acc=None,
    train=0.8,
    trace=3,
    randomize=False,
    ruleformat="str",
    incremental_stats=False,
    template_stats=False,
    error_output=None,
    serialize_output=None,
    learning_curve_output=None,
    learning_curve_take=300,
    baseline_backoff_tagger=None,
    separate_baseline_data=False,
    cache_baseline_tagger=None,
):
    """
    Brill Tagger Demonstration

    :param templates: the templates to use in rule learning
    :type templates: list of Template
    :param tagged_data: the corpus of tagged sentences to train and test on
    :type tagged_data: list of tagged sentences
    :param num_sents: how many sentences of training and testing data to use
    :type num_sents: C{int}
    :param max_rules: maximum number of rule instances to create
    :type max_rules: C{int}
    :param min_score: the minimum score for a rule in order for it to be considered
    :type min_score: C{int}
    :param min_acc: the minimum accuracy for a rule in order for it to be considered
    :type min_acc: C{float}
    :param train: the fraction of the corpus to be used for training (1=all)
    :type train: C{float}
    :param trace: the level of diagnostic tracing output to produce (0-4)
    :type trace: C{int}
    :param randomize: whether the training data should be a random subset of the corpus
    :type randomize: C{bool}
    :param ruleformat: rule output format, one of "str", "repr", "verbose"
    :type ruleformat: C{str}
    :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
    :type incremental_stats: C{bool}
    :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
    :type template_stats: C{bool}
    :param error_output: the file where errors will be saved
    :type error_output: C{string}
    :param serialize_output: the file where the learned tbl tagger will be saved
    :type serialize_output: C{string}
    :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
    :type learning_curve_output: C{string}
    :param learning_curve_take: how many rules plotted
    :type learning_curve_take: C{int}
    :param baseline_backoff_tagger: the backoff tagger used by the unigram baseline
    :type baseline_backoff_tagger: tagger
    :param separate_baseline_data: use a fraction of the training data exclusively for training the baseline
    :type separate_baseline_data: C{bool}
    :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround
        to get deterministic output from the baseline unigram tagger between python versions)
    :type cache_baseline_tagger: C{string}

    Note on separate_baseline_data: if False, the training data is reused both for the
    baseline and for the rule learner. This is fast and fine for a demo, but is likely
    to generalize worse on unseen data. It also cannot be sensibly used for learning
    curves on training data (the baseline will be artificially high).
    """

    # defaults
    # (REGEXP_TAGGER is a module-level RegexpTagger baseline defined alongside this demo)
    baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
    if templates is None:
        from nltk.tag.brill import brill24, describe_template_sets

        # some pre-built template sets taken from typical systems or publications are
        # available.
        # Print a list with describe_template_sets(). For instance:
        templates = brill24()

    (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
        tagged_data, train, num_sents, randomize, separate_baseline_data
    )

    # creating (or reloading from cache) a baseline tagger (unigram tagger)
    # this is just a mechanism for getting deterministic output from the baseline between
    # python versions
    if cache_baseline_tagger:
        if not os.path.exists(cache_baseline_tagger):
            baseline_tagger = UnigramTagger(
                baseline_data, backoff=baseline_backoff_tagger
            )
            # pickled data must be written and read in binary mode
            with open(cache_baseline_tagger, "wb") as print_rules:
                pickle.dump(baseline_tagger, print_rules)
            print(
                "Trained baseline tagger, pickled it to {}".format(
                    cache_baseline_tagger
                )
            )
        with open(cache_baseline_tagger, "rb") as print_rules:
            baseline_tagger = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
    else:
        baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
        print("Trained baseline tagger")
    if gold_data:
        print(
            " Accuracy on test set: {:0.4f}".format(
                baseline_tagger.accuracy(gold_data)
            )
        )

    # creating a Brill tagger
    tbrill = time.time()
    trainer = BrillTaggerTrainer(
        baseline_tagger, templates, trace, ruleformat=ruleformat
    )
    print("Training tbl tagger...")
    brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
    print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
    if gold_data:
        print(" Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))

    # printing the learned rules, if learned silently
    if trace == 1:
        print("\nLearned rules: ")
        for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
            print(f"{ruleno:4d} {rule.format(ruleformat):s}")

    # printing template statistics (optionally including comparison with the training data)
    # note: if not separate_baseline_data, then baseline accuracy will be artificially high
    if incremental_stats:
        print(
            "Incrementally tagging the test data, collecting individual rule statistics"
        )
        (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
            testing_data, gold_data
        )
        print(" Rule statistics collected")
        if not separate_baseline_data:
            print(
                "WARNING: separate_baseline_data=False, so baseline accuracy in "
                "train_stats will be artificially high"
            )
        trainstats = brill_tagger.train_stats()
        if template_stats:
            brill_tagger.print_template_statistics(teststats)
        if learning_curve_output:
            _demo_plot(
                learning_curve_output, teststats, trainstats, take=learning_curve_take
            )
            print(f"Wrote plot of learning curve to {learning_curve_output}")
    else:
        print("Tagging the test data")
        taggedtest = brill_tagger.tag_sents(testing_data)
        if template_stats:
            brill_tagger.print_template_statistics()

    # writing error analysis to file
    if error_output is not None:
        with open(error_output, "w") as f:
            f.write("Errors for Brill Tagger %r\n\n" % serialize_output)
            # f is a text-mode handle, so write a str (no .encode())
            f.write("\n".join(error_list(gold_data, taggedtest)) + "\n")
        print(f"Wrote tagger errors including context to {error_output}")

    # serializing the tagger to a pickle file and reloading (just to see it works)
    if serialize_output is not None:
        taggedtest = brill_tagger.tag_sents(testing_data)
        with open(serialize_output, "wb") as print_rules:
            pickle.dump(brill_tagger, print_rules)
        print(f"Wrote pickled tagger to {serialize_output}")
        with open(serialize_output, "rb") as print_rules:
            brill_tagger_reloaded = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {serialize_output}")
        taggedtest_reloaded = brill_tagger.tag_sents(testing_data)
        if taggedtest == taggedtest_reloaded:
print("Reloaded tagger tried on test set, results identical") else: print("PROBLEM: Reloaded tagger gave different results on test set") class Word(Feature): """ Feature which examines the text (word) of nearby tokens. """ json_tag = "nltk.tag.brill.Word" def extract_property(tokens, index): """@return: The given token's text.""" return tokens[index][0] class Pos(Feature): """ Feature which examines the tags of nearby tokens. """ json_tag = "nltk.tag.brill.Pos" def extract_property(tokens, index): """@return: The given token's tag.""" return tokens[index][1] The provided code snippet includes necessary dependencies for implementing the `demo_generated_templates` function. Write a Python function `def demo_generated_templates()` to solve the following problem: Template.expand and Feature.expand are class methods facilitating generating large amounts of templates. See their documentation for details. Note: training with 500 templates can easily fill all available even on relatively small corpora Here is the function: def demo_generated_templates(): """ Template.expand and Feature.expand are class methods facilitating generating large amounts of templates. See their documentation for details. Note: training with 500 templates can easily fill all available even on relatively small corpora """ wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False) tagtpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True) templates = list(Template.expand([wordtpls, tagtpls], combinations=(1, 3))) print( "Generated {} templates for transformation-based learning".format( len(templates) ) ) postag(templates=templates, incremental_stats=True, template_stats=True)
Template.expand and Feature.expand are class methods facilitating generating large amounts of templates. See their documentation for details. Note: training with 500 templates can easily fill all available memory, even on relatively small corpora.
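For reference, the same tbl API also supports building templates by hand instead of via expand(); a minimal sketch (the feature offsets below are illustrative, not taken from the record above):

from nltk.tbl import Template
from nltk.tag.brill import Pos, Word

# A few hand-built templates: each Feature lists the token offsets it inspects.
templates = [
    Template(Pos([-1])),             # condition on the previous tag
    Template(Pos([-1]), Word([0])),  # previous tag combined with the current word
    Template(Word([-1, 0])),         # the (previous word, current word) pair as one feature
]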
170,799
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list


def postag(
    templates=None,
    tagged_data=None,
    num_sents=1000,
    max_rules=300,
    min_score=3,
    min_acc=None,
    train=0.8,
    trace=3,
    randomize=False,
    ruleformat="str",
    incremental_stats=False,
    template_stats=False,
    error_output=None,
    serialize_output=None,
    learning_curve_output=None,
    learning_curve_take=300,
    baseline_backoff_tagger=None,
    separate_baseline_data=False,
    cache_baseline_tagger=None,
):
    """
    Brill Tagger Demonstration

    :param templates: the templates to use in rule learning
    :type templates: list of Template
    :param tagged_data: the corpus of tagged sentences to train and test on
    :type tagged_data: list of tagged sentences
    :param num_sents: how many sentences of training and testing data to use
    :type num_sents: C{int}
    :param max_rules: maximum number of rule instances to create
    :type max_rules: C{int}
    :param min_score: the minimum score for a rule in order for it to be considered
    :type min_score: C{int}
    :param min_acc: the minimum accuracy for a rule in order for it to be considered
    :type min_acc: C{float}
    :param train: the fraction of the corpus to be used for training (1=all)
    :type train: C{float}
    :param trace: the level of diagnostic tracing output to produce (0-4)
    :type trace: C{int}
    :param randomize: whether the training data should be a random subset of the corpus
    :type randomize: C{bool}
    :param ruleformat: rule output format, one of "str", "repr", "verbose"
    :type ruleformat: C{str}
    :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
    :type incremental_stats: C{bool}
    :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
    :type template_stats: C{bool}
    :param error_output: the file where errors will be saved
    :type error_output: C{string}
    :param serialize_output: the file where the learned tbl tagger will be saved
    :type serialize_output: C{string}
    :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
    :type learning_curve_output: C{string}
    :param learning_curve_take: how many rules plotted
    :type learning_curve_take: C{int}
    :param baseline_backoff_tagger: the backoff tagger used by the unigram baseline
    :type baseline_backoff_tagger: tagger
    :param separate_baseline_data: use a fraction of the training data exclusively for training the baseline
    :type separate_baseline_data: C{bool}
    :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround
        to get deterministic output from the baseline unigram tagger between python versions)
    :type cache_baseline_tagger: C{string}

    Note on separate_baseline_data: if False, the training data is reused both for the
    baseline and for the rule learner. This is fast and fine for a demo, but is likely
    to generalize worse on unseen data. It also cannot be sensibly used for learning
    curves on training data (the baseline will be artificially high).
    """

    # defaults
    # (REGEXP_TAGGER is a module-level RegexpTagger baseline defined alongside this demo)
    baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
    if templates is None:
        from nltk.tag.brill import brill24, describe_template_sets

        # some pre-built template sets taken from typical systems or publications are
        # available.
        # Print a list with describe_template_sets(). For instance:
        templates = brill24()

    (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
        tagged_data, train, num_sents, randomize, separate_baseline_data
    )

    # creating (or reloading from cache) a baseline tagger (unigram tagger)
    # this is just a mechanism for getting deterministic output from the baseline between
    # python versions
    if cache_baseline_tagger:
        if not os.path.exists(cache_baseline_tagger):
            baseline_tagger = UnigramTagger(
                baseline_data, backoff=baseline_backoff_tagger
            )
            # pickled data must be written and read in binary mode
            with open(cache_baseline_tagger, "wb") as print_rules:
                pickle.dump(baseline_tagger, print_rules)
            print(
                "Trained baseline tagger, pickled it to {}".format(
                    cache_baseline_tagger
                )
            )
        with open(cache_baseline_tagger, "rb") as print_rules:
            baseline_tagger = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
    else:
        baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
        print("Trained baseline tagger")
    if gold_data:
        print(
            " Accuracy on test set: {:0.4f}".format(
                baseline_tagger.accuracy(gold_data)
            )
        )

    # creating a Brill tagger
    tbrill = time.time()
    trainer = BrillTaggerTrainer(
        baseline_tagger, templates, trace, ruleformat=ruleformat
    )
    print("Training tbl tagger...")
    brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
    print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
    if gold_data:
        print(" Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))

    # printing the learned rules, if learned silently
    if trace == 1:
        print("\nLearned rules: ")
        for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
            print(f"{ruleno:4d} {rule.format(ruleformat):s}")

    # printing template statistics (optionally including comparison with the training data)
    # note: if not separate_baseline_data, then baseline accuracy will be artificially high
    if incremental_stats:
        print(
            "Incrementally tagging the test data, collecting individual rule statistics"
        )
        (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
            testing_data, gold_data
        )
        print(" Rule statistics collected")
        if not separate_baseline_data:
            print(
                "WARNING: separate_baseline_data=False, so baseline accuracy in "
                "train_stats will be artificially high"
            )
        trainstats = brill_tagger.train_stats()
        if template_stats:
            brill_tagger.print_template_statistics(teststats)
        if learning_curve_output:
            _demo_plot(
                learning_curve_output, teststats, trainstats, take=learning_curve_take
            )
            print(f"Wrote plot of learning curve to {learning_curve_output}")
    else:
        print("Tagging the test data")
        taggedtest = brill_tagger.tag_sents(testing_data)
        if template_stats:
            brill_tagger.print_template_statistics()

    # writing error analysis to file
    if error_output is not None:
        with open(error_output, "w") as f:
            f.write("Errors for Brill Tagger %r\n\n" % serialize_output)
            # f is a text-mode handle, so write a str (no .encode())
            f.write("\n".join(error_list(gold_data, taggedtest)) + "\n")
        print(f"Wrote tagger errors including context to {error_output}")

    # serializing the tagger to a pickle file and reloading (just to see it works)
    if serialize_output is not None:
        taggedtest = brill_tagger.tag_sents(testing_data)
        with open(serialize_output, "wb") as print_rules:
            pickle.dump(brill_tagger, print_rules)
        print(f"Wrote pickled tagger to {serialize_output}")
        with open(serialize_output, "rb") as print_rules:
            brill_tagger_reloaded = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {serialize_output}")
        taggedtest_reloaded = brill_tagger.tag_sents(testing_data)
        if taggedtest == taggedtest_reloaded:
print("Reloaded tagger tried on test set, results identical") else: print("PROBLEM: Reloaded tagger gave different results on test set") The provided code snippet includes necessary dependencies for implementing the `demo_learning_curve` function. Write a Python function `def demo_learning_curve()` to solve the following problem: Plot a learning curve -- the contribution on tagging accuracy of the individual rules. Note: requires matplotlib Here is the function: def demo_learning_curve(): """ Plot a learning curve -- the contribution on tagging accuracy of the individual rules. Note: requires matplotlib """ postag( incremental_stats=True, separate_baseline_data=True, learning_curve_output="learningcurve.png", )
Plot a learning curve -- the contribution to tagging accuracy of the individual rules. Note: requires matplotlib.
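If you want to plot such a curve outside the demo, the essence is a cumulative accuracy series over the learned rules; a rough sketch with made-up values (the `scores` list is hypothetical, not output of the demo itself):

import matplotlib.pyplot as plt

# Hypothetical cumulative test accuracy after each learned rule.
scores = [0.81, 0.83, 0.84, 0.845, 0.85]
plt.plot(range(1, len(scores) + 1), scores)
plt.xlabel("number of rules applied")
plt.ylabel("tagging accuracy")
plt.savefig("learningcurve.png")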
170,800
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list


def postag(
    templates=None,
    tagged_data=None,
    num_sents=1000,
    max_rules=300,
    min_score=3,
    min_acc=None,
    train=0.8,
    trace=3,
    randomize=False,
    ruleformat="str",
    incremental_stats=False,
    template_stats=False,
    error_output=None,
    serialize_output=None,
    learning_curve_output=None,
    learning_curve_take=300,
    baseline_backoff_tagger=None,
    separate_baseline_data=False,
    cache_baseline_tagger=None,
):
    """
    Brill Tagger Demonstration

    :param templates: the templates to use in rule learning
    :type templates: list of Template
    :param tagged_data: the corpus of tagged sentences to train and test on
    :type tagged_data: list of tagged sentences
    :param num_sents: how many sentences of training and testing data to use
    :type num_sents: C{int}
    :param max_rules: maximum number of rule instances to create
    :type max_rules: C{int}
    :param min_score: the minimum score for a rule in order for it to be considered
    :type min_score: C{int}
    :param min_acc: the minimum accuracy for a rule in order for it to be considered
    :type min_acc: C{float}
    :param train: the fraction of the corpus to be used for training (1=all)
    :type train: C{float}
    :param trace: the level of diagnostic tracing output to produce (0-4)
    :type trace: C{int}
    :param randomize: whether the training data should be a random subset of the corpus
    :type randomize: C{bool}
    :param ruleformat: rule output format, one of "str", "repr", "verbose"
    :type ruleformat: C{str}
    :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
    :type incremental_stats: C{bool}
    :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
    :type template_stats: C{bool}
    :param error_output: the file where errors will be saved
    :type error_output: C{string}
    :param serialize_output: the file where the learned tbl tagger will be saved
    :type serialize_output: C{string}
    :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
    :type learning_curve_output: C{string}
    :param learning_curve_take: how many rules plotted
    :type learning_curve_take: C{int}
    :param baseline_backoff_tagger: the backoff tagger used by the unigram baseline
    :type baseline_backoff_tagger: tagger
    :param separate_baseline_data: use a fraction of the training data exclusively for training the baseline
    :type separate_baseline_data: C{bool}
    :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround
        to get deterministic output from the baseline unigram tagger between python versions)
    :type cache_baseline_tagger: C{string}

    Note on separate_baseline_data: if False, the training data is reused both for the
    baseline and for the rule learner. This is fast and fine for a demo, but is likely
    to generalize worse on unseen data. It also cannot be sensibly used for learning
    curves on training data (the baseline will be artificially high).
    """

    # defaults
    # (REGEXP_TAGGER is a module-level RegexpTagger baseline defined alongside this demo)
    baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
    if templates is None:
        from nltk.tag.brill import brill24, describe_template_sets

        # some pre-built template sets taken from typical systems or publications are
        # available.
        # Print a list with describe_template_sets(). For instance:
        templates = brill24()

    (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
        tagged_data, train, num_sents, randomize, separate_baseline_data
    )

    # creating (or reloading from cache) a baseline tagger (unigram tagger)
    # this is just a mechanism for getting deterministic output from the baseline between
    # python versions
    if cache_baseline_tagger:
        if not os.path.exists(cache_baseline_tagger):
            baseline_tagger = UnigramTagger(
                baseline_data, backoff=baseline_backoff_tagger
            )
            # pickled data must be written and read in binary mode
            with open(cache_baseline_tagger, "wb") as print_rules:
                pickle.dump(baseline_tagger, print_rules)
            print(
                "Trained baseline tagger, pickled it to {}".format(
                    cache_baseline_tagger
                )
            )
        with open(cache_baseline_tagger, "rb") as print_rules:
            baseline_tagger = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
    else:
        baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
        print("Trained baseline tagger")
    if gold_data:
        print(
            " Accuracy on test set: {:0.4f}".format(
                baseline_tagger.accuracy(gold_data)
            )
        )

    # creating a Brill tagger
    tbrill = time.time()
    trainer = BrillTaggerTrainer(
        baseline_tagger, templates, trace, ruleformat=ruleformat
    )
    print("Training tbl tagger...")
    brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
    print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
    if gold_data:
        print(" Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))

    # printing the learned rules, if learned silently
    if trace == 1:
        print("\nLearned rules: ")
        for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
            print(f"{ruleno:4d} {rule.format(ruleformat):s}")

    # printing template statistics (optionally including comparison with the training data)
    # note: if not separate_baseline_data, then baseline accuracy will be artificially high
    if incremental_stats:
        print(
            "Incrementally tagging the test data, collecting individual rule statistics"
        )
        (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
            testing_data, gold_data
        )
        print(" Rule statistics collected")
        if not separate_baseline_data:
            print(
                "WARNING: separate_baseline_data=False, so baseline accuracy in "
                "train_stats will be artificially high"
            )
        trainstats = brill_tagger.train_stats()
        if template_stats:
            brill_tagger.print_template_statistics(teststats)
        if learning_curve_output:
            _demo_plot(
                learning_curve_output, teststats, trainstats, take=learning_curve_take
            )
            print(f"Wrote plot of learning curve to {learning_curve_output}")
    else:
        print("Tagging the test data")
        taggedtest = brill_tagger.tag_sents(testing_data)
        if template_stats:
            brill_tagger.print_template_statistics()

    # writing error analysis to file
    if error_output is not None:
        with open(error_output, "w") as f:
            f.write("Errors for Brill Tagger %r\n\n" % serialize_output)
            # f is a text-mode handle, so write a str (no .encode())
            f.write("\n".join(error_list(gold_data, taggedtest)) + "\n")
        print(f"Wrote tagger errors including context to {error_output}")

    # serializing the tagger to a pickle file and reloading (just to see it works)
    if serialize_output is not None:
        taggedtest = brill_tagger.tag_sents(testing_data)
        with open(serialize_output, "wb") as print_rules:
            pickle.dump(brill_tagger, print_rules)
        print(f"Wrote pickled tagger to {serialize_output}")
        with open(serialize_output, "rb") as print_rules:
            brill_tagger_reloaded = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {serialize_output}")
        taggedtest_reloaded = brill_tagger.tag_sents(testing_data)
        if taggedtest == taggedtest_reloaded:
print("Reloaded tagger tried on test set, results identical") else: print("PROBLEM: Reloaded tagger gave different results on test set") The provided code snippet includes necessary dependencies for implementing the `demo_error_analysis` function. Write a Python function `def demo_error_analysis()` to solve the following problem: Writes a file with context for each erroneous word after tagging testing data Here is the function: def demo_error_analysis(): """ Writes a file with context for each erroneous word after tagging testing data """ postag(error_output="errors.txt")
Writes a file with context for each erroneous word after tagging the testing data.
170,801
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list


def postag(
    templates=None,
    tagged_data=None,
    num_sents=1000,
    max_rules=300,
    min_score=3,
    min_acc=None,
    train=0.8,
    trace=3,
    randomize=False,
    ruleformat="str",
    incremental_stats=False,
    template_stats=False,
    error_output=None,
    serialize_output=None,
    learning_curve_output=None,
    learning_curve_take=300,
    baseline_backoff_tagger=None,
    separate_baseline_data=False,
    cache_baseline_tagger=None,
):
    """
    Brill Tagger Demonstration

    :param templates: the templates to use in rule learning
    :type templates: list of Template
    :param tagged_data: the corpus of tagged sentences to train and test on
    :type tagged_data: list of tagged sentences
    :param num_sents: how many sentences of training and testing data to use
    :type num_sents: C{int}
    :param max_rules: maximum number of rule instances to create
    :type max_rules: C{int}
    :param min_score: the minimum score for a rule in order for it to be considered
    :type min_score: C{int}
    :param min_acc: the minimum accuracy for a rule in order for it to be considered
    :type min_acc: C{float}
    :param train: the fraction of the corpus to be used for training (1=all)
    :type train: C{float}
    :param trace: the level of diagnostic tracing output to produce (0-4)
    :type trace: C{int}
    :param randomize: whether the training data should be a random subset of the corpus
    :type randomize: C{bool}
    :param ruleformat: rule output format, one of "str", "repr", "verbose"
    :type ruleformat: C{str}
    :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
    :type incremental_stats: C{bool}
    :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
    :type template_stats: C{bool}
    :param error_output: the file where errors will be saved
    :type error_output: C{string}
    :param serialize_output: the file where the learned tbl tagger will be saved
    :type serialize_output: C{string}
    :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
    :type learning_curve_output: C{string}
    :param learning_curve_take: how many rules plotted
    :type learning_curve_take: C{int}
    :param baseline_backoff_tagger: the backoff tagger used by the unigram baseline
    :type baseline_backoff_tagger: tagger
    :param separate_baseline_data: use a fraction of the training data exclusively for training the baseline
    :type separate_baseline_data: C{bool}
    :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround
        to get deterministic output from the baseline unigram tagger between python versions)
    :type cache_baseline_tagger: C{string}

    Note on separate_baseline_data: if False, the training data is reused both for the
    baseline and for the rule learner. This is fast and fine for a demo, but is likely
    to generalize worse on unseen data. It also cannot be sensibly used for learning
    curves on training data (the baseline will be artificially high).
    """

    # defaults
    # (REGEXP_TAGGER is a module-level RegexpTagger baseline defined alongside this demo)
    baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
    if templates is None:
        from nltk.tag.brill import brill24, describe_template_sets

        # some pre-built template sets taken from typical systems or publications are
        # available.
        # Print a list with describe_template_sets(). For instance:
        templates = brill24()

    (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
        tagged_data, train, num_sents, randomize, separate_baseline_data
    )

    # creating (or reloading from cache) a baseline tagger (unigram tagger)
    # this is just a mechanism for getting deterministic output from the baseline between
    # python versions
    if cache_baseline_tagger:
        if not os.path.exists(cache_baseline_tagger):
            baseline_tagger = UnigramTagger(
                baseline_data, backoff=baseline_backoff_tagger
            )
            # pickled data must be written and read in binary mode
            with open(cache_baseline_tagger, "wb") as print_rules:
                pickle.dump(baseline_tagger, print_rules)
            print(
                "Trained baseline tagger, pickled it to {}".format(
                    cache_baseline_tagger
                )
            )
        with open(cache_baseline_tagger, "rb") as print_rules:
            baseline_tagger = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
    else:
        baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
        print("Trained baseline tagger")
    if gold_data:
        print(
            " Accuracy on test set: {:0.4f}".format(
                baseline_tagger.accuracy(gold_data)
            )
        )

    # creating a Brill tagger
    tbrill = time.time()
    trainer = BrillTaggerTrainer(
        baseline_tagger, templates, trace, ruleformat=ruleformat
    )
    print("Training tbl tagger...")
    brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
    print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
    if gold_data:
        print(" Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))

    # printing the learned rules, if learned silently
    if trace == 1:
        print("\nLearned rules: ")
        for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
            print(f"{ruleno:4d} {rule.format(ruleformat):s}")

    # printing template statistics (optionally including comparison with the training data)
    # note: if not separate_baseline_data, then baseline accuracy will be artificially high
    if incremental_stats:
        print(
            "Incrementally tagging the test data, collecting individual rule statistics"
        )
        (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
            testing_data, gold_data
        )
        print(" Rule statistics collected")
        if not separate_baseline_data:
            print(
                "WARNING: separate_baseline_data=False, so baseline accuracy in "
                "train_stats will be artificially high"
            )
        trainstats = brill_tagger.train_stats()
        if template_stats:
            brill_tagger.print_template_statistics(teststats)
        if learning_curve_output:
            _demo_plot(
                learning_curve_output, teststats, trainstats, take=learning_curve_take
            )
            print(f"Wrote plot of learning curve to {learning_curve_output}")
    else:
        print("Tagging the test data")
        taggedtest = brill_tagger.tag_sents(testing_data)
        if template_stats:
            brill_tagger.print_template_statistics()

    # writing error analysis to file
    if error_output is not None:
        with open(error_output, "w") as f:
            f.write("Errors for Brill Tagger %r\n\n" % serialize_output)
            # f is a text-mode handle, so write a str (no .encode())
            f.write("\n".join(error_list(gold_data, taggedtest)) + "\n")
        print(f"Wrote tagger errors including context to {error_output}")

    # serializing the tagger to a pickle file and reloading (just to see it works)
    if serialize_output is not None:
        taggedtest = brill_tagger.tag_sents(testing_data)
        with open(serialize_output, "wb") as print_rules:
            pickle.dump(brill_tagger, print_rules)
        print(f"Wrote pickled tagger to {serialize_output}")
        with open(serialize_output, "rb") as print_rules:
            brill_tagger_reloaded = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {serialize_output}")
        taggedtest_reloaded = brill_tagger.tag_sents(testing_data)
        if taggedtest == taggedtest_reloaded:
print("Reloaded tagger tried on test set, results identical") else: print("PROBLEM: Reloaded tagger gave different results on test set") The provided code snippet includes necessary dependencies for implementing the `demo_serialize_tagger` function. Write a Python function `def demo_serialize_tagger()` to solve the following problem: Serializes the learned tagger to a file in pickle format; reloads it and validates the process. Here is the function: def demo_serialize_tagger(): """ Serializes the learned tagger to a file in pickle format; reloads it and validates the process. """ postag(serialize_output="tagger.pcl")
Serializes the learned tagger to a file in pickle format; reloads it and validates the process.
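The serialization step boils down to a plain binary-mode pickle round-trip. A minimal self-contained sketch (the small UnigramTagger stands in for the trained Brill tagger, and the treebank corpus must be downloaded):

import pickle

from nltk.corpus import treebank
from nltk.tag import UnigramTagger

# Train a small tagger just to have something to serialize.
tagger = UnigramTagger(treebank.tagged_sents()[:100])
sents = [[w for (w, t) in s] for s in treebank.tagged_sents()[100:110]]

with open("tagger.pcl", "wb") as f:  # pickle requires a binary-mode handle
    pickle.dump(tagger, f)

with open("tagger.pcl", "rb") as f:
    reloaded = pickle.load(f)

# The reloaded tagger should behave identically.
assert reloaded.tag_sents(sents) == tagger.tag_sents(sents)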
170,802
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list


def postag(
    templates=None,
    tagged_data=None,
    num_sents=1000,
    max_rules=300,
    min_score=3,
    min_acc=None,
    train=0.8,
    trace=3,
    randomize=False,
    ruleformat="str",
    incremental_stats=False,
    template_stats=False,
    error_output=None,
    serialize_output=None,
    learning_curve_output=None,
    learning_curve_take=300,
    baseline_backoff_tagger=None,
    separate_baseline_data=False,
    cache_baseline_tagger=None,
):
    """
    Brill Tagger Demonstration

    :param templates: the templates to use in rule learning
    :type templates: list of Template
    :param tagged_data: the corpus of tagged sentences to train and test on
    :type tagged_data: list of tagged sentences
    :param num_sents: how many sentences of training and testing data to use
    :type num_sents: C{int}
    :param max_rules: maximum number of rule instances to create
    :type max_rules: C{int}
    :param min_score: the minimum score for a rule in order for it to be considered
    :type min_score: C{int}
    :param min_acc: the minimum accuracy for a rule in order for it to be considered
    :type min_acc: C{float}
    :param train: the fraction of the corpus to be used for training (1=all)
    :type train: C{float}
    :param trace: the level of diagnostic tracing output to produce (0-4)
    :type trace: C{int}
    :param randomize: whether the training data should be a random subset of the corpus
    :type randomize: C{bool}
    :param ruleformat: rule output format, one of "str", "repr", "verbose"
    :type ruleformat: C{str}
    :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
    :type incremental_stats: C{bool}
    :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
    :type template_stats: C{bool}
    :param error_output: the file where errors will be saved
    :type error_output: C{string}
    :param serialize_output: the file where the learned tbl tagger will be saved
    :type serialize_output: C{string}
    :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
    :type learning_curve_output: C{string}
    :param learning_curve_take: how many rules plotted
    :type learning_curve_take: C{int}
    :param baseline_backoff_tagger: the backoff tagger used by the unigram baseline
    :type baseline_backoff_tagger: tagger
    :param separate_baseline_data: use a fraction of the training data exclusively for training the baseline
    :type separate_baseline_data: C{bool}
    :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround
        to get deterministic output from the baseline unigram tagger between python versions)
    :type cache_baseline_tagger: C{string}

    Note on separate_baseline_data: if False, the training data is reused both for the
    baseline and for the rule learner. This is fast and fine for a demo, but is likely
    to generalize worse on unseen data. It also cannot be sensibly used for learning
    curves on training data (the baseline will be artificially high).
    """

    # defaults
    # (REGEXP_TAGGER is a module-level RegexpTagger baseline defined alongside this demo)
    baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
    if templates is None:
        from nltk.tag.brill import brill24, describe_template_sets

        # some pre-built template sets taken from typical systems or publications are
        # available.
        # Print a list with describe_template_sets(). For instance:
        templates = brill24()

    (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
        tagged_data, train, num_sents, randomize, separate_baseline_data
    )

    # creating (or reloading from cache) a baseline tagger (unigram tagger)
    # this is just a mechanism for getting deterministic output from the baseline between
    # python versions
    if cache_baseline_tagger:
        if not os.path.exists(cache_baseline_tagger):
            baseline_tagger = UnigramTagger(
                baseline_data, backoff=baseline_backoff_tagger
            )
            # pickled data must be written and read in binary mode
            with open(cache_baseline_tagger, "wb") as print_rules:
                pickle.dump(baseline_tagger, print_rules)
            print(
                "Trained baseline tagger, pickled it to {}".format(
                    cache_baseline_tagger
                )
            )
        with open(cache_baseline_tagger, "rb") as print_rules:
            baseline_tagger = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
    else:
        baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
        print("Trained baseline tagger")
    if gold_data:
        print(
            " Accuracy on test set: {:0.4f}".format(
                baseline_tagger.accuracy(gold_data)
            )
        )

    # creating a Brill tagger
    tbrill = time.time()
    trainer = BrillTaggerTrainer(
        baseline_tagger, templates, trace, ruleformat=ruleformat
    )
    print("Training tbl tagger...")
    brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
    print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
    if gold_data:
        print(" Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))

    # printing the learned rules, if learned silently
    if trace == 1:
        print("\nLearned rules: ")
        for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
            print(f"{ruleno:4d} {rule.format(ruleformat):s}")

    # printing template statistics (optionally including comparison with the training data)
    # note: if not separate_baseline_data, then baseline accuracy will be artificially high
    if incremental_stats:
        print(
            "Incrementally tagging the test data, collecting individual rule statistics"
        )
        (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
            testing_data, gold_data
        )
        print(" Rule statistics collected")
        if not separate_baseline_data:
            print(
                "WARNING: separate_baseline_data=False, so baseline accuracy in "
                "train_stats will be artificially high"
            )
        trainstats = brill_tagger.train_stats()
        if template_stats:
            brill_tagger.print_template_statistics(teststats)
        if learning_curve_output:
            _demo_plot(
                learning_curve_output, teststats, trainstats, take=learning_curve_take
            )
            print(f"Wrote plot of learning curve to {learning_curve_output}")
    else:
        print("Tagging the test data")
        taggedtest = brill_tagger.tag_sents(testing_data)
        if template_stats:
            brill_tagger.print_template_statistics()

    # writing error analysis to file
    if error_output is not None:
        with open(error_output, "w") as f:
            f.write("Errors for Brill Tagger %r\n\n" % serialize_output)
            # f is a text-mode handle, so write a str (no .encode())
            f.write("\n".join(error_list(gold_data, taggedtest)) + "\n")
        print(f"Wrote tagger errors including context to {error_output}")

    # serializing the tagger to a pickle file and reloading (just to see it works)
    if serialize_output is not None:
        taggedtest = brill_tagger.tag_sents(testing_data)
        with open(serialize_output, "wb") as print_rules:
            pickle.dump(brill_tagger, print_rules)
        print(f"Wrote pickled tagger to {serialize_output}")
        with open(serialize_output, "rb") as print_rules:
            brill_tagger_reloaded = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {serialize_output}")
        taggedtest_reloaded = brill_tagger.tag_sents(testing_data)
        if taggedtest == taggedtest_reloaded:
print("Reloaded tagger tried on test set, results identical") else: print("PROBLEM: Reloaded tagger gave different results on test set") The provided code snippet includes necessary dependencies for implementing the `demo_high_accuracy_rules` function. Write a Python function `def demo_high_accuracy_rules()` to solve the following problem: Discard rules with low accuracy. This may hurt performance a bit, but will often produce rules which are more interesting read to a human. Here is the function: def demo_high_accuracy_rules(): """ Discard rules with low accuracy. This may hurt performance a bit, but will often produce rules which are more interesting read to a human. """ postag(num_sents=3000, min_acc=0.96, min_score=10)
Discard rules with low accuracy. This may hurt performance a bit, but will often produce rules which are more interesting for a human to read.
170,803
import re
from collections import defaultdict

from nltk.ccg.api import CCGVar, Direction, FunctionalCategory, PrimitiveCategory
from nltk.internals import deprecated
from nltk.sem.logic import Expression


def fromstring(lex_str, include_semantics=False):
    ...  # parser body elided in this record


def parseLexicon(lex_str):
    return fromstring(lex_str)
null
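The body of fromstring() is elided above; for orientation, here is a small sketch of the lexicon format it parses (the category names and entries are illustrative):

from nltk.ccg import lexicon

# ":-" declares the primitive categories; each later line maps a word to a category.
lex = lexicon.fromstring(
    """
    :- S, NP, N
    the => NP/N
    dog => N
    barks => S\\NP
    """
)
print(lex)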
170,804
from abc import ABCMeta, abstractmethod

from nltk.ccg.api import FunctionalCategory


def forwardOnly(left, right):
    return left.dir().is_forward()
null
170,805
from abc import ABCMeta, abstractmethod

from nltk.ccg.api import FunctionalCategory


def backwardOnly(left, right):
    return right.dir().is_backward()
null
170,806
from abc import ABCMeta, abstractmethod

from nltk.ccg.api import FunctionalCategory


def bothBackward(left, right):
    return left.dir().is_backward() and right.dir().is_backward()
null
170,807
from abc import ABCMeta, abstractmethod

from nltk.ccg.api import FunctionalCategory


def crossedDirs(left, right):
    return left.dir().is_forward() and right.dir().is_backward()


def backwardBxConstraint(left, right):
    # The functors must be crossed inwards
    if not crossedDirs(left, right):
        return False
    # Permuting combinators must be allowed on both sides
    if not (left.dir().can_cross() and right.dir().can_cross()):
        return False
    # The resulting argument category is restricted to be primitive
    return left.arg().is_primitive()
null
170,808
from abc import ABCMeta, abstractmethod

from nltk.ccg.api import FunctionalCategory


def bothForward(left, right):
    return left.dir().is_forward() and right.dir().is_forward()


def forwardSConstraint(left, right):
    if not bothForward(left, right):
        return False
    return left.res().dir().is_forward() and left.arg().is_primitive()
null
170,809
from abc import ABCMeta, abstractmethod

from nltk.ccg.api import FunctionalCategory


def bothForward(left, right):
    return left.dir().is_forward() and right.dir().is_forward()


def backwardSxConstraint(left, right):
    # Permuting combinators must be allowed on both sides
    if not (left.dir().can_cross() and right.dir().can_cross()):
        return False
    if not bothForward(left, right):
        return False
    return right.res().dir().is_backward() and right.arg().is_primitive()
null
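These direction predicates are what gate the combinators during chart parsing. A minimal end-to-end sketch using NLTK's CCG chart parser (the lexicon entries are illustrative):

from nltk.ccg import chart, lexicon

lex = lexicon.fromstring(
    """
    :- S, NP, N
    I => NP
    book => N
    the => NP/N
    read => (S\\NP)/NP
    """
)
parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
for parse in parser.parse("I read the book".split()):
    chart.printCCGDerivation(parse)
    break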