id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
170,810 | from abc import ABCMeta, abstractmethod
from nltk.ccg.api import FunctionalCategory
def innermostFunction(categ):
    """Return the innermost functional layer of a functional category.

    Repeatedly steps into the result category while that result is itself
    a function, and returns the last functional category reached (the one
    whose result is primitive).
    """
    current = categ
    while True:
        result = current.res()
        if not result.is_function():
            return current
        current = result
def forwardTConstraint(left, right):
    """Constraint for forward type raising.

    True when the innermost function of ``right`` expects its argument
    backward and yields a primitive result. ``left`` is accepted for
    interface symmetry with other constraints but is not inspected.
    """
    inner = innermostFunction(right)
    return inner.dir().is_backward() and inner.res().is_primitive()
170,811 | from abc import ABCMeta, abstractmethod
from nltk.ccg.api import FunctionalCategory
def innermostFunction(categ):
    """Peel result categories off ``categ`` until the result is primitive.

    Returns the deepest category that is still a function, i.e. the one
    whose result category is not itself a function.
    """
    node = categ
    while node.res().is_function():
        node = node.res()
    return node
def backwardTConstraint(left, right):
    """Constraint for backward type raising.

    True when the innermost function of ``left`` expects its argument
    forward and yields a primitive result. ``right`` is accepted for
    interface symmetry with other constraints but is not inspected.
    """
    inner = innermostFunction(left)
    return inner.dir().is_forward() and inner.res().is_primitive()
170,812 | from nltk.sem.logic import *
def compute_type_raised_semantics(semantics):
    """Type-raise a semantics term: turn X into \\F.F(X), preserving any
    leading lambda binders of the original expression.

    NOTE(review): when ``semantics`` has a lambda prefix, ``parent.term``
    is reassigned, i.e. the argument expression is mutated in place —
    confirm that callers do not reuse the original object afterwards.
    """
    core = semantics
    parent = None
    # Walk under the lambda prefix; `parent` ends as the innermost binder,
    # `core` as the lambda-free body.
    while isinstance(core, LambdaExpression):
        parent = core
        core = core.term
    # Choose a function-variable name that is not free in the core term.
    var = Variable("F")
    while var in core.free():
        var = unique_variable(pattern=var)
    # Wrap the core term: core -> F(core).
    core = ApplicationExpression(FunctionVariableExpression(var), core)
    if parent is not None:
        parent.term = core
    else:
        # No lambda prefix: the wrapped core is the whole body.
        semantics = core
    return LambdaExpression(var, semantics)
170,813 | from nltk.sem.logic import *
def compute_function_semantics(function, argument):
    """Return the simplified application of ``function`` to ``argument``
    (semantics of CCG function application)."""
    return ApplicationExpression(function, argument).simplify()
170,814 | from nltk.sem.logic import *
def compute_composition_semantics(function, argument):
    """Semantics of CCG function composition.

    Given f and \\x.g(x), build \\x.f(g(x)) with the application body
    simplified. ``argument`` must be a lambda expression.
    """
    assert isinstance(
        argument, LambdaExpression
    ), f"`{argument}` must be a lambda expression"
    composed_body = ApplicationExpression(function, argument.term).simplify()
    return LambdaExpression(argument.variable, composed_body)
170,815 | from nltk.sem.logic import *
def compute_substitution_semantics(function, argument):
    """Semantics of the CCG substitution combinator.

    Given f = \\x.\\y.P and g = \\x.Q, build \\x.((f x) (g x)) simplified.
    ``function`` must be a two-argument lambda expression and ``argument``
    a lambda expression.
    """
    assert isinstance(function, LambdaExpression) and isinstance(
        function.term, LambdaExpression
    ), f"`{function}` must be a lambda expression with 2 arguments"
    assert isinstance(
        argument, LambdaExpression
    ), f"`{argument}` must be a lambda expression"
    # Apply the argument to the shared variable bound by `function`.
    applied_argument = ApplicationExpression(
        argument, VariableExpression(function.variable)
    ).simplify()
    new_body = ApplicationExpression(function.term, applied_argument).simplify()
    return LambdaExpression(function.variable, new_body)
170,816 | import itertools
from nltk.ccg.combinator import *
from nltk.ccg.combinator import (
BackwardApplication,
BackwardBx,
BackwardComposition,
BackwardSx,
BackwardT,
ForwardApplication,
ForwardComposition,
ForwardSubstitution,
ForwardT,
)
from nltk.ccg.lexicon import Token, fromstring
from nltk.ccg.logic import *
from nltk.parse import ParserI
from nltk.parse.chart import AbstractChartRule, Chart, EdgeI
from nltk.sem.logic import *
from nltk.tree import Tree
def compute_semantics(children, edge):
    """Compute the semantics of a chart edge from its children.

    Returns None when the lexicon carries no semantics. For binary edges,
    dispatches on the undirected combinator behind the edge's rule; a
    unary edge is type raising.

    :param children: one or two subtrees whose labels hold (Token, op)
    :param edge: the edge being built; edge.rule() selects the combinator
    :raises AssertionError: if the combinator is not supported
    """
    if children[0].label()[0].semantics() is None:
        return None
    if len(children) == 2:
        # Backward rules apply the right-hand category as the functor,
        # so swap the children to put the functor first.
        if isinstance(edge.rule(), BackwardCombinator):
            children = [children[1], children[0]]
        combinator = edge.rule()._combinator
        function = children[0].label()[0].semantics()
        argument = children[1].label()[0].semantics()
        if isinstance(combinator, UndirectedFunctionApplication):
            return compute_function_semantics(function, argument)
        elif isinstance(combinator, UndirectedComposition):
            return compute_composition_semantics(function, argument)
        elif isinstance(combinator, UndirectedSubstitution):
            return compute_substitution_semantics(function, argument)
        else:
            # str() is required: concatenating the combinator object itself
            # would raise TypeError instead of the intended AssertionError.
            raise AssertionError("Unsupported combinator '" + str(combinator) + "'")
    else:
        return compute_type_raised_semantics(children[0].label()[0].semantics())
170,817 | import itertools
from nltk.ccg.combinator import *
from nltk.ccg.combinator import (
BackwardApplication,
BackwardBx,
BackwardComposition,
BackwardSx,
BackwardT,
ForwardApplication,
ForwardComposition,
ForwardSubstitution,
ForwardT,
)
from nltk.ccg.lexicon import Token, fromstring
from nltk.ccg.logic import *
from nltk.parse import ParserI
from nltk.parse.chart import AbstractChartRule, Chart, EdgeI
from nltk.sem.logic import *
from nltk.tree import Tree
# Rule set used by CCGChartParser callers that want the full combinator
# inventory: application, composition, substitution and type raising.
DefaultRuleSet = (
    ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet + TypeRaiseRuleSet
)
class CCGChartParser(ParserI):
    """
    Chart parser for CCGs.
    Based largely on the ChartParser class from NLTK.
    """

    def __init__(self, lexicon, rules, trace=0):
        """
        :param lexicon: CCG lexicon mapping words to categories
        :param rules: sequence of combinator rules to apply
        :param trace: verbosity level (kept for API compatibility)
        """
        self._lexicon = lexicon
        self._rules = rules
        self._trace = trace

    def lexicon(self):
        """Return the lexicon this parser was constructed with."""
        return self._lexicon

    # Implements the CYK algorithm
    def parse(self, tokens):
        """Parse ``tokens`` and return an iterator over complete parses."""
        chart = CCGChart(list(tokens))
        lex = self._lexicon

        # Initialize leaf edges from the lexical categories of each token.
        for index in range(chart.num_leaves()):
            for token in lex.categories(chart.leaf(index)):
                new_edge = CCGLeafEdge(index, token, chart.leaf(index))
                chart.insert(new_edge, ())

        # Select a span for the new edges
        for span in range(2, chart.num_leaves() + 1):
            for start in range(0, chart.num_leaves() - span + 1):
                # Try all possible pairs of edges that could generate
                # an edge for that span
                for part in range(1, span):
                    mid = start + part
                    rend = start + span
                    for left in chart.select(span=(start, mid)):
                        for right in chart.select(span=(mid, rend)):
                            # Generate all possible combinations of the two
                            # edges. rule.apply is a generator that inserts
                            # new edges into the chart as it is consumed, so
                            # it must be fully iterated even though the
                            # yielded edges are not used here.
                            for rule in self._rules:
                                for _ in rule.apply(chart, lex, left, right):
                                    pass

        # Output the resulting parses
        return chart.parses(lex.start())
def printCCGDerivation(tree):
    """Pretty-print a CCG derivation.

    Prints the leaf words and their lexical categories on two aligned
    lines, then delegates to printCCGTree for the derivation steps.
    """

    def centered(text, width):
        # Center `text` in `width` columns; any extra space goes right.
        pad = width - len(text)
        left = pad // 2
        return " " * left + text + " " * (pad - left)

    leafstr = ""
    catstr = ""
    # Align each leaf word with its category in a shared column.
    for leaf, cat in tree.pos():
        str_cat = "%s" % cat
        width = 2 + max(len(leaf), len(str_cat))
        catstr += centered(str_cat, width)
        leafstr += centered(leaf, width)
    print(leafstr.rstrip())
    print(catstr.rstrip())

    # Display the derivation steps
    printCCGTree(0, tree)
# Demonstration lexicon for demo(), parsed from the CCG lexicon string
# format (see nltk.ccg.lexicon.fromstring). The '#' lines inside the
# string are lexicon-format comments, not Python comments.
lex = fromstring(
    """
:- S, NP, N, VP # Primitive categories, S is the target primitive
Det :: NP/N # Family of words
Pro :: NP
TV :: VP/NP
Modal :: (S\\NP)/VP # Backslashes need to be escaped
I => Pro # Word -> Category mapping
you => Pro
the => Det
# Variables have the special keyword 'var'
# '.' prevents permutation
# ',' prevents composition
and => var\\.,var/.,var
which => (N\\N)/(S/NP)
will => Modal # Categories can be either explicit, or families.
might => Modal
cook => TV
eat => TV
mushrooms => N
parsnips => N
bacon => N
"""
)
def demo():
    """Parse an example sentence with the default rule set and print
    every derivation found."""
    parser = CCGChartParser(lex, DefaultRuleSet)
    sentence = "I might cook and eat the bacon".split()
    for derivation in parser.parse(sentence):
        printCCGDerivation(derivation)
170,818 | import json
json_tags = {}
TAG_PREFIX = "!"
The provided code snippet includes necessary dependencies for implementing the `register_tag` function. Write a Python function `def register_tag(cls)` to solve the following problem:
Decorates a class to register its JSON tag.
Here is the function:
def register_tag(cls):
    """
    Decorate a class to register its JSON tag.

    The class must define a ``json_tag`` attribute; the class is recorded
    in the module-level ``json_tags`` registry under the key
    ``TAG_PREFIX + json_tag``. Returns ``cls`` unchanged so this can be
    used as a class decorator.
    """
    # Direct attribute access replaces getattr(cls, "json_tag"): identical
    # behavior (AttributeError when missing) with clearer intent.
    json_tags[TAG_PREFIX + cls.json_tag] = cls
    return cls
170,819 | import re
from textwrap import wrap
from nltk.data import load
def _format_tagset(tagset, tagpattern=None):
    """Print help entries for ``tagset``, optionally filtered by
    ``tagpattern`` (an exact tag name, else a regular expression)."""
    tagdict = load("help/tagsets/" + tagset + ".pickle")
    if not tagpattern:
        # No filter: print every tag in sorted order.
        _print_entries(sorted(tagdict), tagdict)
        return
    if tagpattern in tagdict:
        # An exact tag match takes precedence over regex interpretation.
        _print_entries([tagpattern], tagdict)
        return
    regex = re.compile(tagpattern)
    matching = [tag for tag in sorted(tagdict) if regex.match(tag)]
    if matching:
        _print_entries(matching, tagdict)
    else:
        print("No matching tags found.")
def brown_tagset(tagpattern=None):
    """Print help about Brown-corpus tags matching ``tagpattern``
    (all tags if ``tagpattern`` is None)."""
    _format_tagset("brown_tagset", tagpattern)
170,820 | import re
from textwrap import wrap
from nltk.data import load
def _format_tagset(tagset, tagpattern=None):
    """Print help entries for ``tagset``, optionally filtered by
    ``tagpattern`` (an exact tag name, else a regular expression)."""
    tagdict = load("help/tagsets/" + tagset + ".pickle")
    if not tagpattern:
        # No filter: print every tag in sorted order.
        _print_entries(sorted(tagdict), tagdict)
        return
    if tagpattern in tagdict:
        # An exact tag match takes precedence over regex interpretation.
        _print_entries([tagpattern], tagdict)
        return
    regex = re.compile(tagpattern)
    matching = [tag for tag in sorted(tagdict) if regex.match(tag)]
    if matching:
        _print_entries(matching, tagdict)
    else:
        print("No matching tags found.")
def claws5_tagset(tagpattern=None):
    """Print help about CLAWS5 tags matching ``tagpattern``
    (all tags if ``tagpattern`` is None)."""
    _format_tagset("claws5_tagset", tagpattern)
170,821 | import re
from textwrap import wrap
from nltk.data import load
def _format_tagset(tagset, tagpattern=None):
    """Print help entries for ``tagset``, optionally filtered by
    ``tagpattern`` (an exact tag name, else a regular expression)."""
    tagdict = load("help/tagsets/" + tagset + ".pickle")
    if not tagpattern:
        # No filter: print every tag in sorted order.
        _print_entries(sorted(tagdict), tagdict)
        return
    if tagpattern in tagdict:
        # An exact tag match takes precedence over regex interpretation.
        _print_entries([tagpattern], tagdict)
        return
    regex = re.compile(tagpattern)
    matching = [tag for tag in sorted(tagdict) if regex.match(tag)]
    if matching:
        _print_entries(matching, tagdict)
    else:
        print("No matching tags found.")
def upenn_tagset(tagpattern=None):
    """Print help about Penn-Treebank tags matching ``tagpattern``
    (all tags if ``tagpattern`` is None)."""
    _format_tagset("upenn_tagset", tagpattern)
170,822 |
The provided code snippet includes necessary dependencies for implementing the `tuple2str` function. Write a Python function `def tuple2str(tagged_token, sep="/")` to solve the following problem:
Given the tuple representation of a tagged token, return the corresponding string representation. This representation is formed by concatenating the token's word string, followed by the separator, followed by the token's tag. (If the tag is None, then just return the bare word string.) >>> from nltk.tag.util import tuple2str >>> tagged_token = ('fly', 'NN') >>> tuple2str(tagged_token) 'fly/NN' :type tagged_token: tuple(str, str) :param tagged_token: The tuple representation of a tagged token. :type sep: str :param sep: The separator string used to separate word strings from tags.
Here is the function:
def tuple2str(tagged_token, sep="/"):
    """
    Given the tuple representation of a tagged token, return the
    corresponding string representation: the word, the separator, then
    the tag. If the tag is None, just the bare word string is returned.

        >>> tuple2str(('fly', 'NN'))
        'fly/NN'

    :type tagged_token: tuple(str, str)
    :param tagged_token: The tuple representation of a tagged token.
    :type sep: str
    :param sep: The separator string used to separate word strings
        from tags.
    """
    word, tag = tagged_token
    if tag is None:
        return word
    assert sep not in tag, "tag may not contain sep!"
    return word + sep + tag
170,823 |
The provided code snippet includes necessary dependencies for implementing the `untag` function. Write a Python function `def untag(tagged_sentence)` to solve the following problem:
Given a tagged sentence, return an untagged version of that sentence. I.e., return a list containing the first element of each tuple in *tagged_sentence*. >>> from nltk.tag.util import untag >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')]) ['John', 'saw', 'Mary']
Here is the function:
def untag(tagged_sentence):
    """
    Given a tagged sentence, return an untagged version of that
    sentence, i.e. a list containing the word of each (word, tag)
    pair in *tagged_sentence*.

        >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')])
        ['John', 'saw', 'Mary']
    """
    return [word for (word, _tag) in tagged_sentence]
170,824 | from collections import Counter, defaultdict
from nltk import jsontags
from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
class Word(Feature):
    """
    Feature which examines the text (word) of nearby tokens.
    """

    json_tag = "nltk.tag.brill.Word"

    @staticmethod
    def extract_property(tokens, index):
        """@return: The given token's text."""
        # @staticmethod is required: without it, accessing this through an
        # instance would bind the instance as `tokens` and break the call.
        return tokens[index][0]
class Pos(Feature):
    """
    Feature which examines the tags of nearby tokens.
    """

    json_tag = "nltk.tag.brill.Pos"

    @staticmethod
    def extract_property(tokens, index):
        """@return: The given token's tag."""
        # @staticmethod is required: without it, accessing this through an
        # instance would bind the instance as `tokens` and break the call.
        return tokens[index][1]
def nltkdemo18():
    """
    Return 18 templates, from the original nltk demo, in multi-feature syntax.

    For each of Pos and Word: eight single-feature templates over fixed
    offset windows, followed by one two-feature template pairing the
    immediate left and right neighbours.
    """
    windows = ([-1], [1], [-2], [2], [-2, -1], [1, 2], [-3, -2, -1], [1, 2, 3])
    templates = []
    for feature in (Pos, Word):
        templates.extend(Template(feature(list(w))) for w in windows)
        templates.append(Template(feature([-1]), feature([1])))
    return templates
The provided code snippet includes necessary dependencies for implementing the `nltkdemo18plus` function. Write a Python function `def nltkdemo18plus()` to solve the following problem:
Return 18 templates, from the original nltk demo, and additionally a few multi-feature ones (the motivation is easy comparison with nltkdemo18)
Here is the function:
def nltkdemo18plus():
    """
    Return 18 templates, from the original nltk demo, and additionally a few
    multi-feature ones (the motivation is easy comparison with nltkdemo18)
    """
    extra_templates = [
        Template(Word([-1]), Pos([1])),
        Template(Pos([-1]), Word([1])),
        Template(Word([-1]), Word([0]), Pos([1])),
        Template(Pos([-1]), Word([0]), Word([1])),
        Template(Pos([-1]), Word([0]), Pos([1])),
    ]
    return nltkdemo18() + extra_templates
170,825 | from collections import Counter, defaultdict
from nltk import jsontags
from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
class Word(Feature):
    """
    Feature which examines the text (word) of nearby tokens.
    """

    json_tag = "nltk.tag.brill.Word"

    @staticmethod
    def extract_property(tokens, index):
        """@return: The given token's text."""
        # @staticmethod is required: without it, accessing this through an
        # instance would bind the instance as `tokens` and break the call.
        return tokens[index][0]
class Pos(Feature):
    """
    Feature which examines the tags of nearby tokens.
    """

    json_tag = "nltk.tag.brill.Pos"

    @staticmethod
    def extract_property(tokens, index):
        """@return: The given token's tag."""
        # @staticmethod is required: without it, accessing this through an
        # instance would bind the instance as `tokens` and break the call.
        return tokens[index][1]
The provided code snippet includes necessary dependencies for implementing the `fntbl37` function. Write a Python function `def fntbl37()` to solve the following problem:
Return 37 templates taken from the postagging task of the fntbl distribution https://www.cs.jhu.edu/~rflorian/fntbl/ (37 is after excluding a handful which do not condition on Pos[0]; fntbl can do that but the current nltk implementation cannot.)
Here is the function:
def fntbl37():
    """
    Return 37 templates taken from the postagging task of the
    fntbl distribution https://www.cs.jhu.edu/~rflorian/fntbl/
    (37 is after excluding a handful which do not condition on Pos[0];
    fntbl can do that but the current nltk implementation cannot.)
    """
    return [
        Template(Word([0]), Word([1]), Word([2])),
        Template(Word([-1]), Word([0]), Word([1])),
        Template(Word([0]), Word([-1])),
        Template(Word([0]), Word([1])),
        Template(Word([0]), Word([2])),
        Template(Word([0]), Word([-2])),
        Template(Word([1, 2])),
        Template(Word([-2, -1])),
        Template(Word([1, 2, 3])),
        Template(Word([-3, -2, -1])),
        Template(Word([0]), Pos([2])),
        Template(Word([0]), Pos([-2])),
        Template(Word([0]), Pos([1])),
        Template(Word([0]), Pos([-1])),
        Template(Word([0])),
        Template(Word([-2])),
        Template(Word([2])),
        Template(Word([1])),
        Template(Word([-1])),
        Template(Pos([-1]), Pos([1])),
        Template(Pos([1]), Pos([2])),
        Template(Pos([-1]), Pos([-2])),
        Template(Pos([1])),
        Template(Pos([-1])),
        Template(Pos([-2])),
        Template(Pos([2])),
        Template(Pos([1, 2, 3])),
        Template(Pos([1, 2])),
        Template(Pos([-3, -2, -1])),
        Template(Pos([-2, -1])),
        Template(Pos([1]), Word([0]), Word([1])),
        Template(Pos([1]), Word([0]), Word([-1])),
        Template(Pos([-1]), Word([-1]), Word([0])),
        Template(Pos([-1]), Word([0]), Word([1])),
        Template(Pos([-2]), Pos([-1])),
        # NOTE(review): this duplicates the Pos([1]), Pos([2]) template
        # above — confirm against the fntbl template list before removing.
        Template(Pos([1]), Pos([2])),
        Template(Pos([1]), Pos([2]), Word([1])),
    ]
170,826 | import itertools
import re
from nltk.metrics import accuracy
from nltk.probability import (
ConditionalFreqDist,
ConditionalProbDist,
DictionaryConditionalProbDist,
DictionaryProbDist,
FreqDist,
LidstoneProbDist,
MLEProbDist,
MutableProbDist,
RandomProbDist,
)
from nltk.tag.api import TaggerI
from nltk.util import LazyMap, unique_list
def _identity(labeled_symbols):
    """Identity transform: return ``labeled_symbols`` unchanged."""
    return labeled_symbols
170,827 | import itertools
import re
from nltk.metrics import accuracy
from nltk.probability import (
ConditionalFreqDist,
ConditionalProbDist,
DictionaryConditionalProbDist,
DictionaryProbDist,
FreqDist,
LidstoneProbDist,
MLEProbDist,
MutableProbDist,
RandomProbDist,
)
from nltk.tag.api import TaggerI
from nltk.util import LazyMap, unique_list
def _ninf_array(shape):
res = np.empty(shape, np.float64)
res.fill(-np.inf)
return res | null |
170,828 | import itertools
import re
from nltk.metrics import accuracy
from nltk.probability import (
ConditionalFreqDist,
ConditionalProbDist,
DictionaryConditionalProbDist,
DictionaryProbDist,
FreqDist,
LidstoneProbDist,
MLEProbDist,
MutableProbDist,
RandomProbDist,
)
from nltk.tag.api import TaggerI
from nltk.util import LazyMap, unique_list
def logsumexp2(arr):
    """Return log2(sum(2**arr)) computed stably by factoring out the
    maximum element before exponentiating."""
    peak = arr.max()
    return peak + np.log2(np.sum(2 ** (arr - peak)))
170,829 | import itertools
import re
from nltk.metrics import accuracy
from nltk.probability import (
ConditionalFreqDist,
ConditionalProbDist,
DictionaryConditionalProbDist,
DictionaryProbDist,
FreqDist,
LidstoneProbDist,
MLEProbDist,
MutableProbDist,
RandomProbDist,
)
from nltk.tag.api import TaggerI
from nltk.util import LazyMap, unique_list
The provided code snippet includes necessary dependencies for implementing the `_log_add` function. Write a Python function `def _log_add(*values)` to solve the following problem:
Adds the logged values, returning the logarithm of the addition.
Here is the function:
def _log_add(*values):
"""
Adds the logged values, returning the logarithm of the addition.
"""
x = max(values)
if x > -np.inf:
sum_diffs = 0
for value in values:
sum_diffs += 2 ** (value - x)
return x + np.log2(sum_diffs)
else:
return x | Adds the logged values, returning the logarithm of the addition. |
170,830 | import itertools
import re
from nltk.metrics import accuracy
from nltk.probability import (
ConditionalFreqDist,
ConditionalProbDist,
DictionaryConditionalProbDist,
DictionaryProbDist,
FreqDist,
LidstoneProbDist,
MLEProbDist,
MutableProbDist,
RandomProbDist,
)
from nltk.tag.api import TaggerI
from nltk.util import LazyMap, unique_list
def _market_hmm_example():
    """
    Return an example HMM (described at page 381, Huang et al)
    """
    states = ["bull", "bear", "static"]
    symbols = ["up", "down", "unchanged"]
    # Transition, emission and prior probabilities from the textbook example.
    transitions = np.array(
        [[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64
    )
    emissions = np.array(
        [[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64
    )
    priors = np.array([0.5, 0.2, 0.3], np.float64)
    model = _create_hmm_tagger(states, symbols, transitions, emissions, priors)
    return model, states, symbols
def demo():
    """Demonstrate HMM probability calculation on the market-model example.

    For several observation sequences, prints the sequence probability,
    the most likely tagging, and entropy measures (including the
    exhaustive variants, for cross-checking the efficient ones).
    """
    # demonstrates HMM probability calculation
    print()
    print("HMM probability calculation demo")
    print()
    model, states, symbols = _market_hmm_example()
    print("Testing", model)
    for test in [
        ["up", "up"],
        ["up", "down", "up"],
        ["down"] * 5,
        ["unchanged"] * 5 + ["up"],
    ]:
        # Tags are unknown, so pair each observation with None.
        sequence = [(t, None) for t in test]
        print("Testing with state sequence", test)
        print("probability =", model.probability(sequence))
        print("tagging = ", model.tag([word for (word, tag) in sequence]))
        print("p(tagged) = ", model.probability(sequence))
        print("H = ", model.entropy(sequence))
        print("H_exh = ", model._exhaustive_entropy(sequence))
        print("H(point) = ", model.point_entropy(sequence))
        print("H_exh(point)=", model._exhaustive_point_entropy(sequence))
        print()
170,831 | import itertools
import re
from nltk.metrics import accuracy
from nltk.probability import (
ConditionalFreqDist,
ConditionalProbDist,
DictionaryConditionalProbDist,
DictionaryProbDist,
FreqDist,
LidstoneProbDist,
MLEProbDist,
MutableProbDist,
RandomProbDist,
)
from nltk.tag.api import TaggerI
from nltk.util import LazyMap, unique_list
class HiddenMarkovModelTrainer:
    """
    Algorithms for learning HMM parameters from training data. These include
    both supervised learning (MLE) and unsupervised learning (Baum-Welch).

    Creates an HMM trainer to induce an HMM with the given states and
    output symbol alphabet. A supervised and unsupervised training
    method may be used. If either of the states or symbols are not given,
    these may be derived from supervised training.

    :param states: the set of state labels
    :type states: sequence of any
    :param symbols: the set of observation symbols
    :type symbols: sequence of any
    """

    def __init__(self, states=None, symbols=None):
        self._states = states if states else []
        self._symbols = symbols if symbols else []

    def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs):
        """
        Trains the HMM using both (or either of) supervised and unsupervised
        techniques.

        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param labelled_sequences: the supervised training data, a set of
            labelled sequences of observations
            ex: [ (word_1, tag_1),...,(word_n,tag_n) ]
        :type labelled_sequences: list
        :param unlabeled_sequences: the unsupervised training data, a set of
            sequences of observations
            ex: [ word_1, ..., word_n ]
        :type unlabeled_sequences: list
        :param kwargs: additional arguments to pass to the training methods
        """
        assert labeled_sequences or unlabeled_sequences
        model = None
        if labeled_sequences:
            model = self.train_supervised(labeled_sequences, **kwargs)
        if unlabeled_sequences:
            if model:
                # Seed Baum-Welch with the supervised model.
                kwargs["model"] = model
            model = self.train_unsupervised(unlabeled_sequences, **kwargs)
        return model

    def _baum_welch_step(self, sequence, model, symbol_to_number):
        """
        Run one Baum-Welch accumulation pass over ``sequence``.

        :return: (lpk, A_numer, A_denom, B_numer, B_denom) where lpk is the
            log2 probability of the sequence and the remaining values are
            the per-sequence numerator/denominator accumulators (in log2
            space) for the transition (A) and output (B) updates.
        """
        N = len(model._states)
        M = len(model._symbols)
        T = len(sequence)

        # compute forward and backward probabilities
        alpha = model._forward_probability(sequence)
        beta = model._backward_probability(sequence)

        # find the log probability of the sequence
        lpk = logsumexp2(alpha[T - 1])

        A_numer = _ninf_array((N, N))
        B_numer = _ninf_array((N, M))
        A_denom = _ninf_array(N)
        B_denom = _ninf_array(N)

        transitions_logprob = model._transitions_matrix().T

        for t in range(T):
            symbol = sequence[t][_TEXT]  # not found? FIXME
            next_symbol = None
            if t < T - 1:
                next_symbol = sequence[t + 1][_TEXT]  # not found? FIXME
            xi = symbol_to_number[symbol]

            next_outputs_logprob = model._outputs_vector(next_symbol)
            alpha_plus_beta = alpha[t] + beta[t]

            if t < T - 1:
                numer_add = (
                    transitions_logprob
                    + next_outputs_logprob
                    + beta[t + 1]
                    + alpha[t].reshape(N, 1)
                )
                A_numer = np.logaddexp2(A_numer, numer_add)
                A_denom = np.logaddexp2(A_denom, alpha_plus_beta)
            else:
                # At the last step, fold the A_denom accumulated over
                # t < T-1 together with the final alpha+beta term, so that
                # B_denom covers all timesteps 0..T-1.
                B_denom = np.logaddexp2(A_denom, alpha_plus_beta)

            # BUGFIX: the output numerator must be accumulated at EVERY
            # timestep; it was previously nested inside the else-branch
            # above and therefore only ran at t == T-1.
            B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta)

        return lpk, A_numer, A_denom, B_numer, B_denom

    def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs):
        """
        Trains the HMM using the Baum-Welch algorithm to maximise the
        probability of the data sequence. This is a variant of the EM
        algorithm, and is unsupervised in that it doesn't need the state
        sequences for the symbols. The code is based on 'A Tutorial on Hidden
        Markov Models and Selected Applications in Speech Recognition',
        Lawrence Rabiner, IEEE, 1989.

        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param unlabeled_sequences: the training data, a set of
            sequences of observations
        :type unlabeled_sequences: list

        kwargs may include following parameters:

        :param model: a HiddenMarkovModelTagger instance used to begin
            the Baum-Welch algorithm
        :param max_iterations: the maximum number of EM iterations
        :param convergence_logprob: the maximum change in log probability to
            allow convergence
        """
        # create a uniform HMM, which will be iteratively refined, unless
        # given an existing model
        model = kwargs.get("model")
        if not model:
            priors = RandomProbDist(self._states)
            transitions = DictionaryConditionalProbDist(
                {state: RandomProbDist(self._states) for state in self._states}
            )
            outputs = DictionaryConditionalProbDist(
                {state: RandomProbDist(self._symbols) for state in self._states}
            )
            model = HiddenMarkovModelTagger(
                self._symbols, self._states, transitions, outputs, priors
            )

        self._states = model._states
        self._symbols = model._symbols

        N = len(self._states)
        M = len(self._symbols)
        symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)}

        # update model prob dists so that they can be modified
        # model._priors = MutableProbDist(model._priors, self._states)
        model._transitions = DictionaryConditionalProbDist(
            {
                s: MutableProbDist(model._transitions[s], self._states)
                for s in self._states
            }
        )
        if update_outputs:
            model._outputs = DictionaryConditionalProbDist(
                {
                    s: MutableProbDist(model._outputs[s], self._symbols)
                    for s in self._states
                }
            )
        model.reset_cache()

        # iterate until convergence
        converged = False
        last_logprob = None
        iteration = 0
        max_iterations = kwargs.get("max_iterations", 1000)
        epsilon = kwargs.get("convergence_logprob", 1e-6)

        while not converged and iteration < max_iterations:
            A_numer = _ninf_array((N, N))
            B_numer = _ninf_array((N, M))
            A_denom = _ninf_array(N)
            B_denom = _ninf_array(N)

            logprob = 0
            for sequence in unlabeled_sequences:
                sequence = list(sequence)
                if not sequence:
                    continue

                (
                    lpk,
                    seq_A_numer,
                    seq_A_denom,
                    seq_B_numer,
                    seq_B_denom,
                ) = self._baum_welch_step(sequence, model, symbol_numbers)

                # add these sums to the global A and B values
                for i in range(N):
                    A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk)
                    B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk)

                A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk)
                B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk)

                logprob += lpk

            # use the calculated values to update the transition and output
            # probability values
            for i in range(N):
                logprob_Ai = A_numer[i] - A_denom[i]
                logprob_Bi = B_numer[i] - B_denom[i]

                # We should normalize all probabilities (see p.391 Huang et al)
                # Let sum(P) be K.
                # We can divide each Pi by K to make sum(P) == 1.
                #   Pi' = Pi/K
                #   log2(Pi') = log2(Pi) - log2(K)
                logprob_Ai -= logsumexp2(logprob_Ai)
                logprob_Bi -= logsumexp2(logprob_Bi)

                # update output and transition probabilities
                si = self._states[i]

                for j in range(N):
                    sj = self._states[j]
                    model._transitions[si].update(sj, logprob_Ai[j])

                if update_outputs:
                    for k in range(M):
                        ok = self._symbols[k]
                        model._outputs[si].update(ok, logprob_Bi[k])

                # Rabiner says the priors don't need to be updated. I don't
                # believe him. FIXME

            # test for convergence
            if iteration > 0 and abs(logprob - last_logprob) < epsilon:
                converged = True

            print("iteration", iteration, "logprob", logprob)
            iteration += 1
            last_logprob = logprob

        return model

    def train_supervised(self, labelled_sequences, estimator=None):
        """
        Supervised training maximising the joint probability of the symbol and
        state sequences. This is done via collecting frequencies of
        transitions between states, symbol observations while within each
        state and which states start a sentence. These frequency distributions
        are then normalised into probability estimates, which can be
        smoothed if desired.

        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param labelled_sequences: the training data, a set of
            labelled sequences of observations
        :type labelled_sequences: list
        :param estimator: a function taking
            a FreqDist and a number of bins and returning a CProbDistI;
            otherwise a MLE estimate is used
        """
        # default to the MLE estimate
        if estimator is None:
            estimator = lambda fdist, bins: MLEProbDist(fdist)

        # count occurrences of starting states, transitions out of each state
        # and output symbols observed in each state
        known_symbols = set(self._symbols)
        known_states = set(self._states)

        starting = FreqDist()
        transitions = ConditionalFreqDist()
        outputs = ConditionalFreqDist()
        for sequence in labelled_sequences:
            lasts = None
            for token in sequence:
                state = token[_TAG]
                symbol = token[_TEXT]
                if lasts is None:
                    starting[state] += 1
                else:
                    transitions[lasts][state] += 1
                outputs[state][symbol] += 1
                lasts = state

                # update the state and symbol lists
                if state not in known_states:
                    self._states.append(state)
                    known_states.add(state)

                if symbol not in known_symbols:
                    self._symbols.append(symbol)
                    known_symbols.add(symbol)

        # create probability distributions (with smoothing)
        N = len(self._states)
        pi = estimator(starting, N)
        A = ConditionalProbDist(transitions, estimator, N)
        B = ConditionalProbDist(outputs, estimator, len(self._symbols))

        return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi)
def load_pos(num_sents):
    """Load ``num_sents`` tagged sentences from the Brown news corpus.

    Words are lowercased and tags reduced to their leading component via
    a regex. Returns (cleaned_sentences, tag_list, symbol_list).
    """
    from nltk.corpus import brown

    sentences = brown.tagged_sents(categories="news")[:num_sents]

    tag_re = re.compile(r"[*]|--|[^+*-]+")
    tag_set = set()
    symbols = set()

    cleaned_sentences = []
    for sentence in sentences:
        for i, (word, tag) in enumerate(sentence):
            word = word.lower()  # normalize
            symbols.add(word)  # log this word
            tag = tag_re.match(tag).group()  # clean up the tag
            tag_set.add(tag)
            sentence[i] = (word, tag)  # store cleaned-up tagged token
        cleaned_sentences += [sentence]

    return cleaned_sentences, list(tag_set), list(symbols)
class LidstoneProbDist(ProbDistI):
    """
    The Lidstone estimate for the probability distribution of the
    experiment used to generate a frequency distribution. The
    "Lidstone estimate" is parameterized by a real number *gamma*,
    which typically ranges from 0 to 1. The Lidstone estimate
    approximates the probability of a sample with count *c* from an
    experiment with *N* outcomes and *B* bins as
    ``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
    *gamma* to the count for each bin, and taking the maximum
    likelihood estimate of the resulting frequency distribution.
    """

    # Lidstone estimates only sum to one when `bins` equals the true
    # number of possible sample values.
    SUM_TO_ONE = False

    def __init__(self, freqdist, gamma, bins=None):
        """
        Use the Lidstone estimate to create a probability distribution
        for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type gamma: float
        :param gamma: A real number used to parameterize the
            estimate. The Lidstone estimate is equivalent to adding
            *gamma* to the count for each bin, and taking the
            maximum likelihood estimate of the resulting frequency
            distribution.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution. This value must be correctly set for the
            probabilities of the sample values to sum to one. If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        if (bins == 0) or (bins is None and freqdist.N() == 0):
            # Strip the "ProbDist" suffix from the class name for messages.
            name = self.__class__.__name__[:-8]
            raise ValueError(
                "A %s probability distribution " % name + "must have at least one bin."
            )
        if (bins is not None) and (bins < freqdist.B()):
            name = self.__class__.__name__[:-8]
            raise ValueError(
                "\nThe number of bins in a %s distribution " % name
                + "(%d) must be greater than or equal to\n" % bins
                + "the number of bins in the FreqDist used "
                + "to create it (%d)." % freqdist.B()
            )
        self._freqdist = freqdist
        self._gamma = float(gamma)
        self._N = self._freqdist.N()
        if bins is None:
            bins = freqdist.B()
        self._bins = bins
        # Shared denominator of the Lidstone estimate: N + B*gamma.
        self._divisor = self._N + bins * gamma
        if self._divisor == 0.0:
            # In extreme cases we force the probability to be 0,
            # which it will be, since the count will be 0:
            self._gamma = 0
            self._divisor = 1

    def freqdist(self):
        """
        Return the frequency distribution that this probability
        distribution is based on.

        :rtype: FreqDist
        """
        return self._freqdist

    def prob(self, sample):
        # Lidstone estimate: (count + gamma) / (N + B*gamma).
        c = self._freqdist[sample]
        return (c + self._gamma) / self._divisor

    def max(self):
        # For Lidstone distributions, probability is monotonic with
        # frequency, so the most probable sample is the one that
        # occurs most frequently.
        return self._freqdist.max()

    def samples(self):
        return self._freqdist.keys()

    def discount(self):
        # Fraction of probability mass reserved for unseen events.
        gb = self._gamma * self._bins
        return gb / (self._N + gb)

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return "<LidstoneProbDist based on %d samples>" % self._freqdist.N()
def demo_pos():
    """Demonstrate supervised POS tagging: train a Lidstone-smoothed HMM
    on Brown-corpus sentences and evaluate on a held-out prefix."""
    print()
    print("HMM POS tagging demo")
    print()
    print("Training HMM...")
    tagged_sents, tags, words = load_pos(20000)
    estimator = lambda fd, bins: LidstoneProbDist(fd, 0.1, bins)
    trainer = HiddenMarkovModelTrainer(tags, words)
    tagger = trainer.train_supervised(tagged_sents[10:], estimator=estimator)
    print("Testing...")
    tagger.test(tagged_sents[:10], verbose=True)
import itertools
import re
from nltk.metrics import accuracy
from nltk.probability import (
ConditionalFreqDist,
ConditionalProbDist,
DictionaryConditionalProbDist,
DictionaryProbDist,
FreqDist,
LidstoneProbDist,
MLEProbDist,
MutableProbDist,
RandomProbDist,
)
from nltk.tag.api import TaggerI
from nltk.util import LazyMap, unique_list
# Indices into a (text, tag) token tuple.
_TEXT = 0  # the word itself
_TAG = 1  # the tag; referenced by train_supervised() but was never defined
class HiddenMarkovModelTrainer:
    """
    Algorithms for learning HMM parameters from training data. These include
    both supervised learning (MLE) and unsupervised learning (Baum-Welch).
    Creates an HMM trainer to induce an HMM with the given states and
    output symbol alphabet. A supervised and unsupervised training
    method may be used. If either of the states or symbols are not given,
    these may be derived from supervised training.
    :param states: the set of state labels
    :type states: sequence of any
    :param symbols: the set of observation symbols
    :type symbols: sequence of any
    """

    def __init__(self, states=None, symbols=None):
        # Empty inventories are grown lazily by train_supervised() as new
        # states/symbols are observed in the training data.
        self._states = states if states else []
        self._symbols = symbols if symbols else []

    def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs):
        """
        Trains the HMM using both (or either of) supervised and unsupervised
        techniques.
        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param labelled_sequences: the supervised training data, a set of
            labelled sequences of observations
            ex: [ (word_1, tag_1),...,(word_n,tag_n) ]
        :type labelled_sequences: list
        :param unlabeled_sequences: the unsupervised training data, a set of
            sequences of observations
            ex: [ word_1, ..., word_n ]
        :type unlabeled_sequences: list
        :param kwargs: additional arguments to pass to the training methods
        """
        assert labeled_sequences or unlabeled_sequences
        model = None
        if labeled_sequences:
            model = self.train_supervised(labeled_sequences, **kwargs)
        if unlabeled_sequences:
            if model:
                # Use the supervised model as the starting point for
                # Baum-Welch refinement.
                kwargs["model"] = model
            model = self.train_unsupervised(unlabeled_sequences, **kwargs)
        return model

    def _baum_welch_step(self, sequence, model, symbol_to_number):
        # One E-step of Baum-Welch over a single observation sequence:
        # accumulate base-2 log-space numerators/denominators for the
        # transition (A) and output (B) re-estimates.
        # NOTE(review): relies on `np` (numpy), `logsumexp2` and
        # `_ninf_array` which are not defined in this chunk -- confirm
        # they are imported/defined elsewhere in the module.
        N = len(model._states)
        M = len(model._symbols)
        T = len(sequence)
        # compute forward and backward probabilities
        alpha = model._forward_probability(sequence)
        beta = model._backward_probability(sequence)
        # find the log probability of the sequence
        lpk = logsumexp2(alpha[T - 1])
        A_numer = _ninf_array((N, N))
        B_numer = _ninf_array((N, M))
        A_denom = _ninf_array(N)
        B_denom = _ninf_array(N)
        transitions_logprob = model._transitions_matrix().T
        for t in range(T):
            symbol = sequence[t][_TEXT]  # not found? FIXME
            next_symbol = None
            if t < T - 1:
                next_symbol = sequence[t + 1][_TEXT]  # not found? FIXME
            xi = symbol_to_number[symbol]
            next_outputs_logprob = model._outputs_vector(next_symbol)
            alpha_plus_beta = alpha[t] + beta[t]
            if t < T - 1:
                # Expected i->j transitions at time t (Rabiner's xi):
                # alpha_i(t) * a_ij * b_j(o_{t+1}) * beta_j(t+1), in log2 space.
                numer_add = (
                    transitions_logprob
                    + next_outputs_logprob
                    + beta[t + 1]
                    + alpha[t].reshape(N, 1)
                )
                A_numer = np.logaddexp2(A_numer, numer_add)
                A_denom = np.logaddexp2(A_denom, alpha_plus_beta)
            else:
                # Last time step: only the output estimates are updated.
                # NOTE(review): `A_denom` on the right-hand side looks like
                # it should be `B_denom` -- verify against Rabiner (1989).
                B_denom = np.logaddexp2(A_denom, alpha_plus_beta)
            B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta)
        return lpk, A_numer, A_denom, B_numer, B_denom

    def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs):
        """
        Trains the HMM using the Baum-Welch algorithm to maximise the
        probability of the data sequence. This is a variant of the EM
        algorithm, and is unsupervised in that it doesn't need the state
        sequences for the symbols. The code is based on 'A Tutorial on Hidden
        Markov Models and Selected Applications in Speech Recognition',
        Lawrence Rabiner, IEEE, 1989.
        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param unlabeled_sequences: the training data, a set of
            sequences of observations
        :type unlabeled_sequences: list
        :param update_outputs: when false, only the transition
            probabilities are re-estimated; the output distributions are
            left unchanged.
        kwargs may include following parameters:
        :param model: a HiddenMarkovModelTagger instance used to begin
            the Baum-Welch algorithm
        :param max_iterations: the maximum number of EM iterations
        :param convergence_logprob: the maximum change in log probability to
            allow convergence
        """
        # create a uniform HMM, which will be iteratively refined, unless
        # given an existing model
        model = kwargs.get("model")
        if not model:
            priors = RandomProbDist(self._states)
            transitions = DictionaryConditionalProbDist(
                {state: RandomProbDist(self._states) for state in self._states}
            )
            outputs = DictionaryConditionalProbDist(
                {state: RandomProbDist(self._symbols) for state in self._states}
            )
            model = HiddenMarkovModelTagger(
                self._symbols, self._states, transitions, outputs, priors
            )
        # Adopt the model's inventories so the index arithmetic below lines
        # up with the model's own state/symbol ordering.
        self._states = model._states
        self._symbols = model._symbols
        N = len(self._states)
        M = len(self._symbols)
        symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)}
        # update model prob dists so that they can be modified
        # model._priors = MutableProbDist(model._priors, self._states)
        model._transitions = DictionaryConditionalProbDist(
            {
                s: MutableProbDist(model._transitions[s], self._states)
                for s in self._states
            }
        )
        if update_outputs:
            model._outputs = DictionaryConditionalProbDist(
                {
                    s: MutableProbDist(model._outputs[s], self._symbols)
                    for s in self._states
                }
            )
        model.reset_cache()
        # iterate until convergence
        converged = False
        last_logprob = None
        iteration = 0
        max_iterations = kwargs.get("max_iterations", 1000)
        epsilon = kwargs.get("convergence_logprob", 1e-6)
        while not converged and iteration < max_iterations:
            A_numer = _ninf_array((N, N))
            B_numer = _ninf_array((N, M))
            A_denom = _ninf_array(N)
            B_denom = _ninf_array(N)
            logprob = 0
            for sequence in unlabeled_sequences:
                sequence = list(sequence)
                if not sequence:
                    continue
                (
                    lpk,
                    seq_A_numer,
                    seq_A_denom,
                    seq_B_numer,
                    seq_B_denom,
                ) = self._baum_welch_step(sequence, model, symbol_numbers)
                # add these sums to the global A and B values
                # (subtracting lpk normalizes by this sequence's likelihood)
                for i in range(N):
                    A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk)
                    B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk)
                A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk)
                B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk)
                logprob += lpk
            # use the calculated values to update the transition and output
            # probability values
            for i in range(N):
                logprob_Ai = A_numer[i] - A_denom[i]
                logprob_Bi = B_numer[i] - B_denom[i]
                # We should normalize all probabilities (see p.391 Huang et al)
                # Let sum(P) be K.
                # We can divide each Pi by K to make sum(P) == 1.
                # Pi' = Pi/K
                # log2(Pi') = log2(Pi) - log2(K)
                logprob_Ai -= logsumexp2(logprob_Ai)
                logprob_Bi -= logsumexp2(logprob_Bi)
                # update output and transition probabilities
                si = self._states[i]
                for j in range(N):
                    sj = self._states[j]
                    model._transitions[si].update(sj, logprob_Ai[j])
                if update_outputs:
                    for k in range(M):
                        ok = self._symbols[k]
                        model._outputs[si].update(ok, logprob_Bi[k])
            # Rabiner says the priors don't need to be updated. I don't
            # believe him. FIXME
            # test for convergence
            if iteration > 0 and abs(logprob - last_logprob) < epsilon:
                converged = True
            print("iteration", iteration, "logprob", logprob)
            iteration += 1
            last_logprob = logprob
        return model

    def train_supervised(self, labelled_sequences, estimator=None):
        """
        Supervised training maximising the joint probability of the symbol and
        state sequences. This is done via collecting frequencies of
        transitions between states, symbol observations while within each
        state and which states start a sentence. These frequency distributions
        are then normalised into probability estimates, which can be
        smoothed if desired.
        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param labelled_sequences: the training data, a set of
            labelled sequences of observations
        :type labelled_sequences: list
        :param estimator: a function taking
            a FreqDist and a number of bins and returning a CProbDistI;
            otherwise a MLE estimate is used
        """
        # default to the MLE estimate
        if estimator is None:
            estimator = lambda fdist, bins: MLEProbDist(fdist)
        # count occurrences of starting states, transitions out of each state
        # and output symbols observed in each state
        known_symbols = set(self._symbols)
        known_states = set(self._states)
        starting = FreqDist()
        transitions = ConditionalFreqDist()
        outputs = ConditionalFreqDist()
        for sequence in labelled_sequences:
            lasts = None
            for token in sequence:
                # NOTE(review): `_TAG` (index of the tag in a token tuple)
                # is not defined alongside `_TEXT` in this chunk -- confirm
                # it is defined at module level.
                state = token[_TAG]
                symbol = token[_TEXT]
                if lasts is None:
                    starting[state] += 1
                else:
                    transitions[lasts][state] += 1
                outputs[state][symbol] += 1
                lasts = state
                # update the state and symbol lists
                if state not in known_states:
                    self._states.append(state)
                    known_states.add(state)
                if symbol not in known_symbols:
                    self._symbols.append(symbol)
                    known_symbols.add(symbol)
        # create probability distributions (with smoothing)
        N = len(self._states)
        pi = estimator(starting, N)
        A = ConditionalProbDist(transitions, estimator, N)
        B = ConditionalProbDist(outputs, estimator, len(self._symbols))
        return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi)
def load_pos(num_sents):
    """Load up to ``num_sents`` tagged news sentences from the Brown corpus,
    lower-casing words and simplifying compound tags.

    :return: (cleaned tagged sentences, list of tags, list of words)
    """
    from nltk.corpus import brown

    tagged = brown.tagged_sents(categories="news")[:num_sents]
    # Keep only the first component of compound tags (e.g. "NN-TL+..." -> "NN").
    pattern = re.compile(r"[*]|--|[^+*-]+")
    tags = set()
    words = set()
    cleaned = []
    for sent in tagged:
        for idx, (word, tag) in enumerate(sent):
            word = word.lower()  # normalize
            words.add(word)  # log this word
            tag = pattern.match(tag).group()
            tags.add(tag)
            sent[idx] = (word, tag)  # store cleaned-up tagged token
        cleaned.append(sent)
    return cleaned, list(tags), list(words)
def _untag(sentences):
    """Return copies of ``sentences`` with every tag replaced by ``None``,
    yielding unlabeled data suitable for Baum-Welch training."""
    return [
        [(token[_TEXT], None) for token in sentence] for sentence in sentences
    ]
class LidstoneProbDist(ProbDistI):
    """
    The Lidstone estimate for the probability distribution of the
    experiment used to generate a frequency distribution.  The Lidstone
    estimate is parameterized by a real number *gamma*, typically in the
    range 0 to 1, and approximates the probability of a sample with
    count *c* from an experiment with *N* outcomes and *B* bins as
    ``(c+gamma)/(N+B*gamma)``.  Equivalently: add *gamma* to every bin's
    count and take the maximum likelihood estimate of the result.
    """

    # Smoothed estimates reserve mass for unseen bins, so the visible
    # samples need not sum to one.
    SUM_TO_ONE = False

    def __init__(self, freqdist, gamma, bins=None):
        """
        Use the Lidstone estimate to create a probability distribution
        for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution the estimates are based on.
        :type gamma: float
        :param gamma: Additive smoothing constant applied to every bin.
        :type bins: int
        :param bins: The number of sample values the experiment can
            generate; must be set correctly for the probabilities to sum
            to one.  Defaults to ``freqdist.B()``.
        :raises ValueError: if there would be zero bins, or fewer bins
            than the underlying FreqDist has.
        """
        if bins == 0 or (bins is None and freqdist.N() == 0):
            name = self.__class__.__name__[:-8]  # strip "ProbDist"
            raise ValueError(
                "A %s probability distribution must have at least one bin." % name
            )
        if bins is not None and bins < freqdist.B():
            name = self.__class__.__name__[:-8]
            raise ValueError(
                "\nThe number of bins in a %s distribution "
                "(%d) must be greater than or equal to\n"
                "the number of bins in the FreqDist used "
                "to create it (%d)." % (name, bins, freqdist.B())
            )
        self._freqdist = freqdist
        self._gamma = float(gamma)
        self._N = self._freqdist.N()
        self._bins = freqdist.B() if bins is None else bins
        # Shared denominator N + B*gamma of (c + gamma) / (N + B*gamma).
        self._divisor = self._N + self._bins * gamma
        if self._divisor == 0.0:
            # Degenerate case: all counts are 0, so force probability 0
            # for every sample instead of dividing by zero.
            self._gamma = 0
            self._divisor = 1

    def freqdist(self):
        """The ``FreqDist`` this probability distribution is based on.

        :rtype: FreqDist
        """
        return self._freqdist

    def prob(self, sample):
        """Return ``(c + gamma) / (N + B*gamma)`` for the sample's count ``c``."""
        count = self._freqdist[sample]
        return (count + self._gamma) / self._divisor

    def max(self):
        """Return the most probable sample.  Probability is monotonic in
        frequency for Lidstone distributions, so this is simply the most
        frequent sample."""
        return self._freqdist.max()

    def samples(self):
        """Return the samples observed in the underlying FreqDist."""
        return self._freqdist.keys()

    def discount(self):
        """Return ``gamma*B / (N + gamma*B)``, the probability mass
        contributed by smoothing rather than by observed counts."""
        added_mass = self._gamma * self._bins
        return added_mass / (self._N + added_mass)

    def __repr__(self):
        """Return a short string representation of this ``ProbDist``.

        :rtype: str
        """
        sample_count = self._freqdist.N()
        return "<LidstoneProbDist based on %d samples>" % sample_count
def demo_pos_bw(
    test=10, supervised=20, unsupervised=10, verbose=True, max_iterations=5
):
    """Demonstrate Baum-Welch for POS tagging: supervised training on a
    small slice, then unsupervised refinement on untagged sentences."""
    print()
    print("Baum-Welch demo for POS tagging")
    print()
    print("Training HMM (supervised, %d sentences)..." % supervised)
    sentences, tag_set, symbols = load_pos(test + supervised + unsupervised)
    # Rebuild the symbol inventory directly from the loaded sentences.
    symbols = {token[_TEXT] for sentence in sentences for token in sentence}
    trainer = HiddenMarkovModelTrainer(tag_set, list(symbols))
    estimator = lambda fd, bins: LidstoneProbDist(fd, 0.1, bins)
    hmm = trainer.train_supervised(
        sentences[test : test + supervised], estimator=estimator
    )
    hmm.test(sentences[:test], verbose=verbose)
    print("Training (unsupervised, %d sentences)..." % unsupervised)
    # it's rather slow - so only use 10 samples by default
    unlabeled = _untag(sentences[test + supervised :])
    hmm = trainer.train_unsupervised(
        unlabeled, model=hmm, max_iterations=max_iterations
    )
    hmm.test(sentences[:test], verbose=verbose)
import itertools
import re
from nltk.metrics import accuracy
from nltk.probability import (
ConditionalFreqDist,
ConditionalProbDist,
DictionaryConditionalProbDist,
DictionaryProbDist,
FreqDist,
LidstoneProbDist,
MLEProbDist,
MutableProbDist,
RandomProbDist,
)
from nltk.tag.api import TaggerI
from nltk.util import LazyMap, unique_list
class HiddenMarkovModelTrainer:
    """
    Algorithms for learning HMM parameters from training data. These include
    both supervised learning (MLE) and unsupervised learning (Baum-Welch).
    Creates an HMM trainer to induce an HMM with the given states and
    output symbol alphabet. A supervised and unsupervised training
    method may be used. If either of the states or symbols are not given,
    these may be derived from supervised training.
    :param states: the set of state labels
    :type states: sequence of any
    :param symbols: the set of observation symbols
    :type symbols: sequence of any
    """

    def __init__(self, states=None, symbols=None):
        # Empty inventories are grown lazily by train_supervised() as new
        # states/symbols are observed in the training data.
        self._states = states if states else []
        self._symbols = symbols if symbols else []

    def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs):
        """
        Trains the HMM using both (or either of) supervised and unsupervised
        techniques.
        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param labelled_sequences: the supervised training data, a set of
            labelled sequences of observations
            ex: [ (word_1, tag_1),...,(word_n,tag_n) ]
        :type labelled_sequences: list
        :param unlabeled_sequences: the unsupervised training data, a set of
            sequences of observations
            ex: [ word_1, ..., word_n ]
        :type unlabeled_sequences: list
        :param kwargs: additional arguments to pass to the training methods
        """
        assert labeled_sequences or unlabeled_sequences
        model = None
        if labeled_sequences:
            model = self.train_supervised(labeled_sequences, **kwargs)
        if unlabeled_sequences:
            if model:
                # Use the supervised model as the starting point for
                # Baum-Welch refinement.
                kwargs["model"] = model
            model = self.train_unsupervised(unlabeled_sequences, **kwargs)
        return model

    def _baum_welch_step(self, sequence, model, symbol_to_number):
        # One E-step of Baum-Welch over a single observation sequence:
        # accumulate base-2 log-space numerators/denominators for the
        # transition (A) and output (B) re-estimates.
        # NOTE(review): relies on `np` (numpy), `logsumexp2` and
        # `_ninf_array` which are not defined in this chunk -- confirm
        # they are imported/defined elsewhere in the module.
        N = len(model._states)
        M = len(model._symbols)
        T = len(sequence)
        # compute forward and backward probabilities
        alpha = model._forward_probability(sequence)
        beta = model._backward_probability(sequence)
        # find the log probability of the sequence
        lpk = logsumexp2(alpha[T - 1])
        A_numer = _ninf_array((N, N))
        B_numer = _ninf_array((N, M))
        A_denom = _ninf_array(N)
        B_denom = _ninf_array(N)
        transitions_logprob = model._transitions_matrix().T
        for t in range(T):
            symbol = sequence[t][_TEXT]  # not found? FIXME
            next_symbol = None
            if t < T - 1:
                next_symbol = sequence[t + 1][_TEXT]  # not found? FIXME
            xi = symbol_to_number[symbol]
            next_outputs_logprob = model._outputs_vector(next_symbol)
            alpha_plus_beta = alpha[t] + beta[t]
            if t < T - 1:
                # Expected i->j transitions at time t (Rabiner's xi):
                # alpha_i(t) * a_ij * b_j(o_{t+1}) * beta_j(t+1), in log2 space.
                numer_add = (
                    transitions_logprob
                    + next_outputs_logprob
                    + beta[t + 1]
                    + alpha[t].reshape(N, 1)
                )
                A_numer = np.logaddexp2(A_numer, numer_add)
                A_denom = np.logaddexp2(A_denom, alpha_plus_beta)
            else:
                # Last time step: only the output estimates are updated.
                # NOTE(review): `A_denom` on the right-hand side looks like
                # it should be `B_denom` -- verify against Rabiner (1989).
                B_denom = np.logaddexp2(A_denom, alpha_plus_beta)
            B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta)
        return lpk, A_numer, A_denom, B_numer, B_denom

    def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs):
        """
        Trains the HMM using the Baum-Welch algorithm to maximise the
        probability of the data sequence. This is a variant of the EM
        algorithm, and is unsupervised in that it doesn't need the state
        sequences for the symbols. The code is based on 'A Tutorial on Hidden
        Markov Models and Selected Applications in Speech Recognition',
        Lawrence Rabiner, IEEE, 1989.
        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param unlabeled_sequences: the training data, a set of
            sequences of observations
        :type unlabeled_sequences: list
        :param update_outputs: when false, only the transition
            probabilities are re-estimated; the output distributions are
            left unchanged.
        kwargs may include following parameters:
        :param model: a HiddenMarkovModelTagger instance used to begin
            the Baum-Welch algorithm
        :param max_iterations: the maximum number of EM iterations
        :param convergence_logprob: the maximum change in log probability to
            allow convergence
        """
        # create a uniform HMM, which will be iteratively refined, unless
        # given an existing model
        model = kwargs.get("model")
        if not model:
            priors = RandomProbDist(self._states)
            transitions = DictionaryConditionalProbDist(
                {state: RandomProbDist(self._states) for state in self._states}
            )
            outputs = DictionaryConditionalProbDist(
                {state: RandomProbDist(self._symbols) for state in self._states}
            )
            model = HiddenMarkovModelTagger(
                self._symbols, self._states, transitions, outputs, priors
            )
        # Adopt the model's inventories so the index arithmetic below lines
        # up with the model's own state/symbol ordering.
        self._states = model._states
        self._symbols = model._symbols
        N = len(self._states)
        M = len(self._symbols)
        symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)}
        # update model prob dists so that they can be modified
        # model._priors = MutableProbDist(model._priors, self._states)
        model._transitions = DictionaryConditionalProbDist(
            {
                s: MutableProbDist(model._transitions[s], self._states)
                for s in self._states
            }
        )
        if update_outputs:
            model._outputs = DictionaryConditionalProbDist(
                {
                    s: MutableProbDist(model._outputs[s], self._symbols)
                    for s in self._states
                }
            )
        model.reset_cache()
        # iterate until convergence
        converged = False
        last_logprob = None
        iteration = 0
        max_iterations = kwargs.get("max_iterations", 1000)
        epsilon = kwargs.get("convergence_logprob", 1e-6)
        while not converged and iteration < max_iterations:
            A_numer = _ninf_array((N, N))
            B_numer = _ninf_array((N, M))
            A_denom = _ninf_array(N)
            B_denom = _ninf_array(N)
            logprob = 0
            for sequence in unlabeled_sequences:
                sequence = list(sequence)
                if not sequence:
                    continue
                (
                    lpk,
                    seq_A_numer,
                    seq_A_denom,
                    seq_B_numer,
                    seq_B_denom,
                ) = self._baum_welch_step(sequence, model, symbol_numbers)
                # add these sums to the global A and B values
                # (subtracting lpk normalizes by this sequence's likelihood)
                for i in range(N):
                    A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk)
                    B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk)
                A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk)
                B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk)
                logprob += lpk
            # use the calculated values to update the transition and output
            # probability values
            for i in range(N):
                logprob_Ai = A_numer[i] - A_denom[i]
                logprob_Bi = B_numer[i] - B_denom[i]
                # We should normalize all probabilities (see p.391 Huang et al)
                # Let sum(P) be K.
                # We can divide each Pi by K to make sum(P) == 1.
                # Pi' = Pi/K
                # log2(Pi') = log2(Pi) - log2(K)
                logprob_Ai -= logsumexp2(logprob_Ai)
                logprob_Bi -= logsumexp2(logprob_Bi)
                # update output and transition probabilities
                si = self._states[i]
                for j in range(N):
                    sj = self._states[j]
                    model._transitions[si].update(sj, logprob_Ai[j])
                if update_outputs:
                    for k in range(M):
                        ok = self._symbols[k]
                        model._outputs[si].update(ok, logprob_Bi[k])
            # Rabiner says the priors don't need to be updated. I don't
            # believe him. FIXME
            # test for convergence
            if iteration > 0 and abs(logprob - last_logprob) < epsilon:
                converged = True
            print("iteration", iteration, "logprob", logprob)
            iteration += 1
            last_logprob = logprob
        return model

    def train_supervised(self, labelled_sequences, estimator=None):
        """
        Supervised training maximising the joint probability of the symbol and
        state sequences. This is done via collecting frequencies of
        transitions between states, symbol observations while within each
        state and which states start a sentence. These frequency distributions
        are then normalised into probability estimates, which can be
        smoothed if desired.
        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param labelled_sequences: the training data, a set of
            labelled sequences of observations
        :type labelled_sequences: list
        :param estimator: a function taking
            a FreqDist and a number of bins and returning a CProbDistI;
            otherwise a MLE estimate is used
        """
        # default to the MLE estimate
        if estimator is None:
            estimator = lambda fdist, bins: MLEProbDist(fdist)
        # count occurrences of starting states, transitions out of each state
        # and output symbols observed in each state
        known_symbols = set(self._symbols)
        known_states = set(self._states)
        starting = FreqDist()
        transitions = ConditionalFreqDist()
        outputs = ConditionalFreqDist()
        for sequence in labelled_sequences:
            lasts = None
            for token in sequence:
                # NOTE(review): `_TAG` (index of the tag in a token tuple)
                # is not defined alongside `_TEXT` in this chunk -- confirm
                # it is defined at module level.
                state = token[_TAG]
                symbol = token[_TEXT]
                if lasts is None:
                    starting[state] += 1
                else:
                    transitions[lasts][state] += 1
                outputs[state][symbol] += 1
                lasts = state
                # update the state and symbol lists
                if state not in known_states:
                    self._states.append(state)
                    known_states.add(state)
                if symbol not in known_symbols:
                    self._symbols.append(symbol)
                    known_symbols.add(symbol)
        # create probability distributions (with smoothing)
        N = len(self._states)
        pi = estimator(starting, N)
        A = ConditionalProbDist(transitions, estimator, N)
        B = ConditionalProbDist(outputs, estimator, len(self._symbols))
        return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi)
def _market_hmm_example():
    """
    Return an example HMM (described at page 381, Huang et al)
    """
    state_names = ["bull", "bear", "static"]
    obs_names = ["up", "down", "unchanged"]
    transition = np.array(
        [[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64
    )
    emission = np.array(
        [[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64
    )
    start = np.array([0.5, 0.2, 0.3], np.float64)
    model = _create_hmm_tagger(state_names, obs_names, transition, emission, start)
    return model, state_names, obs_names
def demo_bw():
    """Demonstrate Baum-Welch: sample sequences from the market-example HMM,
    then run unsupervised training starting from the generating model."""
    print()
    print("Baum-Welch demo for market example")
    print()
    model, states, symbols = _market_hmm_example()
    # generate some random sequences (seeded, so the demo is reproducible)
    import random

    rng = random.Random()
    rng.seed(0)
    training = [
        [(obs[0], None) for obs in model.random_sample(rng, 5)]
        for _ in range(10)
    ]
    # train on those examples, starting with the model that generated them
    trainer = HiddenMarkovModelTrainer(states, symbols)
    hmm = trainer.train_unsupervised(training, model=model, max_iterations=1000)
import logging
import pickle
import random
from collections import defaultdict
from nltk import jsontags
from nltk.data import find, load
from nltk.tag.api import TaggerI
def _pc(n, d):
    """Return ``n`` as a percentage of ``d`` (e.g. ``_pc(1, 4) == 25.0``)."""
    ratio = n / d
    return ratio * 100
import logging
import pickle
import random
from collections import defaultdict
from nltk import jsontags
from nltk.data import find, load
from nltk.tag.api import TaggerI
# Filename of the pretrained tagger model, looked up under
# "taggers/averaged_perceptron_tagger/" by PerceptronTagger.__init__.
PICKLE = "averaged_perceptron_tagger.pickle"
class PerceptronTagger(TaggerI):
    """
    Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal.
    See more implementation details here:
    https://explosion.ai/blog/part-of-speech-pos-tagger-in-python

    >>> from nltk.tag.perceptron import PerceptronTagger

    Train the model

    >>> tagger = PerceptronTagger(load=False)
    >>> tagger.train([[('today','NN'),('is','VBZ'),('good','JJ'),('day','NN')],
    ... [('yes','NNS'),('it','PRP'),('beautiful','JJ')]])
    >>> tagger.tag(['today','is','a','beautiful','day'])
    [('today', 'NN'), ('is', 'PRP'), ('a', 'PRP'), ('beautiful', 'JJ'), ('day', 'NN')]

    Use the pretrain model (the default constructor)

    >>> pretrain = PerceptronTagger()
    >>> pretrain.tag('The quick brown fox jumps over the lazy dog'.split())
    [('The', 'DT'), ('quick', 'JJ'), ('brown', 'NN'), ('fox', 'NN'), ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')]
    >>> pretrain.tag("The red cat".split())
    [('The', 'DT'), ('red', 'JJ'), ('cat', 'NN')]
    """

    json_tag = "nltk.tag.sequential.PerceptronTagger"

    # Pseudo-tokens padding every sentence so the feature extractor can
    # always look two positions left and right.
    START = ["-START-", "-START2-"]
    END = ["-END-", "-END2-"]

    def __init__(self, load=True):
        """
        :param load: Load the pickled model upon instantiation.
        """
        self.model = AveragedPerceptron()
        self.tagdict = {}  # word -> tag shortcut for frequent, unambiguous words
        self.classes = set()  # the set of known tags
        if load:
            AP_MODEL_LOC = "file:" + str(
                find("taggers/averaged_perceptron_tagger/" + PICKLE)
            )
            self.load(AP_MODEL_LOC)

    def tag(self, tokens, return_conf=False, use_tagdict=True):
        """
        Tag tokenized sentences.

        :params tokens: list of word
        :type tokens: list(str)
        :return: list of (word, tag) pairs, or (word, tag, conf) triples
            when ``return_conf`` is true.
        """
        prev, prev2 = self.START
        output = []
        context = self.START + [self.normalize(w) for w in tokens] + self.END
        for i, word in enumerate(tokens):
            # Frequent unambiguous words bypass the perceptron entirely
            # (confidence 1.0).
            tag, conf = (self.tagdict.get(word), 1.0) if use_tagdict else (None, None)
            if not tag:
                features = self._get_features(i, word, context, prev, prev2)
                tag, conf = self.model.predict(features, return_conf)
            output.append((word, tag, conf) if return_conf else (word, tag))
            prev2 = prev
            prev = tag
        return output

    def train(self, sentences, save_loc=None, nr_iter=5):
        """Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
        controls the number of Perceptron training iterations.

        :param sentences: A list or iterator of sentences, where each sentence
            is a list of (words, tags) tuples.
        :param save_loc: If not ``None``, saves a pickled model in this location.
        :param nr_iter: Number of training iterations.
        """
        # We'd like to allow ``sentences`` to be either a list or an iterator,
        # the latter being especially important for a large training dataset.
        # Because ``self._make_tagdict(sentences)`` runs regardless, we make
        # it populate ``self._sentences`` (a list) with all the sentences.
        # This saves the overheard of just iterating through ``sentences`` to
        # get the list by ``sentences = list(sentences)``.
        self._sentences = list()  # to be populated by self._make_tagdict...
        self._make_tagdict(sentences)
        self.model.classes = self.classes
        for iter_ in range(nr_iter):
            c = 0  # correct guesses this iteration
            n = 0  # total guesses this iteration
            for sentence in self._sentences:
                words, tags = zip(*sentence)
                prev, prev2 = self.START
                context = self.START + [self.normalize(w) for w in words] + self.END
                for i, word in enumerate(words):
                    guess = self.tagdict.get(word)
                    if not guess:
                        feats = self._get_features(i, word, context, prev, prev2)
                        guess, _ = self.model.predict(feats)
                        # Only perceptron-made guesses update the weights;
                        # tagdict hits are treated as ground truth.
                        self.model.update(tags[i], guess, feats)
                    prev2 = prev
                    prev = guess
                    c += guess == tags[i]
                    n += 1
            # Shuffle between iterations so the perceptron doesn't overfit
            # to the sentence order.
            random.shuffle(self._sentences)
            logging.info(f"Iter {iter_}: {c}/{n}={_pc(c, n)}")
        # We don't need the training sentences anymore, and we don't want to
        # waste space on them when we pickle the trained tagger.
        self._sentences = None
        self.model.average_weights()
        # Pickle as a binary file
        if save_loc is not None:
            with open(save_loc, "wb") as fout:
                # changed protocol from -1 to 2 to make pickling Python 2 compatible
                pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2)

    def load(self, loc):
        """
        :param loc: Load a pickled model at location.
        :type loc: str
        """
        self.model.weights, self.tagdict, self.classes = load(loc)
        self.model.classes = self.classes

    def encode_json_obj(self):
        # Triple mirrored by decode_json_obj(); classes is converted to a
        # list because sets are not JSON-serializable.
        return self.model.weights, self.tagdict, list(self.classes)

    @classmethod
    def decode_json_obj(cls, obj):
        # Fixed: must be a classmethod -- it receives the class (``cls``)
        # as its first argument and acts as an alternate constructor
        # invoked on the class by the JSON decoding machinery.
        tagger = cls(load=False)
        tagger.model.weights, tagger.tagdict, tagger.classes = obj
        tagger.classes = set(tagger.classes)
        tagger.model.classes = tagger.classes
        return tagger

    def normalize(self, word):
        """
        Normalization used in pre-processing.
        - All words are lower cased
        - Groups of digits of length 4 are represented as !YEAR;
        - Other digits are represented as !DIGITS

        :rtype: str
        """
        if "-" in word and word[0] != "-":
            return "!HYPHEN"
        if word.isdigit() and len(word) == 4:
            return "!YEAR"
        if word and word[0].isdigit():
            return "!DIGITS"
        return word.lower()

    def _get_features(self, i, word, context, prev, prev2):
        """Map tokens into a feature representation, implemented as a
        {hashable: int} dict. If the features change, a new model must be
        trained.
        """

        def add(name, *args):
            features[" ".join((name,) + tuple(args))] += 1

        i += len(self.START)  # shift the index past the start padding
        features = defaultdict(int)
        # It's useful to have a constant feature, which acts sort of like a prior
        add("bias")
        add("i suffix", word[-3:])
        add("i pref1", word[0] if word else "")
        add("i-1 tag", prev)
        add("i-2 tag", prev2)
        add("i tag+i-2 tag", prev, prev2)
        add("i word", context[i])
        add("i-1 tag+i word", prev, context[i])
        add("i-1 word", context[i - 1])
        add("i-1 suffix", context[i - 1][-3:])
        add("i-2 word", context[i - 2])
        add("i+1 word", context[i + 1])
        add("i+1 suffix", context[i + 1][-3:])
        add("i+2 word", context[i + 2])
        return features

    def _make_tagdict(self, sentences):
        """
        Make a tag dictionary for single-tag words.

        :param sentences: A list of list of (word, tag) tuples.
        """
        counts = defaultdict(lambda: defaultdict(int))
        for sentence in sentences:
            self._sentences.append(sentence)
            for word, tag in sentence:
                counts[word][tag] += 1
                self.classes.add(tag)
        freq_thresh = 20
        ambiguity_thresh = 0.97
        for word, tag_freqs in counts.items():
            tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
            n = sum(tag_freqs.values())
            # Don't add rare words to the tag dictionary
            # Only add quite unambiguous words
            if n >= freq_thresh and (mode / n) >= ambiguity_thresh:
                self.tagdict[word] = tag
def _load_data_conll_format(filename):
    """Read a CoNLL-format file into a list of tagged sentences.

    Each non-blank line is a tab-separated token row (column 1 = word,
    column 4 = POS tag); a blank line terminates a sentence.

    :param filename: path to the CoNLL file
    :return: list of sentences, each a list of (word, tag) tuples
    """
    print("Read from file: ", filename)
    sentences = []
    sentence = []
    # BUG FIX: the file was opened in binary mode ("rb"), but the lines
    # were split with a str separator ("\t"), which raises TypeError on
    # Python 3 and would yield bytes tokens in any case.  Open as text.
    with open(filename, encoding="utf-8") as fin:
        for line in fin:
            line = line.strip()
            if len(line) == 0:
                # Blank line: sentence boundary.
                sentences.append(sentence)
                sentence = []
                continue
            tokens = line.split("\t")
            word = tokens[1]
            tag = tokens[4]
            sentence.append((word, tag))
    return sentences
def _get_pretrain_model():
    """Train a PerceptronTagger on WSJ ConLL data and report its accuracy.

    Train: sections 2-11 of the Penn Treebank (WSJ); Test: section 23.
    """
    tagger = PerceptronTagger()
    train_sents = _load_data_conll_format("english_ptb_train.conll")
    test_sents = _load_data_conll_format("english_ptb_test.conll")
    print("Size of training and testing (sentence)", len(train_sents), len(test_sents))
    # Train and save the model
    tagger.train(train_sents, PICKLE)
    print("Accuracy : ", tagger.accuracy(test_sents))
from math import log
from operator import itemgetter
from nltk.probability import ConditionalFreqDist, FreqDist
from nltk.tag.api import TaggerI
The provided code snippet includes necessary dependencies for implementing the `basic_sent_chop` function. Write a Python function `def basic_sent_chop(data, raw=True)` to solve the following problem:
Basic method for tokenizing input into sentences for this tagger. :param data: list of tokens (words or (word, tag) tuples) :type data: str or tuple(str, str) :param raw: boolean flag marking the input data as a list of words or a list of tagged words :type raw: bool :return: list of sentences. Sentences are a list of tokens; the tokens are the same as the input. The function takes a list of tokens and separates the tokens into lists, where each list represents a sentence fragment. This function can separate both tagged and raw sequences into basic sentences. Sentence markers are the set [,.!?]. This is a simple method which enhances the performance of the TnT tagger; better sentence tokenization will further enhance the results.
Here is the function:
def basic_sent_chop(data, raw=True):
    """
    Split a flat token sequence into sentence fragments for this tagger.

    :param data: list of tokens (words or (word, tag) tuples)
    :type data: str or tuple(str, str)
    :param raw: True if ``data`` is a list of bare words, False if it is
        a list of (word, tag) pairs
    :type raw: bool
    :return: list of sentences, where each sentence is a list of tokens
        of the same form as the input

    Any token from the marker set [,.!?] closes the current fragment.
    Chopping the input this way is a simple heuristic that improves the
    performance of the TnT tagger; a real sentence tokenizer would do
    better still.

    Note: trailing tokens after the last sentence marker are discarded,
    matching the historical behaviour of this helper.
    """
    sentences = []
    current = []
    terminators = (",", ".", "?", "!")

    for item in data:
        # The marker test always looks at the word itself, whether or
        # not the token carries a tag.
        word = item if raw else item[0]
        current.append(item)
        if word in terminators:
            sentences.append(current)
            current = []

    return sentences
170,837 | from math import log
from operator import itemgetter
from nltk.probability import ConditionalFreqDist, FreqDist
from nltk.tag.api import TaggerI
class TnT(TaggerI):
    """
    TnT - Statistical POS tagger

    IMPORTANT NOTES:

    * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS

      - It is possible to provide an untrained POS tagger to
        create tags for unknown words, see __init__ function

    * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT

      - Due to the nature of this tagger, it works best when
        trained over sentence delimited input.
      - However it still produces good results if the training
        data and testing data are separated on all punctuation eg: [,.?!]
      - Input for training is expected to be a list of sentences
        where each sentence is a list of (word, tag) tuples
      - Input for tag function is a single sentence
        Input for tagdata function is a list of sentences
        Output is of a similar form

    * Function provided to process text that is unsegmented

      - Please see basic_sent_chop()

    TnT uses a second order Markov model to produce tags for
    a sequence of input, specifically:

      argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T)

    IE: the maximum projection of a set of probabilities

    The set of possible tags for a given word is derived
    from the training data. It is the set of all tags
    that exact word has been assigned.

    To speed up and get more precision, we can use log addition
    instead of multiplication, specifically:

      argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] +
             log(P(t_T+1|t_T))

    The probability of a tag for a given word is the linear
    interpolation of 3 markov models; a zero-order, first-order,
    and a second order model.

      P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) +
                             l3*P(t_i| t_i-1, t_i-2)

    A beam search is used to limit the memory usage of the algorithm.
    The degree of the beam can be changed using N in the initialization.
    N represents the maximum number of possible solutions to maintain
    while tagging.

    It is possible to differentiate the tags which are assigned to
    capitalized words. However this does not result in a significant
    gain in the accuracy of the results.
    """

    def __init__(self, unk=None, Trained=False, N=1000, C=False):
        """
        Construct a TnT statistical tagger. Tagger must be trained
        before being used to tag input.

        :param unk: instance of a POS tagger, conforms to TaggerI
        :type unk: TaggerI
        :param Trained: Indication that the POS tagger is trained or not
        :type Trained: bool
        :param N: Beam search degree (see above)
        :type N: int
        :param C: Capitalization flag
        :type C: bool

        Initializer, creates frequency distributions to be used
        for tagging

        _lx values represent the portion of the tri/bi/uni taggers
        to be used to calculate the probability

        N value is the number of possible solutions to maintain
        while tagging. A good value for this is 1000

        C is a boolean value which specifies to use or
        not use the Capitalization of the word as additional
        information for tagging.
        NOTE: using capitalization may not increase the accuracy
        of the tagger
        """
        self._uni = FreqDist()            # unigram (tag, C) counts
        self._bi = ConditionalFreqDist()  # bigram counts: (tag, C) given previous
        self._tri = ConditionalFreqDist() # trigram counts: (tag, C) given previous two
        self._wd = ConditionalFreqDist()  # word emission counts: tag given word
        self._eos = ConditionalFreqDist() # end-of-sentence counts per final tag
        # interpolation weights; filled in by _compute_lambda() after training
        self._l1 = 0.0
        self._l2 = 0.0
        self._l3 = 0.0
        self._N = N
        self._C = C
        self._T = Trained

        self._unk = unk

        # statistical tools (ignore or delete me)
        self.unknown = 0
        self.known = 0

    def train(self, data):
        """
        Uses a set of tagged data to train the tagger.
        If an unknown word tagger is specified,
        it is trained on the same data.

        :param data: List of lists of (word, tag) tuples
        :type data: tuple(str)
        """
        # Ensure that local C flag is initialized before use
        C = False

        if self._unk is not None and self._T == False:
            self._unk.train(data)

        for sent in data:
            # two beginning-of-sentence markers seed the trigram context
            history = [("BOS", False), ("BOS", False)]
            for w, t in sent:

                # if capitalization is requested,
                # and the word begins with a capital
                # set local flag C to True
                if self._C and w[0].isupper():
                    C = True

                self._wd[w][t] += 1
                self._uni[(t, C)] += 1
                self._bi[history[1]][(t, C)] += 1
                self._tri[tuple(history)][(t, C)] += 1

                history.append((t, C))
                history.pop(0)

                # set local flag C to false for the next word
                C = False

            # NOTE(review): `t` here is the last tag of the sentence; this
            # assumes every sentence in `data` is non-empty — confirm.
            self._eos[t]["EOS"] += 1

        # compute lambda values from the trained frequency distributions
        self._compute_lambda()

    def _compute_lambda(self):
        """
        creates lambda values based upon training data

        NOTE: no need to explicitly reference C,
        it is contained within the tag variable :: tag == (tag,C)

        for each tag trigram (t1, t2, t3)
        depending on the maximum value of
        - f(t1,t2,t3)-1 / f(t1,t2)-1
        - f(t2,t3)-1 / f(t2)-1
        - f(t3)-1 / N-1

        increment l3,l2, or l1 by f(t1,t2,t3)

        ISSUES -- Resolutions:
        if 2 values are equal, increment both lambda values
        by (f(t1,t2,t3) / 2)
        """

        # temporary lambda variables
        tl1 = 0.0
        tl2 = 0.0
        tl3 = 0.0

        # for each t1,t2 in system
        for history in self._tri.conditions():
            (h1, h2) = history

            # for each t3 given t1,t2 in system
            # (NOTE: tag actually represents (tag,C))
            # However no effect within this function
            for tag in self._tri[history].keys():

                # if there has only been 1 occurrence of this tag in the data
                # then ignore this trigram.
                if self._uni[tag] == 1:
                    continue

                # safe_div provides a safe floating point division
                # it returns -1 if the denominator is 0
                c3 = self._safe_div(
                    (self._tri[history][tag] - 1), (self._tri[history].N() - 1)
                )
                c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1))
                c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1))

                # if c1 is the maximum value:
                if (c1 > c3) and (c1 > c2):
                    tl1 += self._tri[history][tag]

                # if c2 is the maximum value
                elif (c2 > c3) and (c2 > c1):
                    tl2 += self._tri[history][tag]

                # if c3 is the maximum value
                elif (c3 > c2) and (c3 > c1):
                    tl3 += self._tri[history][tag]

                # if c3, and c2 are equal and larger than c1
                elif (c3 == c2) and (c3 > c1):
                    tl2 += self._tri[history][tag] / 2.0
                    tl3 += self._tri[history][tag] / 2.0

                # if c1, and c2 are equal and larger than c3
                # this might be a dumb thing to do....(not sure yet)
                elif (c2 == c1) and (c1 > c3):
                    tl1 += self._tri[history][tag] / 2.0
                    tl2 += self._tri[history][tag] / 2.0

                # otherwise there might be a problem
                # eg: all values = 0
                else:
                    pass

        # Lambda normalisation:
        # ensures that l1+l2+l3 = 1
        self._l1 = tl1 / (tl1 + tl2 + tl3)
        self._l2 = tl2 / (tl1 + tl2 + tl3)
        self._l3 = tl3 / (tl1 + tl2 + tl3)

    def _safe_div(self, v1, v2):
        """
        Safe floating point division function, does not allow division by 0
        returns -1 if the denominator is 0
        """
        if v2 == 0:
            return -1
        else:
            return v1 / v2

    def tagdata(self, data):
        """
        Tags each sentence in a list of sentences

        :param data:list of list of words
        :type data: [[string,],]
        :return: list of list of (word, tag) tuples

        Invokes tag(sent) function for each sentence
        compiles the results into a list of tagged sentences
        each tagged sentence is a list of (word, tag) tuples
        """
        res = []
        for sent in data:
            res1 = self.tag(sent)
            res.append(res1)
        return res

    def tag(self, data):
        """
        Tags a single sentence

        :param data: list of words
        :type data: [string,]
        :return: [(word, tag),]

        Calls recursive function '_tagword'
        to produce a list of tags

        Associates the sequence of returned tags
        with the correct words in the input sequence

        returns a list of (word, tag) tuples
        """
        current_state = [(["BOS", "BOS"], 0.0)]

        sent = list(data)

        tags = self._tagword(sent, current_state)

        res = []
        for i in range(len(sent)):
            # tags[0:2] are the two BOS markers, so the tag for word i is
            # at offset i + 2; unpack and discard the C flag.
            (t, C) = tags[i + 2]
            res.append((sent[i], t))

        return res

    def _tagword(self, sent, current_states):
        """
        :param sent : List of words remaining in the sentence
        :type sent : [word,]
        :param current_states : List of possible tag combinations for
                                the sentence so far, and the log probability
                                associated with each tag combination
        :type current_states : [([tag, ], logprob), ]

        Tags the first word in the sentence and
        recursively tags the remainder of the sentence

        Uses formula specified above to calculate the probability
        of a particular tag
        """
        # if this word marks the end of the sentence,
        # return the most probable tag
        if sent == []:
            (h, logp) = current_states[0]
            return h

        # otherwise there are more words to be tagged
        word = sent[0]
        sent = sent[1:]
        new_states = []

        # if the Capitalisation is requested,
        # initialise the flag for this word
        C = False
        if self._C and word[0].isupper():
            C = True

        # if word is known
        # compute the set of possible tags
        # and their associated log probabilities
        if word in self._wd:
            self.known += 1

            for (history, curr_sent_logprob) in current_states:
                logprobs = []  # NOTE(review): unused; retained verbatim

                for t in self._wd[word].keys():
                    tC = (t, C)
                    p_uni = self._uni.freq(tC)
                    p_bi = self._bi[history[-1]].freq(tC)
                    p_tri = self._tri[tuple(history[-2:])].freq(tC)
                    p_wd = self._wd[word][t] / self._uni[tC]
                    p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri
                    p2 = log(p, 2) + log(p_wd, 2)

                    # compute the result of appending each tag to this history
                    new_states.append((history + [tC], curr_sent_logprob + p2))

        # otherwise a new word, set of possible tags is unknown
        else:
            self.unknown += 1

            # since a set of possible tags,
            # and the probability of each specific tag
            # can not be returned from most classifiers:
            # specify that any unknown words are tagged with certainty
            p = 1

            # if no unknown word tagger has been specified
            # then use the tag 'Unk'
            if self._unk is None:
                tag = ("Unk", C)

            # otherwise apply the unknown word tagger
            else:
                [(_w, t)] = list(self._unk.tag([word]))
                tag = (t, C)

            # histories are extended in place (log(1) == 0 adds nothing to
            # the log-probabilities), so new_states aliases current_states
            for (history, logprob) in current_states:
                history.append(tag)

            new_states = current_states

        # now have computed a set of possible new_states

        # sort states by log prob
        # set is now ordered greatest to least log probability
        new_states.sort(reverse=True, key=itemgetter(1))

        # del everything after N (threshold)
        # this is the beam search cut
        if len(new_states) > self._N:
            new_states = new_states[: self._N]

        # compute the tags for the rest of the sentence
        # return the best list of tags for the sentence
        return self._tagword(sent, new_states)
# Part-of-speech tagged Brown corpus, categorized by genre via cats.txt;
# LazyCorpusLoader defers reading the corpus files until first access.
brown: CategorizedTaggedCorpusReader = LazyCorpusLoader(
    "brown",
    CategorizedTaggedCorpusReader,
    r"c[a-z]\d\d",
    cat_file="cats.txt",
    tagset="brown",
    encoding="ascii",
)
def demo():
    """
    Train a TnT tagger on a slice of the Brown corpus and print its
    predictions side by side with the gold-standard tags.
    """
    from nltk.corpus import brown

    gold = list(brown.tagged_sents())
    plain = list(brown.sents())

    tagger = TnT()
    tagger.train(gold[200:1000])

    tagged_data = tagger.tagdata(plain[100:120])

    for j, guess_sent in enumerate(tagged_data):
        gold_sent = gold[j + 100]
        for i in range(len(guess_sent)):
            print(guess_sent[i], "--", gold_sent[i])
        print()
170,838 | from math import log
from operator import itemgetter
from nltk.probability import ConditionalFreqDist, FreqDist
from nltk.tag.api import TaggerI
class TnT(TaggerI):
    """
    TnT - Statistical POS tagger

    IMPORTANT NOTES:

    * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS

      - It is possible to provide an untrained POS tagger to
        create tags for unknown words, see __init__ function

    * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT

      - Due to the nature of this tagger, it works best when
        trained over sentence delimited input.
      - However it still produces good results if the training
        data and testing data are separated on all punctuation eg: [,.?!]
      - Input for training is expected to be a list of sentences
        where each sentence is a list of (word, tag) tuples
      - Input for tag function is a single sentence
        Input for tagdata function is a list of sentences
        Output is of a similar form

    * Function provided to process text that is unsegmented

      - Please see basic_sent_chop()

    TnT uses a second order Markov model to produce tags for
    a sequence of input, specifically:

      argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T)

    IE: the maximum projection of a set of probabilities

    The set of possible tags for a given word is derived
    from the training data. It is the set of all tags
    that exact word has been assigned.

    To speed up and get more precision, we can use log addition
    instead of multiplication, specifically:

      argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] +
             log(P(t_T+1|t_T))

    The probability of a tag for a given word is the linear
    interpolation of 3 markov models; a zero-order, first-order,
    and a second order model.

      P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) +
                             l3*P(t_i| t_i-1, t_i-2)

    A beam search is used to limit the memory usage of the algorithm.
    The degree of the beam can be changed using N in the initialization.
    N represents the maximum number of possible solutions to maintain
    while tagging.

    It is possible to differentiate the tags which are assigned to
    capitalized words. However this does not result in a significant
    gain in the accuracy of the results.
    """

    def __init__(self, unk=None, Trained=False, N=1000, C=False):
        """
        Construct a TnT statistical tagger. Tagger must be trained
        before being used to tag input.

        :param unk: instance of a POS tagger, conforms to TaggerI
        :type unk: TaggerI
        :param Trained: Indication that the POS tagger is trained or not
        :type Trained: bool
        :param N: Beam search degree (see above)
        :type N: int
        :param C: Capitalization flag
        :type C: bool

        Initializer, creates frequency distributions to be used
        for tagging

        _lx values represent the portion of the tri/bi/uni taggers
        to be used to calculate the probability

        N value is the number of possible solutions to maintain
        while tagging. A good value for this is 1000

        C is a boolean value which specifies to use or
        not use the Capitalization of the word as additional
        information for tagging.
        NOTE: using capitalization may not increase the accuracy
        of the tagger
        """
        self._uni = FreqDist()            # unigram (tag, C) counts
        self._bi = ConditionalFreqDist()  # bigram counts: (tag, C) given previous
        self._tri = ConditionalFreqDist() # trigram counts: (tag, C) given previous two
        self._wd = ConditionalFreqDist()  # word emission counts: tag given word
        self._eos = ConditionalFreqDist() # end-of-sentence counts per final tag
        # interpolation weights; filled in by _compute_lambda() after training
        self._l1 = 0.0
        self._l2 = 0.0
        self._l3 = 0.0
        self._N = N
        self._C = C
        self._T = Trained

        self._unk = unk

        # statistical tools (ignore or delete me)
        self.unknown = 0
        self.known = 0

    def train(self, data):
        """
        Uses a set of tagged data to train the tagger.
        If an unknown word tagger is specified,
        it is trained on the same data.

        :param data: List of lists of (word, tag) tuples
        :type data: tuple(str)
        """
        # Ensure that local C flag is initialized before use
        C = False

        if self._unk is not None and self._T == False:
            self._unk.train(data)

        for sent in data:
            # two beginning-of-sentence markers seed the trigram context
            history = [("BOS", False), ("BOS", False)]
            for w, t in sent:

                # if capitalization is requested,
                # and the word begins with a capital
                # set local flag C to True
                if self._C and w[0].isupper():
                    C = True

                self._wd[w][t] += 1
                self._uni[(t, C)] += 1
                self._bi[history[1]][(t, C)] += 1
                self._tri[tuple(history)][(t, C)] += 1

                history.append((t, C))
                history.pop(0)

                # set local flag C to false for the next word
                C = False

            # NOTE(review): `t` here is the last tag of the sentence; this
            # assumes every sentence in `data` is non-empty — confirm.
            self._eos[t]["EOS"] += 1

        # compute lambda values from the trained frequency distributions
        self._compute_lambda()

    def _compute_lambda(self):
        """
        creates lambda values based upon training data

        NOTE: no need to explicitly reference C,
        it is contained within the tag variable :: tag == (tag,C)

        for each tag trigram (t1, t2, t3)
        depending on the maximum value of
        - f(t1,t2,t3)-1 / f(t1,t2)-1
        - f(t2,t3)-1 / f(t2)-1
        - f(t3)-1 / N-1

        increment l3,l2, or l1 by f(t1,t2,t3)

        ISSUES -- Resolutions:
        if 2 values are equal, increment both lambda values
        by (f(t1,t2,t3) / 2)
        """

        # temporary lambda variables
        tl1 = 0.0
        tl2 = 0.0
        tl3 = 0.0

        # for each t1,t2 in system
        for history in self._tri.conditions():
            (h1, h2) = history

            # for each t3 given t1,t2 in system
            # (NOTE: tag actually represents (tag,C))
            # However no effect within this function
            for tag in self._tri[history].keys():

                # if there has only been 1 occurrence of this tag in the data
                # then ignore this trigram.
                if self._uni[tag] == 1:
                    continue

                # safe_div provides a safe floating point division
                # it returns -1 if the denominator is 0
                c3 = self._safe_div(
                    (self._tri[history][tag] - 1), (self._tri[history].N() - 1)
                )
                c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1))
                c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1))

                # if c1 is the maximum value:
                if (c1 > c3) and (c1 > c2):
                    tl1 += self._tri[history][tag]

                # if c2 is the maximum value
                elif (c2 > c3) and (c2 > c1):
                    tl2 += self._tri[history][tag]

                # if c3 is the maximum value
                elif (c3 > c2) and (c3 > c1):
                    tl3 += self._tri[history][tag]

                # if c3, and c2 are equal and larger than c1
                elif (c3 == c2) and (c3 > c1):
                    tl2 += self._tri[history][tag] / 2.0
                    tl3 += self._tri[history][tag] / 2.0

                # if c1, and c2 are equal and larger than c3
                # this might be a dumb thing to do....(not sure yet)
                elif (c2 == c1) and (c1 > c3):
                    tl1 += self._tri[history][tag] / 2.0
                    tl2 += self._tri[history][tag] / 2.0

                # otherwise there might be a problem
                # eg: all values = 0
                else:
                    pass

        # Lambda normalisation:
        # ensures that l1+l2+l3 = 1
        self._l1 = tl1 / (tl1 + tl2 + tl3)
        self._l2 = tl2 / (tl1 + tl2 + tl3)
        self._l3 = tl3 / (tl1 + tl2 + tl3)

    def _safe_div(self, v1, v2):
        """
        Safe floating point division function, does not allow division by 0
        returns -1 if the denominator is 0
        """
        if v2 == 0:
            return -1
        else:
            return v1 / v2

    def tagdata(self, data):
        """
        Tags each sentence in a list of sentences

        :param data:list of list of words
        :type data: [[string,],]
        :return: list of list of (word, tag) tuples

        Invokes tag(sent) function for each sentence
        compiles the results into a list of tagged sentences
        each tagged sentence is a list of (word, tag) tuples
        """
        res = []
        for sent in data:
            res1 = self.tag(sent)
            res.append(res1)
        return res

    def tag(self, data):
        """
        Tags a single sentence

        :param data: list of words
        :type data: [string,]
        :return: [(word, tag),]

        Calls recursive function '_tagword'
        to produce a list of tags

        Associates the sequence of returned tags
        with the correct words in the input sequence

        returns a list of (word, tag) tuples
        """
        current_state = [(["BOS", "BOS"], 0.0)]

        sent = list(data)

        tags = self._tagword(sent, current_state)

        res = []
        for i in range(len(sent)):
            # tags[0:2] are the two BOS markers, so the tag for word i is
            # at offset i + 2; unpack and discard the C flag.
            (t, C) = tags[i + 2]
            res.append((sent[i], t))

        return res

    def _tagword(self, sent, current_states):
        """
        :param sent : List of words remaining in the sentence
        :type sent : [word,]
        :param current_states : List of possible tag combinations for
                                the sentence so far, and the log probability
                                associated with each tag combination
        :type current_states : [([tag, ], logprob), ]

        Tags the first word in the sentence and
        recursively tags the remainder of the sentence

        Uses formula specified above to calculate the probability
        of a particular tag
        """
        # if this word marks the end of the sentence,
        # return the most probable tag
        if sent == []:
            (h, logp) = current_states[0]
            return h

        # otherwise there are more words to be tagged
        word = sent[0]
        sent = sent[1:]
        new_states = []

        # if the Capitalisation is requested,
        # initialise the flag for this word
        C = False
        if self._C and word[0].isupper():
            C = True

        # if word is known
        # compute the set of possible tags
        # and their associated log probabilities
        if word in self._wd:
            self.known += 1

            for (history, curr_sent_logprob) in current_states:
                logprobs = []  # NOTE(review): unused; retained verbatim

                for t in self._wd[word].keys():
                    tC = (t, C)
                    p_uni = self._uni.freq(tC)
                    p_bi = self._bi[history[-1]].freq(tC)
                    p_tri = self._tri[tuple(history[-2:])].freq(tC)
                    p_wd = self._wd[word][t] / self._uni[tC]
                    p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri
                    p2 = log(p, 2) + log(p_wd, 2)

                    # compute the result of appending each tag to this history
                    new_states.append((history + [tC], curr_sent_logprob + p2))

        # otherwise a new word, set of possible tags is unknown
        else:
            self.unknown += 1

            # since a set of possible tags,
            # and the probability of each specific tag
            # can not be returned from most classifiers:
            # specify that any unknown words are tagged with certainty
            p = 1

            # if no unknown word tagger has been specified
            # then use the tag 'Unk'
            if self._unk is None:
                tag = ("Unk", C)

            # otherwise apply the unknown word tagger
            else:
                [(_w, t)] = list(self._unk.tag([word]))
                tag = (t, C)

            # histories are extended in place (log(1) == 0 adds nothing to
            # the log-probabilities), so new_states aliases current_states
            for (history, logprob) in current_states:
                history.append(tag)

            new_states = current_states

        # now have computed a set of possible new_states

        # sort states by log prob
        # set is now ordered greatest to least log probability
        new_states.sort(reverse=True, key=itemgetter(1))

        # del everything after N (threshold)
        # this is the beam search cut
        if len(new_states) > self._N:
            new_states = new_states[: self._N]

        # compute the tags for the rest of the sentence
        # return the best list of tags for the sentence
        return self._tagword(sent, new_states)
# Combined WSJ section of the Penn Treebank (bracketed parse files);
# LazyCorpusLoader defers reading the corpus files until first access.
treebank: BracketParseCorpusReader = LazyCorpusLoader(
    "treebank/combined",
    BracketParseCorpusReader,
    r"wsj_.*\.mrg",
    tagset="wsj",
    encoding="ascii",
)
def demo2():
    """
    Compare TnT accuracy with and without the capitalization feature on
    ten 100-sentence slices of the Penn Treebank, printing accuracy and
    known/unknown word percentages for each slice.
    """
    from nltk.corpus import treebank

    data = list(treebank.tagged_sents())

    plain = TnT(N=1000, C=False)
    capitalized = TnT(N=1000, C=True)

    # hold out the first 1100 sentences; train on the rest
    plain.train(data[(11) * 100 :])
    capitalized.train(data[(11) * 100 :])

    for block in range(10):
        chunk = data[block * 100 : ((block + 1) * 100)]

        tacc = plain.accuracy(chunk)
        tp_un = plain.unknown / (plain.known + plain.unknown)
        tp_kn = plain.known / (plain.known + plain.unknown)
        # reset the per-run counters before the next slice
        plain.unknown = 0
        plain.known = 0

        print("Capitalization off:")
        print("Accuracy:", tacc)
        print("Percentage known:", tp_kn)
        print("Percentage unknown:", tp_un)
        print("Accuracy over known words:", (tacc / tp_kn))

        sacc = capitalized.accuracy(chunk)
        sp_un = capitalized.unknown / (capitalized.known + capitalized.unknown)
        sp_kn = capitalized.known / (capitalized.known + capitalized.unknown)
        capitalized.unknown = 0
        capitalized.known = 0

        print("Capitalization on:")
        print("Accuracy:", sacc)
        print("Percentage known:", sp_kn)
        print("Percentage unknown:", sp_un)
        print("Accuracy over known words:", (sacc / sp_kn))
170,839 | from math import log
from operator import itemgetter
from nltk.probability import ConditionalFreqDist, FreqDist
from nltk.tag.api import TaggerI
class TnT(TaggerI):
"""
TnT - Statistical POS tagger
IMPORTANT NOTES:
* DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS
- It is possible to provide an untrained POS tagger to
create tags for unknown words, see __init__ function
* SHOULD BE USED WITH SENTENCE-DELIMITED INPUT
- Due to the nature of this tagger, it works best when
trained over sentence delimited input.
- However it still produces good results if the training
data and testing data are separated on all punctuation eg: [,.?!]
- Input for training is expected to be a list of sentences
where each sentence is a list of (word, tag) tuples
- Input for tag function is a single sentence
Input for tagdata function is a list of sentences
Output is of a similar form
* Function provided to process text that is unsegmented
- Please see basic_sent_chop()
TnT uses a second order Markov model to produce tags for
a sequence of input, specifically:
argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T)
IE: the maximum projection of a set of probabilities
The set of possible tags for a given word is derived
from the training data. It is the set of all tags
that exact word has been assigned.
To speed up and get more precision, we can use log addition
to instead multiplication, specifically:
argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] +
log(P(t_T+1|t_T))
The probability of a tag for a given word is the linear
interpolation of 3 markov models; a zero-order, first-order,
and a second order model.
P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) +
l3*P(t_i| t_i-1, t_i-2)
A beam search is used to limit the memory usage of the algorithm.
The degree of the beam can be changed using N in the initialization.
N represents the maximum number of possible solutions to maintain
while tagging.
It is possible to differentiate the tags which are assigned to
capitalized words. However this does not result in a significant
gain in the accuracy of the results.
"""
def __init__(self, unk=None, Trained=False, N=1000, C=False):
"""
Construct a TnT statistical tagger. Tagger must be trained
before being used to tag input.
:param unk: instance of a POS tagger, conforms to TaggerI
:type unk: TaggerI
:param Trained: Indication that the POS tagger is trained or not
:type Trained: bool
:param N: Beam search degree (see above)
:type N: int
:param C: Capitalization flag
:type C: bool
Initializer, creates frequency distributions to be used
for tagging
_lx values represent the portion of the tri/bi/uni taggers
to be used to calculate the probability
N value is the number of possible solutions to maintain
while tagging. A good value for this is 1000
C is a boolean value which specifies to use or
not use the Capitalization of the word as additional
information for tagging.
NOTE: using capitalization may not increase the accuracy
of the tagger
"""
self._uni = FreqDist()
self._bi = ConditionalFreqDist()
self._tri = ConditionalFreqDist()
self._wd = ConditionalFreqDist()
self._eos = ConditionalFreqDist()
self._l1 = 0.0
self._l2 = 0.0
self._l3 = 0.0
self._N = N
self._C = C
self._T = Trained
self._unk = unk
# statistical tools (ignore or delete me)
self.unknown = 0
self.known = 0
def train(self, data):
"""
Uses a set of tagged data to train the tagger.
If an unknown word tagger is specified,
it is trained on the same data.
:param data: List of lists of (word, tag) tuples
:type data: tuple(str)
"""
# Ensure that local C flag is initialized before use
C = False
if self._unk is not None and self._T == False:
self._unk.train(data)
for sent in data:
history = [("BOS", False), ("BOS", False)]
for w, t in sent:
# if capitalization is requested,
# and the word begins with a capital
# set local flag C to True
if self._C and w[0].isupper():
C = True
self._wd[w][t] += 1
self._uni[(t, C)] += 1
self._bi[history[1]][(t, C)] += 1
self._tri[tuple(history)][(t, C)] += 1
history.append((t, C))
history.pop(0)
# set local flag C to false for the next word
C = False
self._eos[t]["EOS"] += 1
# compute lambda values from the trained frequency distributions
self._compute_lambda()
def _compute_lambda(self):
"""
creates lambda values based upon training data
NOTE: no need to explicitly reference C,
it is contained within the tag variable :: tag == (tag,C)
for each tag trigram (t1, t2, t3)
depending on the maximum value of
- f(t1,t2,t3)-1 / f(t1,t2)-1
- f(t2,t3)-1 / f(t2)-1
- f(t3)-1 / N-1
increment l3,l2, or l1 by f(t1,t2,t3)
ISSUES -- Resolutions:
if 2 values are equal, increment both lambda values
by (f(t1,t2,t3) / 2)
"""
# temporary lambda variables
tl1 = 0.0
tl2 = 0.0
tl3 = 0.0
# for each t1,t2 in system
for history in self._tri.conditions():
(h1, h2) = history
# for each t3 given t1,t2 in system
# (NOTE: tag actually represents (tag,C))
# However no effect within this function
for tag in self._tri[history].keys():
# if there has only been 1 occurrence of this tag in the data
# then ignore this trigram.
if self._uni[tag] == 1:
continue
# safe_div provides a safe floating point division
# it returns -1 if the denominator is 0
c3 = self._safe_div(
(self._tri[history][tag] - 1), (self._tri[history].N() - 1)
)
c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1))
c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1))
# if c1 is the maximum value:
if (c1 > c3) and (c1 > c2):
tl1 += self._tri[history][tag]
# if c2 is the maximum value
elif (c2 > c3) and (c2 > c1):
tl2 += self._tri[history][tag]
# if c3 is the maximum value
elif (c3 > c2) and (c3 > c1):
tl3 += self._tri[history][tag]
# if c3, and c2 are equal and larger than c1
elif (c3 == c2) and (c3 > c1):
tl2 += self._tri[history][tag] / 2.0
tl3 += self._tri[history][tag] / 2.0
# if c1, and c2 are equal and larger than c3
# this might be a dumb thing to do....(not sure yet)
elif (c2 == c1) and (c1 > c3):
tl1 += self._tri[history][tag] / 2.0
tl2 += self._tri[history][tag] / 2.0
# otherwise there might be a problem
# eg: all values = 0
else:
pass
# Lambda normalisation:
# ensures that l1+l2+l3 = 1
self._l1 = tl1 / (tl1 + tl2 + tl3)
self._l2 = tl2 / (tl1 + tl2 + tl3)
self._l3 = tl3 / (tl1 + tl2 + tl3)
def _safe_div(self, v1, v2):
"""
Safe floating point division function, does not allow division by 0
returns -1 if the denominator is 0
"""
if v2 == 0:
return -1
else:
return v1 / v2
def tagdata(self, data):
"""
Tags each sentence in a list of sentences
:param data:list of list of words
:type data: [[string,],]
:return: list of list of (word, tag) tuples
Invokes tag(sent) function for each sentence
compiles the results into a list of tagged sentences
each tagged sentence is a list of (word, tag) tuples
"""
res = []
for sent in data:
res1 = self.tag(sent)
res.append(res1)
return res
def tag(self, data):
"""
Tags a single sentence
:param data: list of words
:type data: [string,]
:return: [(word, tag),]
Calls recursive function '_tagword'
to produce a list of tags
Associates the sequence of returned tags
with the correct words in the input sequence
returns a list of (word, tag) tuples
"""
current_state = [(["BOS", "BOS"], 0.0)]
sent = list(data)
tags = self._tagword(sent, current_state)
res = []
for i in range(len(sent)):
# unpack and discard the C flags
(t, C) = tags[i + 2]
res.append((sent[i], t))
return res
def _tagword(self, sent, current_states):
    """
    Tag the first word of ``sent`` and recurse on the remainder.

    :param sent: List of words remaining in the sentence
    :type sent: [word,]
    :param current_states: List of possible tag combinations for
        the sentence so far, and the log probability
        associated with each tag combination
    :type current_states: [([tag, ], logprob), ]

    Implements a beam search: each candidate history is extended with
    every plausible tag for the current word (scored by interpolated
    uni/bi/trigram probabilities times the lexical probability), the
    candidates are sorted by log probability, and only the best
    ``self._N`` are kept before recursing.
    """
    # Base case: no words left.  States are kept sorted best-first, so
    # the first history is the most probable tag sequence.
    if sent == []:
        (h, logp) = current_states[0]
        return h

    # otherwise there are more words to be tagged
    word = sent[0]
    sent = sent[1:]
    new_states = []

    # If capitalisation tracking is enabled, flag capitalised words;
    # the flag becomes part of every (tag, C) pair for this word.
    C = False
    if self._C and word[0].isupper():
        C = True

    # Known word: score every tag observed with this word in training.
    if word in self._wd:
        self.known += 1

        for (history, curr_sent_logprob) in current_states:
            logprobs = []

            for t in self._wd[word].keys():
                tC = (t, C)
                # Linear interpolation of uni-, bi- and trigram
                # estimates, weighted by the lambdas fitted in training.
                p_uni = self._uni.freq(tC)
                p_bi = self._bi[history[-1]].freq(tC)
                p_tri = self._tri[tuple(history[-2:])].freq(tC)
                # Lexical probability P(word | tag).
                p_wd = self._wd[word][t] / self._uni[tC]
                p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri
                p2 = log(p, 2) + log(p_wd, 2)

                # compute the result of appending each tag to this history
                new_states.append((history + [tC], curr_sent_logprob + p2))

    # Unknown word: delegate to the unknown-word tagger, or fall back
    # to the literal tag 'Unk'.
    else:
        self.unknown += 1

        # since a set of possible tags,
        # and the probability of each specific tag
        # can not be returned from most classifiers:
        # specify that any unknown words are tagged with certainty
        p = 1

        # if no unknown word tagger has been specified
        # then use the tag 'Unk'
        if self._unk is None:
            tag = ("Unk", C)

        # otherwise apply the unknown word tagger
        else:
            [(_w, t)] = list(self._unk.tag([word]))
            tag = (t, C)

        # Every history gets the same single tag, so the candidate set
        # does not branch here; histories are extended in place.
        for (history, logprob) in current_states:
            history.append(tag)

        new_states = current_states

    # now have computed a set of possible new_states

    # sort states by log prob
    # set is now ordered greatest to least log probability
    new_states.sort(reverse=True, key=itemgetter(1))

    # del everything after N (threshold)
    # this is the beam search cut
    if len(new_states) > self._N:
        new_states = new_states[: self._N]

    # compute the tags for the rest of the sentence
    # return the best list of tags for the sentence
    return self._tagword(sent, new_states)
# Lazily-instantiated corpus readers used by the demo code below; the
# underlying corpus files are only read on first attribute access.
brown: CategorizedTaggedCorpusReader = LazyCorpusLoader(
    "brown",
    CategorizedTaggedCorpusReader,
    r"c[a-z]\d\d",
    cat_file="cats.txt",
    tagset="brown",
    encoding="ascii",
)
treebank: BracketParseCorpusReader = LazyCorpusLoader(
    "treebank/combined",
    BracketParseCorpusReader,
    r"wsj_.*\.mrg",
    tagset="wsj",
    encoding="ascii",
)
def demo3():
    """
    10-fold cross-validation of the TnT tagger on 1000-sentence samples
    of the Treebank and Brown corpora.  For each corpus it reports
    (as percentages, hence the ``10 *`` scaling of the fold sums):
    accuracy over known words, overall accuracy, and the proportion of
    known words.
    """
    from nltk.corpus import brown, treebank

    # `d`/`t` work on treebank data, `e`/`s` on brown data.
    d = list(treebank.tagged_sents())
    e = list(brown.tagged_sents())

    d = d[:1000]
    e = e[:1000]

    # Fold size: 10% of each corpus sample.
    d10 = int(len(d) * 0.1)
    e10 = int(len(e) * 0.1)

    tknacc = 0
    sknacc = 0
    tallacc = 0
    sallacc = 0
    tknown = 0
    sknown = 0

    for i in range(10):
        t = TnT(N=1000, C=False)
        s = TnT(N=1000, C=False)

        dtest = d[(i * d10) : ((i + 1) * d10)]
        etest = e[(i * e10) : ((i + 1) * e10)]

        dtrain = d[: (i * d10)] + d[((i + 1) * d10) :]
        etrain = e[: (i * e10)] + e[((i + 1) * e10) :]

        t.train(dtrain)
        s.train(etrain)

        tacc = t.accuracy(dtest)
        tp_kn = t.known / (t.known + t.unknown)
        tknown += tp_kn
        t.unknown = 0
        t.known = 0

        sacc = s.accuracy(etest)
        sp_kn = s.known / (s.known + s.unknown)
        sknown += sp_kn
        s.unknown = 0
        s.known = 0

        tknacc += tacc / tp_kn
        # BUG FIX: the brown model's accuracy was previously normalised
        # by tp_kn (the treebank model's known-word proportion).
        sknacc += sacc / sp_kn
        tallacc += tacc
        sallacc += sacc

    # Labels fixed: `t` is trained/tested on treebank, `s` on brown
    # (they were previously printed the other way around).
    print("treebank: acc over words known:", 10 * tknacc)
    print("        : overall accuracy:", 10 * tallacc)
    print("        : words known:", 10 * tknown)
    print("brown: acc over words known:", 10 * sknacc)
    print("     : overall accuracy:", 10 * sallacc)
    print("     : words known:", 10 * sknown)
170,840 | from nltk.chat.util import Chat, reflections
def eliza_chat():
    """Print the therapist banner, then start an Eliza chatbot session."""
    banner = (
        "Therapist\n---------",
        "Talk to the program by typing in plain English, using normal upper-",
        'and lower-case letters and punctuation. Enter "quit" when done.',
        "=" * 72,
        "Hello. How are you feeling today?",
    )
    for line in banner:
        print(line)

    eliza_chatbot.converse()
def demo():
    """Launch the interactive Eliza chatbot session."""
    eliza_chat()
170,841 | from nltk.chat.util import Chat, reflections
def zen_chat():
    """Print the Zen Chatbot banner, then start an interactive session."""
    rule = "*" * 75
    header = "\n".join(
        [
            rule,
            "Zen Chatbot!".center(75),
            rule,
            '"Look beyond mere words and letters - look into your mind"'.center(75),
            "* Talk your way to truth with Zen Chatbot.",
            "* Type 'quit' when you have had enough.",
            rule,
            "Welcome, my child.",
        ]
    )
    print(header)

    zen_chatbot.converse()
def demo():
    """Launch the interactive Zen chatbot session."""
    zen_chat()
170,842 | from nltk.chat.util import Chat, reflections
def rude_chat():
    """Print the usage banner, then start the rude chatbot session."""
    intro = (
        "Talk to the program by typing in plain English, using normal upper-",
        'and lower-case letters and punctuation. Enter "quit" when done.',
        "=" * 72,
        "I suppose I should say hello.",
    )
    for line in intro:
        print(line)

    rude_chatbot.converse()
def demo():
    """Launch the interactive rude chatbot session."""
    rude_chat()
170,843 | from nltk.chat.util import Chat, reflections
def suntsu_chat():
    """Print the usage banner, then start the Sun Tsu chatbot session."""
    for line in (
        "Talk to the program by typing in plain English, using normal upper-",
        'and lower-case letters and punctuation. Enter "quit" when done.',
        "=" * 72,
        "You seek enlightenment?",
    ):
        print(line)

    suntsu_chatbot.converse()
def demo():
    """Launch the interactive Sun Tsu chatbot session."""
    suntsu_chat()
170,844 | from nltk.chat.util import Chat
def iesha_chat():
    """Print the teen-bot banner, then start the Iesha chatbot session."""
    greeting = "\n".join(
        [
            "Iesha the TeenBoT\n---------",
            "Talk to the program by typing in plain English, using normal upper-",
            'and lower-case letters and punctuation. Enter "quit" when done.',
            "=" * 72,
            "hi!! i'm iesha! who r u??!",
        ]
    )
    print(greeting)

    iesha_chatbot.converse()
def demo():
    """Launch the interactive Iesha chatbot session."""
    iesha_chat()
170,845 | import os
from functools import wraps
def add_py3_data(path):
def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]:
def py3_data(init_func):
def _decorator(*args, **kwargs):
args = (args[0], add_py3_data(args[1])) + args[2:]
return init_func(*args, **kwargs)
return wraps(init_func)(_decorator) | null |
170,846 | import math
import nltk.classify.util
from nltk.util import LazyMap
The provided code snippet includes necessary dependencies for implementing the `apply_features` function. Write a Python function `def apply_features(feature_func, toks, labeled=None)` to solve the following problem:
Use the ``LazyMap`` class to construct a lazy list-like object that is analogous to ``map(feature_func, toks)``. In particular, if ``labeled=False``, then the returned list-like object's values are equal to:: [feature_func(tok) for tok in toks] If ``labeled=True``, then the returned list-like object's values are equal to:: [(feature_func(tok), label) for (tok, label) in toks] The primary purpose of this function is to avoid the memory overhead involved in storing all the featuresets for every token in a corpus. Instead, these featuresets are constructed lazily, as-needed. The reduction in memory overhead can be especially significant when the underlying list of tokens is itself lazy (as is the case with many corpus readers). :param feature_func: The function that will be applied to each token. It should return a featureset -- i.e., a dict mapping feature names to feature values. :param toks: The list of tokens to which ``feature_func`` should be applied. If ``labeled=True``, then the list elements will be passed directly to ``feature_func()``. If ``labeled=False``, then the list elements should be tuples ``(tok,label)``, and ``tok`` will be passed to ``feature_func()``. :param labeled: If true, then ``toks`` contains labeled tokens -- i.e., tuples of the form ``(tok, label)``. (Default: auto-detect based on types.)
Here is the function:
def apply_features(feature_func, toks, labeled=None):
    """
    Lazily map ``feature_func`` over ``toks``.

    Returns a ``LazyMap`` whose values are ``feature_func(tok)`` for
    each token when ``labeled`` is false, or
    ``(feature_func(tok), label)`` for each ``(tok, label)`` pair when
    ``labeled`` is true.  Featuresets are built on demand rather than
    stored up front, which keeps memory use low for large (and possibly
    lazy) token streams.

    :param feature_func: Function mapping a token to a featureset
        (a dict of feature name -> feature value).
    :param toks: The tokens to featurize; ``(tok, label)`` pairs when
        ``labeled`` is true.
    :param labeled: Whether ``toks`` contains labeled tokens.  When
        ``None``, auto-detected from the type of the first element.
    """
    if labeled is None:
        # Auto-detect: a tuple/list first element means (tok, label).
        labeled = toks and isinstance(toks[0], (tuple, list))

    if not labeled:
        return LazyMap(feature_func, toks)

    def lazy_func(labeled_token):
        tok, label = labeled_token
        return (feature_func(tok), label)

    return LazyMap(lazy_func, toks)
170,847 | import math
import nltk.classify.util
from nltk.util import LazyMap
The provided code snippet includes necessary dependencies for implementing the `attested_labels` function. Write a Python function `def attested_labels(tokens)` to solve the following problem:
:return: A list of all labels that are attested in the given list of tokens. :rtype: list of (immutable) :param tokens: The list of classified tokens from which to extract labels. A classified token has the form ``(token, label)``. :type tokens: list
Here is the function:
def attested_labels(tokens):
    """
    :return: A list of all labels that are attested in the given list
        of tokens.
    :rtype: list of (immutable)

    :param tokens: The list of classified tokens from which to extract
        labels.  A classified token has the form ``(token, label)``.
    :type tokens: list
    """
    seen = set()
    for _tok, label in tokens:
        seen.add(label)
    return tuple(seen)
170,848 | import math
import nltk.classify.util
from nltk.util import LazyMap
def accuracy(classifier, gold):
    """
    Return the fraction of labeled ``gold`` items that ``classifier``
    predicts correctly, or 0 when ``gold`` is empty.
    """
    predictions = classifier.classify_many([featureset for (featureset, _label) in gold])
    matches = [expected == got for ((_fs, expected), got) in zip(gold, predictions)]
    if not matches:
        return 0
    return sum(matches) / len(matches)
# Cache of (instance, sense) lists keyed by target word, so repeated
# wsd_demo() runs do not re-read the Senseval corpus.
_inst_cache = {}
# Lazily-loaded Senseval-2 corpus reader.
senseval: SensevalCorpusReader = LazyCorpusLoader(
    "senseval", SensevalCorpusReader, r"(?!\.).*\.pos"
)
def wsd_demo(trainer, word, features, n=1000):
    """
    Train and evaluate a word-sense-disambiguation classifier on the
    Senseval-2 instances for ``word``.

    :param trainer: callable mapping a list of (featureset, label)
        pairs to a trained classifier.
    :param word: Senseval lexical item to disambiguate.
    :param features: feature extractor applied to each instance.
    :param n: maximum number of instances to use (80/20 train/test
        split after a seeded shuffle).
    :return: the trained classifier.
    """
    import random

    from nltk.corpus import senseval

    # Get the instances.
    print("Reading data...")
    global _inst_cache
    if word not in _inst_cache:
        # Each instance is labeled with its first listed sense.
        _inst_cache[word] = [(i, i.senses[0]) for i in senseval.instances(word)]
    instances = _inst_cache[word][:]
    if n > len(instances):
        n = len(instances)
    senses = list({l for (i, l) in instances})
    print(" Senses: " + " ".join(senses))

    # Randomly split the names into a test & train set.
    print("Splitting into test & train...")
    random.seed(123456)
    random.shuffle(instances)
    train = instances[: int(0.8 * n)]
    test = instances[int(0.8 * n) : n]

    # Train up a classifier.
    print("Training classifier...")
    classifier = trainer([(features(i), l) for (i, l) in train])

    # Run the classifier on the test data.
    print("Testing classifier...")
    acc = accuracy(classifier, [(features(i), l) for (i, l) in test])
    print("Accuracy: %6.4f" % acc)

    # For classifiers that can find probabilities, show the log
    # likelihood and some sample probability distributions.
    try:
        # NOTE(review): the loop variable `n` here shadows the `n`
        # parameter; harmless (the parameter is no longer used) but
        # worth renaming at the next interface-safe opportunity.
        test_featuresets = [features(i) for (i, n) in test]
        pdists = classifier.prob_classify_many(test_featuresets)
        ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)]
        print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
    except NotImplementedError:
        pass

    # Return the classifier
    return classifier
170,849 | import math
import nltk.classify.util
from nltk.util import LazyMap
The provided code snippet includes necessary dependencies for implementing the `check_megam_config` function. Write a Python function `def check_megam_config()` to solve the following problem:
Checks whether the MEGAM binary is configured.
Here is the function:
def check_megam_config():
    """
    Check whether the MEGAM binary has been configured.

    :raises NameError: if ``config_megam()`` has not been called yet
        (i.e. the module-level ``_megam_bin`` name is unbound), with a
        message explaining how to configure it.
    """
    try:
        _megam_bin
    except NameError as e:
        # Redundant str() wrapper around the literal removed.
        err_msg = (
            "Please configure your megam binary first, e.g.\n"
            ">>> nltk.config_megam('/usr/bin/local/megam')"
        )
        raise NameError(err_msg) from e
170,850 | from nltk.classify.maxent import MaxentClassifier
from nltk.classify.util import accuracy
from nltk.tokenize import RegexpTokenizer
def rte_featurize(rte_pairs):
    """Convert each RTE pair into a ``(featureset, gold label)`` tuple."""
    featurized = []
    for pair in rte_pairs:
        featurized.append((rte_features(pair), pair.value))
    return featurized
class MaxentClassifier(ClassifierI):
    """
    A maximum entropy classifier (also known as a "conditional
    exponential classifier").  This classifier is parameterized by a
    set of "weights", which are used to combine the joint-features
    that are generated from a featureset by an "encoding".  In
    particular, the encoding maps each ``(featureset, label)`` pair to
    a vector.  The probability of each label is then computed using
    the following equation::

                                dotprod(weights, encode(fs,label))
      prob(fs|label) = ---------------------------------------------------
                       sum(dotprod(weights, encode(fs,l)) for l in labels)

    Where ``dotprod`` is the dot product::

      dotprod(a,b) = sum(x*y for (x,y) in zip(a,b))
    """

    def __init__(self, encoding, weights, logarithmic=True):
        """
        Construct a new maxent classifier model.  Typically, new
        classifier models are created using the ``train()`` method.

        :type encoding: MaxentFeatureEncodingI
        :param encoding: An encoding that is used to convert the
            featuresets that are given to the ``classify`` method into
            joint-feature vectors, which are used by the maxent
            classifier model.

        :type weights: list of float
        :param weights: The feature weight vector for this classifier.

        :type logarithmic: bool
        :param logarithmic: If false, then use non-logarithmic weights.
        """
        self._encoding = encoding
        self._weights = weights
        self._logarithmic = logarithmic
        # self._logarithmic = False
        assert encoding.length() == len(weights)

    def labels(self):
        return self._encoding.labels()

    def set_weights(self, new_weights):
        """
        Set the feature weight vector for this classifier.

        :param new_weights: The new feature weight vector.
        :type new_weights: list of float
        """
        self._weights = new_weights
        assert self._encoding.length() == len(new_weights)

    def weights(self):
        """
        :return: The feature weight vector for this classifier.
        :rtype: list of float
        """
        return self._weights

    def classify(self, featureset):
        return self.prob_classify(featureset).max()

    def prob_classify(self, featureset):
        # Score each label by the (log-)linear combination of the
        # weights for the joint-features that fire for (featureset, label).
        prob_dict = {}
        for label in self._encoding.labels():
            feature_vector = self._encoding.encode(featureset, label)

            if self._logarithmic:
                total = 0.0
                for (f_id, f_val) in feature_vector:
                    total += self._weights[f_id] * f_val
                prob_dict[label] = total
            else:
                prod = 1.0
                for (f_id, f_val) in feature_vector:
                    prod *= self._weights[f_id] ** f_val
                prob_dict[label] = prod

        # Normalize the dictionary to give a probability distribution
        return DictionaryProbDist(prob_dict, log=self._logarithmic, normalize=True)

    def explain(self, featureset, columns=4):
        """
        Print a table showing the effect of each of the features in
        the given feature set, and how they combine to determine the
        probabilities of each label for that featureset.
        """
        descr_width = 50
        TEMPLATE = " %-" + str(descr_width - 2) + "s%s%8.3f"

        pdist = self.prob_classify(featureset)
        labels = sorted(pdist.samples(), key=pdist.prob, reverse=True)
        labels = labels[:columns]
        print(
            " Feature".ljust(descr_width)
            + "".join("%8s" % (("%s" % l)[:7]) for l in labels)
        )
        print(" " + "-" * (descr_width - 2 + 8 * len(labels)))
        sums = defaultdict(int)
        for i, label in enumerate(labels):
            feature_vector = self._encoding.encode(featureset, label)
            # Show the largest-magnitude weights first.
            feature_vector.sort(
                key=lambda fid__: abs(self._weights[fid__[0]]), reverse=True
            )
            for (f_id, f_val) in feature_vector:
                if self._logarithmic:
                    score = self._weights[f_id] * f_val
                else:
                    score = self._weights[f_id] ** f_val
                descr = self._encoding.describe(f_id)
                descr = descr.split(" and label is ")[0]  # hack
                descr += " (%s)" % f_val  # hack
                if len(descr) > 47:
                    descr = descr[:44] + "..."
                print(TEMPLATE % (descr, i * 8 * " ", score))
                sums[label] += score
        print(" " + "-" * (descr_width - 1 + 8 * len(labels)))
        print(
            " TOTAL:".ljust(descr_width) + "".join("%8.3f" % sums[l] for l in labels)
        )
        print(
            " PROBS:".ljust(descr_width)
            + "".join("%8.3f" % pdist.prob(l) for l in labels)
        )

    def most_informative_features(self, n=10):
        """
        Generates the ranked list of informative features from most to least.
        """
        # Rank once, then cache for subsequent calls.
        if hasattr(self, "_most_informative_features"):
            return self._most_informative_features[:n]
        else:
            self._most_informative_features = sorted(
                list(range(len(self._weights))),
                key=lambda fid: abs(self._weights[fid]),
                reverse=True,
            )
            return self._most_informative_features[:n]

    def show_most_informative_features(self, n=10, show="all"):
        """
        :param show: all, neg, or pos (for negative-only or positive-only)
        :type show: str
        :param n: The no. of top features
        :type n: int
        """
        # Pass None to get the full list of ranked features.
        fids = self.most_informative_features(None)
        if show == "pos":
            fids = [fid for fid in fids if self._weights[fid] > 0]
        elif show == "neg":
            fids = [fid for fid in fids if self._weights[fid] < 0]
        for fid in fids[:n]:
            print(f"{self._weights[fid]:8.3f} {self._encoding.describe(fid)}")

    def __repr__(self):
        return "<ConditionalExponentialClassifier: %d labels, %d features>" % (
            len(self._encoding.labels()),
            self._encoding.length(),
        )

    #: A list of the algorithm names that are accepted for the
    #: ``train()`` method's ``algorithm`` parameter.
    ALGORITHMS = ["GIS", "IIS", "MEGAM", "TADM"]

    # FIX: restored the @classmethod decorator.  Without it,
    # ``MaxentClassifier.train(train_toks, algorithm)`` binds the
    # training data to ``cls`` and the algorithm name to ``train_toks``.
    @classmethod
    def train(
        cls,
        train_toks,
        algorithm=None,
        trace=3,
        encoding=None,
        labels=None,
        gaussian_prior_sigma=0,
        **cutoffs,
    ):
        """
        Train a new maxent classifier based on the given corpus of
        training samples.  This classifier will have its weights
        chosen to maximize entropy while remaining empirically
        consistent with the training corpus.

        :rtype: MaxentClassifier
        :return: The new maxent classifier

        :type train_toks: list
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a featureset,
            and the second of which is a classification label.

        :type algorithm: str
        :param algorithm: A case-insensitive string, specifying which
            algorithm should be used to train the classifier.  The
            following algorithms are currently available.

            - Iterative Scaling Methods: Generalized Iterative Scaling (``'GIS'``),
              Improved Iterative Scaling (``'IIS'``)
            - External Libraries (requiring megam):
              LM-BFGS algorithm, with training performed by Megam (``'megam'``)

            The default algorithm is ``'IIS'``.

        :type trace: int
        :param trace: The level of diagnostic tracing output to produce.
            Higher values produce more verbose output.

        :type encoding: MaxentFeatureEncodingI
        :param encoding: A feature encoding, used to convert featuresets
            into feature vectors.  If none is specified, then a
            ``BinaryMaxentFeatureEncoding`` will be built based on the
            features that are attested in the training corpus.

        :type labels: list(str)
        :param labels: The set of possible labels.  If none is given, then
            the set of all labels attested in the training data will be
            used instead.

        :param gaussian_prior_sigma: The sigma value for a gaussian
            prior on model weights.  Currently, this is supported by
            ``megam``.  For other algorithms, its value is ignored.

        :param cutoffs: Arguments specifying various conditions under
            which the training should be halted.  (Some of the cutoff
            conditions are not supported by some algorithms.)

            - ``max_iter=v``: Terminate after ``v`` iterations.
            - ``min_ll=v``: Terminate after the negative average
              log-likelihood drops under ``v``.
            - ``min_lldelta=v``: Terminate if a single iteration improves
              log likelihood by less than ``v``.
        """
        if algorithm is None:
            algorithm = "iis"
        for key in cutoffs:
            if key not in (
                "max_iter",
                "min_ll",
                "min_lldelta",
                "max_acc",
                "min_accdelta",
                "count_cutoff",
                "norm",
                "explicit",
                "bernoulli",
            ):
                raise TypeError("Unexpected keyword arg %r" % key)
        algorithm = algorithm.lower()
        if algorithm == "iis":
            return train_maxent_classifier_with_iis(
                train_toks, trace, encoding, labels, **cutoffs
            )
        elif algorithm == "gis":
            return train_maxent_classifier_with_gis(
                train_toks, trace, encoding, labels, **cutoffs
            )
        elif algorithm == "megam":
            return train_maxent_classifier_with_megam(
                train_toks, trace, encoding, labels, gaussian_prior_sigma, **cutoffs
            )
        elif algorithm == "tadm":
            kwargs = cutoffs
            kwargs["trace"] = trace
            kwargs["encoding"] = encoding
            kwargs["labels"] = labels
            kwargs["gaussian_prior_sigma"] = gaussian_prior_sigma
            return TadmMaxentClassifier.train(train_toks, **kwargs)
        else:
            raise ValueError("Unknown algorithm %s" % algorithm)
def accuracy(classifier, gold):
    """
    Compute the classifier's accuracy against the labeled ``gold``
    data: the proportion of items whose predicted label matches the
    gold label (0 when ``gold`` is empty).
    """
    predicted = classifier.classify_many([fs for fs, _ in gold])
    n_correct = 0
    n_total = 0
    for (_fs, expected), got in zip(gold, predicted):
        n_total += 1
        if expected == got:
            n_correct += 1
    return n_correct / n_total if n_total else 0
rte: RTECorpusReader = LazyCorpusLoader("rte", RTECorpusReader, r"(?!\.).*\.xml")
def rte_classifier(algorithm, sample_N=None):
    """
    Train and evaluate a MaxentClassifier on the RTE 1-3 challenge
    data, printing progress and the resulting test accuracy.

    :param algorithm: one of 'megam', 'GIS' or 'IIS'.
    :param sample_N: optional cap on the number of train/test pairs.
    :return: the trained classifier.
    """
    from nltk.corpus import rte as rte_corpus

    train_set = rte_corpus.pairs(["rte1_dev.xml", "rte2_dev.xml", "rte3_dev.xml"])
    test_set = rte_corpus.pairs(["rte1_test.xml", "rte2_test.xml", "rte3_test.xml"])

    if sample_N is not None:
        train_set = train_set[:sample_N]
        test_set = test_set[:sample_N]

    featurized_train_set = rte_featurize(train_set)
    featurized_test_set = rte_featurize(test_set)

    # Train the classifier
    print("Training classifier...")
    # Guard clause replaces the duplicated megam/GIS/IIS branches:
    # every supported algorithm is handled identically by MaxentClassifier.
    if algorithm not in ("megam", "GIS", "IIS"):
        err_msg = (
            "RTEClassifier only supports these algorithms:\n "
            "'megam', 'GIS', 'IIS'.\n"
        )
        raise Exception(err_msg)
    clf = MaxentClassifier.train(featurized_train_set, algorithm)

    print("Testing classifier...")
    acc = accuracy(clf, featurized_test_set)
    print("Accuracy: %6.4f" % acc)
    return clf
170,851 | from sys import maxsize
from nltk.util import trigrams
class TextCat:
    """
    Character-trigram language identifier using the "out-of-place"
    rank-distance measure over the Crubadan trigram corpus.
    """

    # Crubadan corpus reader, bound in __init__.
    _corpus = None
    # Class-level cache of trigram profiles (shared across instances).
    fingerprints = {}
    # Boundary markers wrapped around each token before trigram extraction.
    _START_CHAR = "<"
    _END_CHAR = ">"

    # Per-language distances from the most recent guess_language() call.
    last_distances = {}

    def __init__(self):
        # `re` is expected to be the third-party `regex` module bound at
        # module level (and falsy when unavailable); the \p{P} Unicode
        # property used below requires it -- TODO confirm the module-level
        # import, which is outside this excerpt.
        if not re:
            raise OSError(
                "classify.textcat requires the regex module that "
                "supports unicode. Try '$ pip install regex' and "
                "see https://pypi.python.org/pypi/regex for "
                "further details."
            )

        from nltk.corpus import crubadan

        self._corpus = crubadan
        # Load all language ngrams into cache
        for lang in self._corpus.langs():
            self._corpus.lang_freq(lang)

    def remove_punctuation(self, text):
        """Get rid of punctuation except apostrophes"""
        return re.sub(r"[^\P{P}\']+", "", text)

    def profile(self, text):
        """Create FreqDist of trigrams within text"""
        from nltk import FreqDist, word_tokenize

        clean_text = self.remove_punctuation(text)
        tokens = word_tokenize(clean_text)

        fingerprint = FreqDist()
        for t in tokens:
            # Pad each token with boundary markers so word-edge trigrams
            # are distinguished from word-internal ones.
            token_trigram_tuples = trigrams(self._START_CHAR + t + self._END_CHAR)
            token_trigrams = ["".join(tri) for tri in token_trigram_tuples]

            for cur_trigram in token_trigrams:
                if cur_trigram in fingerprint:
                    fingerprint[cur_trigram] += 1
                else:
                    fingerprint[cur_trigram] = 1

        return fingerprint

    def calc_dist(self, lang, trigram, text_profile):
        """Calculate the "out-of-place" measure between the
        text and language profile for a single trigram"""

        lang_fd = self._corpus.lang_freq(lang)
        dist = 0

        if trigram in lang_fd:
            # Rank difference of the trigram in the two profiles.
            # NOTE(review): relies on FreqDist key iteration order
            # reflecting frequency rank -- confirm for the nltk/crubadan
            # versions in use.
            idx_lang_profile = list(lang_fd.keys()).index(trigram)
            idx_text = list(text_profile.keys()).index(trigram)

            # print(idx_lang_profile, ", ", idx_text)
            dist = abs(idx_lang_profile - idx_text)
        else:
            # Arbitrary but should be larger than
            # any possible trigram file length
            # in terms of total lines
            dist = maxsize

        return dist

    def lang_dists(self, text):
        """Calculate the "out-of-place" measure between
        the text and all languages"""

        distances = {}
        profile = self.profile(text)
        # For all the languages
        for lang in self._corpus._all_lang_freq.keys():
            # Calculate distance metric for every trigram in
            # input text to be identified
            lang_dist = 0
            for trigram in profile:
                lang_dist += self.calc_dist(lang, trigram, profile)

            distances[lang] = lang_dist

        return distances

    def guess_language(self, text):
        """Find the language with the min distance
        to the text and return its ISO 639-3 code"""
        self.last_distances = self.lang_dists(text)

        return min(self.last_distances, key=self.last_distances.get)
#################################################
udhr: UdhrCorpusReader = LazyCorpusLoader("udhr", UdhrCorpusReader)
def demo():
    """
    Language-identification demo: build a text sample for each of a
    handful of UDHR translations, run TextCat over it, and print the
    detected ISO 639-3 code with a friendly name.
    """
    from nltk.corpus import udhr

    langs = [
        "Kurdish-UTF8",
        "Abkhaz-UTF8",
        "Farsi_Persian-UTF8",
        "Hindi-UTF8",
        "Hawaiian-UTF8",
        "Russian-UTF8",
        "Vietnamese-UTF8",
        "Serbian_Srpski-UTF8",
        "Esperanto-UTF8",
    ]

    friendly = {
        "kmr": "Northern Kurdish",
        "abk": "Abkhazian",
        "pes": "Iranian Persian",
        "hin": "Hindi",
        "haw": "Hawaiian",
        "rus": "Russian",
        "vie": "Vietnamese",
        "srp": "Serbian",
        "epo": "Esperanto",
    }

    tc = TextCat()

    for cur_lang in langs:
        # Get raw data from UDHR corpus
        raw_sentences = udhr.sents(cur_lang)
        rows = len(raw_sentences) - 1

        # Build one sample string; each token is prefixed with a space.
        # (The final sentence is skipped, matching the historical
        # behaviour of this demo.)
        sample = ""
        for sentence in raw_sentences[:rows]:
            sample += "".join(" " + token for token in sentence)

        # Try to detect what it is
        print("Language snippet: " + sample[0:140] + "...")
        guess = tc.guess_language(sample)
        print(f"Language detection: {guess} ({friendly[guess]})")
        print("#" * 140)
170,852 | from collections import defaultdict
from nltk.classify.naivebayes import NaiveBayesClassifier
from nltk.probability import DictionaryProbDist, ELEProbDist, FreqDist
class PositiveNaiveBayesClassifier(NaiveBayesClassifier):
    """
    Naive Bayes variant trained from positive and unlabeled examples
    only.  Negative feature probabilities are derived by subtracting
    the prior-weighted positive distribution from the distribution
    estimated over the unlabeled data.
    """

    # FIX: restored the @staticmethod decorator so that ``train`` can be
    # passed around as a bare trainer callback (and called on instances)
    # without an implicit first argument being bound.
    @staticmethod
    def train(
        positive_featuresets,
        unlabeled_featuresets,
        positive_prob_prior=0.5,
        estimator=ELEProbDist,
    ):
        """
        :param positive_featuresets: An iterable of featuresets that are known as positive
            examples (i.e., their label is ``True``).

        :param unlabeled_featuresets: An iterable of featuresets whose label is unknown.

        :param positive_prob_prior: A prior estimate of the probability of the label
            ``True`` (default 0.5).
        """
        positive_feature_freqdist = defaultdict(FreqDist)
        unlabeled_feature_freqdist = defaultdict(FreqDist)
        feature_values = defaultdict(set)
        fnames = set()

        # Count up how many times each feature value occurred in positive examples.
        num_positive_examples = 0
        for featureset in positive_featuresets:
            for fname, fval in featureset.items():
                positive_feature_freqdist[fname][fval] += 1
                feature_values[fname].add(fval)
                fnames.add(fname)
            num_positive_examples += 1

        # Count up how many times each feature value occurred in unlabeled examples.
        num_unlabeled_examples = 0
        for featureset in unlabeled_featuresets:
            for fname, fval in featureset.items():
                unlabeled_feature_freqdist[fname][fval] += 1
                feature_values[fname].add(fval)
                fnames.add(fname)
            num_unlabeled_examples += 1

        # If a feature didn't have a value given for an instance, then we assume that
        # it gets the implicit value 'None'.
        for fname in fnames:
            count = positive_feature_freqdist[fname].N()
            positive_feature_freqdist[fname][None] += num_positive_examples - count
            feature_values[fname].add(None)

        for fname in fnames:
            count = unlabeled_feature_freqdist[fname].N()
            unlabeled_feature_freqdist[fname][None] += num_unlabeled_examples - count
            feature_values[fname].add(None)

        negative_prob_prior = 1.0 - positive_prob_prior

        # Create the P(label) distribution.
        label_probdist = DictionaryProbDist(
            {True: positive_prob_prior, False: negative_prob_prior}
        )

        # Create the P(fval|label, fname) distribution.
        feature_probdist = {}
        for fname, freqdist in positive_feature_freqdist.items():
            probdist = estimator(freqdist, bins=len(feature_values[fname]))
            feature_probdist[True, fname] = probdist

        for fname, freqdist in unlabeled_feature_freqdist.items():
            global_probdist = estimator(freqdist, bins=len(feature_values[fname]))
            negative_feature_probs = {}
            for fval in feature_values[fname]:
                # Estimate P(fval | False) by removing the positive
                # contribution from the overall (unlabeled) estimate.
                prob = (
                    global_probdist.prob(fval)
                    - positive_prob_prior * feature_probdist[True, fname].prob(fval)
                ) / negative_prob_prior
                # TODO: We need to add some kind of smoothing here, instead of
                # setting negative probabilities to zero and normalizing.
                negative_feature_probs[fval] = max(prob, 0.0)
            feature_probdist[False, fname] = DictionaryProbDist(
                negative_feature_probs, normalize=True
            )

        return PositiveNaiveBayesClassifier(label_probdist, feature_probdist)
def partial_names_demo(trainer, features=names_demo_features):
    """
    Train a positive/unlabeled gender classifier on the names corpus
    and print its accuracy (plus sample probabilities when the
    classifier supports them).

    :param trainer: callable taking (positive, unlabeled) featureset
        iterables and returning a trained classifier.
    :param features: feature extractor applied to each name.
    :return: the trained classifier.
    """
    import random

    from nltk.corpus import names

    male_names = names.words("male.txt")
    female_names = names.words("female.txt")

    random.seed(654321)
    random.shuffle(male_names)
    random.shuffle(female_names)

    # Create a list of male names to be used as positive-labeled examples for training
    positive = map(features, male_names[:2000])

    # Create a list of male and female names to be used as unlabeled examples
    unlabeled = map(features, male_names[2000:2500] + female_names[:500])

    # Create a test set with correctly-labeled male and female names
    test = [(name, True) for name in male_names[2500:2750]] + [
        (name, False) for name in female_names[500:750]
    ]

    random.shuffle(test)

    # Train up a classifier.
    print("Training classifier...")
    classifier = trainer(positive, unlabeled)

    # Run the classifier on the test data.
    print("Testing classifier...")
    acc = accuracy(classifier, [(features(n), m) for (n, m) in test])
    print("Accuracy: %6.4f" % acc)

    # For classifiers that can find probabilities, show the log
    # likelihood and some sample probability distributions.
    try:
        test_featuresets = [features(n) for (n, m) in test]
        pdists = classifier.prob_classify_many(test_featuresets)
        ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)]
        print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
        print()
        print("Unseen Names P(Male) P(Female)\n" + "-" * 40)
        # BUG FIX: zip() returns a non-subscriptable iterator on
        # Python 3; materialize the pairs before slicing the first five.
        for ((name, is_male), pdist) in list(zip(test, pdists))[:5]:
            if is_male:
                fmt = " %-15s *%6.4f %6.4f"
            else:
                fmt = " %-15s %6.4f *%6.4f"
            print(fmt % (name, pdist.prob(True), pdist.prob(False)))
    except NotImplementedError:
        pass

    # Return the classifier
    return classifier
def demo():
    """Train a demo positive/unlabeled names classifier and show its
    most informative features."""
    from nltk.classify.util import partial_names_demo

    trained = partial_names_demo(PositiveNaiveBayesClassifier.train)
    trained.show_most_informative_features()
170,853 | try:
import numpy
except ImportError:
pass
import os
import tempfile
from collections import defaultdict
from nltk.classify.api import ClassifierI
from nltk.classify.megam import call_megam, parse_megam_weights, write_megam_file
from nltk.classify.tadm import call_tadm, parse_tadm_weights, write_tadm_file
from nltk.classify.util import CutoffChecker, accuracy, log_likelihood
from nltk.data import gzip_open_unicode
from nltk.probability import DictionaryProbDist
from nltk.util import OrderedDict
# Alias: a maximum entropy classifier is also known as a
# "conditional exponential" classifier.
ConditionalExponentialClassifier = MaxentClassifier
class GISEncoding(BinaryMaxentFeatureEncoding):
    """
    A binary feature encoding which adds one new joint-feature to the
    joint-features defined by ``BinaryMaxentFeatureEncoding``: a
    correction feature, whose value is chosen to ensure that the
    sparse vector always sums to a constant non-negative number.  This
    new feature is used to ensure two preconditions for the GIS
    training algorithm:

      - At least one feature vector index must be nonzero for every
        token.
      - The feature vector must sum to a constant non-negative number
        for every token.
    """

    def __init__(
        self, labels, mapping, unseen_features=False, alwayson_features=False, C=None
    ):
        """
        :param C: The correction constant.  The value of the correction
            feature is based on this value.  In particular, its value is
            ``C - sum([v for (f,v) in encoding])``.
        :seealso: ``BinaryMaxentFeatureEncoding.__init__``
        """
        BinaryMaxentFeatureEncoding.__init__(
            self, labels, mapping, unseen_features, alwayson_features
        )
        if C is None:
            # Default: one more than the number of distinct feature
            # names, which bounds the sum of any encoded vector.
            C = len({fname for (fname, fval, label) in mapping}) + 1
        self._C = C

    @property
    def C(self):
        """The non-negative constant that all encoded feature vectors
        will sum to."""
        # Bug fix: this must be a property, not a plain method --
        # train_maxent_classifier_with_gis uses ``encoding.C`` directly
        # as a number (``Cinv = 1.0 / encoding.C``).
        return self._C

    def encode(self, featureset, label):
        # Get the basic encoding.
        encoding = BinaryMaxentFeatureEncoding.encode(self, featureset, label)
        base_length = BinaryMaxentFeatureEncoding.length(self)

        # Add a correction feature.
        total = sum(v for (f, v) in encoding)
        if total >= self._C:
            raise ValueError("Correction feature is not high enough!")
        encoding.append((base_length, self._C - total))

        # Return the result
        return encoding

    def length(self):
        # One extra slot for the correction feature.
        return BinaryMaxentFeatureEncoding.length(self) + 1

    def describe(self, f_id):
        if f_id == BinaryMaxentFeatureEncoding.length(self):
            return "Correction feature (%s)" % self._C
        else:
            return BinaryMaxentFeatureEncoding.describe(self, f_id)
def calculate_empirical_fcount(train_toks, encoding):
    """Return a vector holding the observed count of each joint-feature
    over all (featureset, label) pairs in ``train_toks``."""
    counts = numpy.zeros(encoding.length(), "d")
    for featureset, label in train_toks:
        for feature_id, feature_val in encoding.encode(featureset, label):
            counts[feature_id] += feature_val
    return counts
def calculate_estimated_fcount(classifier, train_toks, encoding):
    """Return a vector holding the model's expected count of each
    joint-feature: each feature value is weighted by the probability
    the classifier assigns to the corresponding label."""
    expected = numpy.zeros(encoding.length(), "d")
    for featureset, _gold in train_toks:
        dist = classifier.prob_classify(featureset)
        for candidate in dist.samples():
            p = dist.prob(candidate)
            for feature_id, feature_val in encoding.encode(featureset, candidate):
                expected[feature_id] += p * feature_val
    return expected
def log_likelihood(classifier, gold):
    """Return the log of the average probability the classifier
    assigns to the gold labels in ``gold``."""
    featuresets = [fs for (fs, _) in gold]
    dists = classifier.prob_classify_many(featuresets)
    probs = [dist.prob(label) for ((_, label), dist) in zip(gold, dists)]
    return math.log(sum(probs) / len(probs))
def accuracy(classifier, gold):
    """Return the fraction of ``gold`` examples the classifier labels
    correctly (0 when ``gold`` is empty)."""
    predictions = classifier.classify_many([fs for (fs, _) in gold])
    matches = [
        gold_label == predicted
        for ((_, gold_label), predicted) in zip(gold, predictions)
    ]
    if not matches:
        return 0
    return sum(matches) / len(matches)
class CutoffChecker:
    """
    A helper class that implements cutoff checks based on number of
    iterations and log likelihood.

    Accuracy cutoffs are also implemented, but they're almost never
    a good idea to use.
    """

    def __init__(self, cutoffs):
        # Work on a copy so the caller's dict is never mutated.
        # Bug fix: the original normalized the *caller's* dict after
        # copying, so self.cutoffs kept the un-normalized values.
        self.cutoffs = cutoffs.copy()
        if "min_ll" in self.cutoffs:
            # Log likelihood is never positive, so the cutoff must be <= 0.
            self.cutoffs["min_ll"] = -abs(self.cutoffs["min_ll"])
        if "min_lldelta" in self.cutoffs:
            self.cutoffs["min_lldelta"] = abs(self.cutoffs["min_lldelta"])
        self.ll = None  # log likelihood from the previous check
        self.acc = None  # accuracy from the previous check
        self.iter = 1  # current iteration number

    def check(self, classifier, train_toks):
        """
        Return True if training should stop: the iteration limit was
        reached, the log likelihood became NaN, or a log-likelihood /
        accuracy cutoff was satisfied.
        """
        cutoffs = self.cutoffs
        self.iter += 1
        if "max_iter" in cutoffs and self.iter >= cutoffs["max_iter"]:
            return True  # iteration cutoff.

        # NOTE(review): the original called nltk.classify.util.log_likelihood,
        # which is this module-level helper; use it directly.
        new_ll = log_likelihood(classifier, train_toks)
        if math.isnan(new_ll):
            return True

        if "min_ll" in cutoffs or "min_lldelta" in cutoffs:
            if "min_ll" in cutoffs and new_ll >= cutoffs["min_ll"]:
                return True  # log likelihood cutoff
            if (
                "min_lldelta" in cutoffs
                and self.ll
                and ((new_ll - self.ll) <= abs(cutoffs["min_lldelta"]))
            ):
                return True  # log likelihood delta cutoff
            self.ll = new_ll

        if "max_acc" in cutoffs or "min_accdelta" in cutoffs:
            # Bug fix: the original computed log_likelihood here, so the
            # accuracy cutoffs compared against the wrong statistic.
            new_acc = accuracy(classifier, train_toks)
            if "max_acc" in cutoffs and new_acc >= cutoffs["max_acc"]:
                return True  # accuracy cutoff
            if (
                "min_accdelta" in cutoffs
                and self.acc
                and ((new_acc - self.acc) <= abs(cutoffs["min_accdelta"]))
            ):
                return True  # accuracy delta cutoff
            self.acc = new_acc

        return False  # no cutoff reached.
The provided code snippet includes the necessary dependencies for implementing the `train_maxent_classifier_with_gis` function. Write a Python function `def train_maxent_classifier_with_gis(train_toks, trace=3, encoding=None, labels=None, **cutoffs)` to solve the following problem:
Train a new ``ConditionalExponentialClassifier``, using the given training samples, using the Generalized Iterative Scaling algorithm. This ``ConditionalExponentialClassifier`` will encode the model that maximizes entropy from all the models that are empirically consistent with ``train_toks``. :see: ``train_maxent_classifier()`` for parameter descriptions.
Here is the function:
def train_maxent_classifier_with_gis(
    train_toks, trace=3, encoding=None, labels=None, **cutoffs
):
    """
    Train a new ``ConditionalExponentialClassifier``, using the given
    training samples, using the Generalized Iterative Scaling
    algorithm.  This ``ConditionalExponentialClassifier`` will encode
    the model that maximizes entropy from all the models that are
    empirically consistent with ``train_toks``.

    :see: ``train_maxent_classifier()`` for parameter descriptions.
    """
    cutoffs.setdefault("max_iter", 100)
    cutoffchecker = CutoffChecker(cutoffs)

    # Construct an encoding from the training data.
    if encoding is None:
        encoding = GISEncoding.train(train_toks, labels=labels)

    if not hasattr(encoding, "C"):
        raise TypeError(
            "The GIS algorithm requires an encoding that "
            "defines C (e.g., GISEncoding)."
        )

    # Cinv is the inverse of the sum of each joint feature vector.
    # This controls the learning rate: higher Cinv (or lower C) gives
    # faster learning.
    Cinv = 1.0 / encoding.C

    # Count how many times each feature occurs in the training data.
    empirical_fcount = calculate_empirical_fcount(train_toks, encoding)

    # Check for any features that are not attested in train_toks.
    unattested = set(numpy.nonzero(empirical_fcount == 0)[0])

    # Build the classifier.  Start with weight=0 for each attested
    # feature, and weight=-infinity for each unattested feature.
    weights = numpy.zeros(len(empirical_fcount), "d")
    for fid in unattested:
        # numpy.NINF was removed in NumPy 2.0; -numpy.inf is equivalent.
        weights[fid] = -numpy.inf
    classifier = ConditionalExponentialClassifier(encoding, weights)

    # Take the log of the empirical fcount (unattested features yield
    # -inf here, which matches their -inf weights).
    log_empirical_fcount = numpy.log2(empirical_fcount)
    del empirical_fcount

    if trace > 0:
        print(" ==> Training (%d iterations)" % cutoffs["max_iter"])
    if trace > 2:
        print()
        print(" Iteration Log Likelihood Accuracy")
        print(" ---------------------------------------")

    # Train the classifier.
    try:
        while True:
            if trace > 2:
                ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
                acc = cutoffchecker.acc or accuracy(classifier, train_toks)
                iternum = cutoffchecker.iter
                print(" %9d %14.5f %9.3f" % (iternum, ll, acc))

            # Use the model to estimate the number of times each
            # feature should occur in the training data.
            estimated_fcount = calculate_estimated_fcount(
                classifier, train_toks, encoding
            )

            # Take the log of estimated fcount (avoid taking log(0).)
            for fid in unattested:
                estimated_fcount[fid] += 1
            log_estimated_fcount = numpy.log2(estimated_fcount)
            del estimated_fcount

            # Update the classifier weights
            weights = classifier.weights()
            weights += (log_empirical_fcount - log_estimated_fcount) * Cinv
            classifier.set_weights(weights)

            # Check the log-likelihood & accuracy cutoffs.
            if cutoffchecker.check(classifier, train_toks):
                break

    except KeyboardInterrupt:
        print(" Training stopped: keyboard interrupt")
    # (Removed a no-op ``except: raise`` clause: re-raising everything
    # unchanged is the default behavior.)

    if trace > 2:
        ll = log_likelihood(classifier, train_toks)
        acc = accuracy(classifier, train_toks)
        print(f" Final {ll:14.5f} {acc:9.3f}")

    # Return the classifier.
    return classifier
170,854 | try:
import numpy
except ImportError:
pass
import os
import tempfile
from collections import defaultdict
from nltk.classify.api import ClassifierI
from nltk.classify.megam import call_megam, parse_megam_weights, write_megam_file
from nltk.classify.tadm import call_tadm, parse_tadm_weights, write_tadm_file
from nltk.classify.util import CutoffChecker, accuracy, log_likelihood
from nltk.data import gzip_open_unicode
from nltk.probability import DictionaryProbDist
from nltk.util import OrderedDict
# Alias: a maximum entropy classifier is also known as a
# "conditional exponential" classifier.
ConditionalExponentialClassifier = MaxentClassifier
class BinaryMaxentFeatureEncoding(MaxentFeatureEncodingI):
    """
    A feature encoding that generates vectors containing a binary
    joint-features of the form:

    |  joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
    |                      {
    |                      { 0 otherwise

    Where ``fname`` is the name of an input-feature, ``fval`` is a value
    for that input-feature, and ``label`` is a label.

    Typically, these features are constructed based on a training
    corpus, using the ``train()`` method.  This method will create one
    feature for each combination of ``fname``, ``fval``, and ``label``
    that occurs at least once in the training corpus.

    The ``unseen_features`` parameter can be used to add "unseen-value
    features", which are used whenever an input feature has a value
    that was not encountered in the training corpus.  These features
    have the form:

    |  joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
    |                      {      and l == label
    |                      {
    |                      { 0 otherwise

    Where ``is_unseen(fname, fval)`` is true if the encoding does not
    contain any joint features that are true when ``fs[fname]==fval``.

    The ``alwayson_features`` parameter can be used to add "always-on
    features", which have the form::

    |  joint_feat(fs, l) = { 1 if (l == label)
    |                      {
    |                      { 0 otherwise

    These always-on features allow the maxent model to directly model
    the prior probabilities of each label.
    """

    def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
        """
        :param labels: A list of the \"known labels\" for this encoding.

        :param mapping: A dictionary mapping from ``(fname,fval,label)``
            tuples to corresponding joint-feature indexes.  These
            indexes must be the set of integers from 0...len(mapping).
            If ``mapping[fname,fval,label]=id``, then
            ``self.encode(..., fname:fval, ..., label)[id]`` is 1;
            otherwise, it is 0.

        :param unseen_features: If true, then include unseen value
            features in the generated joint-feature vectors.

        :param alwayson_features: If true, then include always-on
            features in the generated joint-feature vectors.
        """
        if set(mapping.values()) != set(range(len(mapping))):
            raise ValueError(
                "Mapping values must be exactly the "
                "set of integers from 0...len(mapping)"
            )

        self._labels = list(labels)
        """A list of attested labels."""

        self._mapping = mapping
        """dict mapping from (fname,fval,label) -> fid"""

        self._length = len(mapping)
        """The length of generated joint feature vectors."""

        self._alwayson = None
        """dict mapping from label -> fid"""

        self._unseen = None
        """dict mapping from fname -> fid"""

        if alwayson_features:
            self._alwayson = {
                label: i + self._length for (i, label) in enumerate(labels)
            }
            self._length += len(self._alwayson)

        if unseen_features:
            fnames = {fname for (fname, fval, label) in mapping}
            self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)}
            self._length += len(fnames)

    def encode(self, featureset, label):
        # Inherit docs.
        encoding = []

        # Convert input-features to joint-features:
        for fname, fval in featureset.items():

            # Known feature name & value:
            if (fname, fval, label) in self._mapping:
                encoding.append((self._mapping[fname, fval, label], 1))

            # Otherwise, we might want to fire an "unseen-value feature".
            elif self._unseen:
                # Have we seen this fname/fval combination with any label?
                for label2 in self._labels:
                    if (fname, fval, label2) in self._mapping:
                        break  # we've seen this fname/fval combo
                # We haven't -- fire the unseen-value feature
                else:
                    if fname in self._unseen:
                        encoding.append((self._unseen[fname], 1))

        # Add always-on features:
        if self._alwayson and label in self._alwayson:
            encoding.append((self._alwayson[label], 1))

        return encoding

    def describe(self, f_id):
        # Inherit docs.
        if not isinstance(f_id, int):
            raise TypeError("describe() expected an int")
        try:
            self._inv_mapping
        except AttributeError:
            # Build the inverse mapping lazily, on first use.
            self._inv_mapping = [-1] * len(self._mapping)
            for (info, i) in self._mapping.items():
                self._inv_mapping[i] = info

        if f_id < len(self._mapping):
            (fname, fval, label) = self._inv_mapping[f_id]
            return f"{fname}=={fval!r} and label is {label!r}"
        elif self._alwayson and f_id in self._alwayson.values():
            for (label, f_id2) in self._alwayson.items():
                if f_id == f_id2:
                    return "label is %r" % label
        elif self._unseen and f_id in self._unseen.values():
            for (fname, f_id2) in self._unseen.items():
                if f_id == f_id2:
                    return "%s is unseen" % fname
        else:
            raise ValueError("Bad feature id")

    def labels(self):
        # Inherit docs.
        return self._labels

    def length(self):
        # Inherit docs.
        return self._length

    # Bug fix: this must be a classmethod -- callers invoke it on the
    # class itself (e.g. ``BinaryMaxentFeatureEncoding.train(...)`` and
    # ``GISEncoding.train(...)``), and the body calls ``cls(...)``.
    @classmethod
    def train(cls, train_toks, count_cutoff=0, labels=None, **options):
        """
        Construct and return new feature encoding, based on a given
        training corpus ``train_toks``.  See the class description
        ``BinaryMaxentFeatureEncoding`` for a description of the
        joint-features that will be included in this encoding.

        :type train_toks: list(tuple(dict, str))
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a feature dictionary,
            and the second of which is a classification label.

        :type count_cutoff: int
        :param count_cutoff: A cutoff value that is used to discard
            rare joint-features.  If a joint-feature's value is 1
            fewer than ``count_cutoff`` times in the training corpus,
            then that joint-feature is not included in the generated
            encoding.

        :type labels: list
        :param labels: A list of labels that should be used by the
            classifier.  If not specified, then the set of labels
            attested in ``train_toks`` will be used.

        :param options: Extra parameters for the constructor, such as
            ``unseen_features`` and ``alwayson_features``.
        """
        mapping = {}  # maps (fname, fval, label) -> fid
        seen_labels = set()  # The set of labels we've encountered
        count = defaultdict(int)  # maps (fname, fval) -> count

        for (tok, label) in train_toks:
            if labels and label not in labels:
                raise ValueError("Unexpected label %s" % label)
            seen_labels.add(label)

            # Record each of the features.
            for (fname, fval) in tok.items():

                # If a count cutoff is given, then only add a joint
                # feature once the corresponding (fname, fval, label)
                # tuple exceeds that cutoff.
                count[fname, fval] += 1
                if count[fname, fval] >= count_cutoff:
                    if (fname, fval, label) not in mapping:
                        mapping[fname, fval, label] = len(mapping)

        if labels is None:
            labels = seen_labels
        return cls(labels, mapping, **options)
def calculate_empirical_fcount(train_toks, encoding):
    """Sum each joint-feature's value over every labeled training
    token, yielding the empirical feature-count vector."""
    fcount = numpy.zeros(encoding.length(), "d")
    for (tok, label) in train_toks:
        encoded = encoding.encode(tok, label)
        for fid, fval in encoded:
            fcount[fid] = fcount[fid] + fval
    return fcount
def calculate_nfmap(train_toks, encoding):
    """
    Construct a map that can be used to compress ``nf`` (which is
    typically sparse).

    *nf(feature_vector)* is the sum of the feature values for
    *feature_vector*, i.e. the number of features active for a given
    labeled text.  This function collects every value of *nf* attested
    for at least one (token, label) combination and maps those values
    onto a contiguous range *0...N*.  For example, if the only attested
    values were 3, 5, and 7, the result might be ``{3:0, 5:1, 7:2}``.

    :return: A map that can be used to compress ``nf`` to a dense
        vector.
    :rtype: dict(int -> int)
    """
    attested = set()
    for featureset, _ in train_toks:
        for label in encoding.labels():
            nf = sum(val for (_, val) in encoding.encode(featureset, label))
            attested.add(nf)
    return {nf: i for (i, nf) in enumerate(attested)}
def calculate_deltas(
    train_toks,
    classifier,
    unattested,
    ffreq_empirical,
    nfmap,
    nfarray,
    nftranspose,
    encoding,
):
    r"""
    Calculate the update values for the classifier weights for
    this iteration of IIS.  These update weights are the value of
    ``delta`` that solves the equation::

      ffreq_empirical[i]
             =
      SUM[fs,l] (classifier.prob_classify(fs).prob(l) *
                 feature_vector(fs,l)[i] *
                 exp(delta[i] * nf(feature_vector(fs,l))))

    Where:
        - *(fs,l)* is a (featureset, label) tuple from ``train_toks``
        - *feature_vector(fs,l)* = ``encoding.encode(fs,l)``
        - *nf(vector)* = ``sum([val for (id,val) in vector])``

    This method uses Newton's method to solve this equation for
    *delta[i]*.  In particular, it starts with a guess of
    ``delta[i]`` = 1; and iteratively updates ``delta`` with::

        delta[i] -= (ffreq_empirical[i] - sum1[i])/(-sum2[i])

    until convergence, where *sum1* and *sum2* are defined as::

        sum1[i](delta) = SUM[fs,l] f[i](fs,l,delta)
        sum2[i](delta) = SUM[fs,l] (f[i](fs,l,delta).nf(feature_vector(fs,l)))
        f[i](fs,l,delta) = (classifier.prob_classify(fs).prob(l) .
                            feature_vector(fs,l)[i] .
                            exp(delta[i] . nf(feature_vector(fs,l))))

    Note that *sum1* and *sum2* depend on ``delta``; so they need
    to be re-computed each iteration.

    The variables ``nfmap``, ``nfarray``, and ``nftranspose`` are
    used to generate a dense encoding for *nf(ltext)*.  This
    allows ``_deltas`` to calculate *sum1* and *sum2* using
    matrices, which yields a significant performance improvement.

    :param train_toks: The set of training tokens.
    :type train_toks: list(tuple(dict, str))
    :param classifier: The current classifier.
    :type classifier: ClassifierI
    :param ffreq_empirical: An array containing the empirical
        frequency for each feature.  The *i*\ th element of this
        array is the empirical frequency for feature *i*.
    :type ffreq_empirical: sequence of float
    :param unattested: An array that is 1 for features that are
        not attested in the training data; and 0 for features that
        are attested.  In other words, ``unattested[i]==0`` iff
        ``ffreq_empirical[i]==0``.
    :type unattested: sequence of int
    :param nfmap: A map that can be used to compress ``nf`` to a dense
        vector.
    :type nfmap: dict(int -> int)
    :param nfarray: An array that can be used to uncompress ``nf``
        from a dense vector.
    :type nfarray: array(float)
    :param nftranspose: The transpose of ``nfarray``
    :type nftranspose: array(float)
    """
    # These parameters control when we decide that we've
    # converged.  It probably should be possible to set these
    # manually, via keyword arguments to train.
    NEWTON_CONVERGE = 1e-12
    MAX_NEWTON = 300

    # Initial Newton guess: delta[i] = 1 for every joint-feature.
    deltas = numpy.ones(encoding.length(), "d")

    # Precompute the A matrix:
    # A[nf][id] = sum ( p(fs) * p(label|fs) * f(fs,label) )
    # over all label,fs s.t. num_features[label,fs]=nf
    A = numpy.zeros((len(nfmap), encoding.length()), "d")

    # NOTE: the tuple's label is immediately shadowed by the inner
    # loop below; only the featureset (tok) is used from train_toks.
    for tok, label in train_toks:
        dist = classifier.prob_classify(tok)

        for label in encoding.labels():
            # Generate the feature vector
            feature_vector = encoding.encode(tok, label)
            # Find the number of active features
            nf = sum(val for (id, val) in feature_vector)
            # Update the A matrix
            for (id, val) in feature_vector:
                A[nfmap[nf], id] += dist.prob(label) * val
    # Normalize by the number of training tokens (empirical p(fs)).
    A /= len(train_toks)

    # Iteratively solve for delta.  Use the following variables:
    #   - nf_delta[x][y] = nfarray[x] * delta[y]
    #   - exp_nf_delta[x][y] = exp(nf[x] * delta[y])
    #   - nf_exp_nf_delta[x][y] = nf[x] * exp(nf[x] * delta[y])
    #   - sum1[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
    #                       exp(delta[i]nf)
    #   - sum2[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
    #                       nf exp(delta[i]nf)
    for rangenum in range(MAX_NEWTON):
        nf_delta = numpy.outer(nfarray, deltas)
        # NOTE(review): base-2 exponentiation -- presumably matching the
        # model's log-2 probability space; confirm against the trainer.
        exp_nf_delta = 2**nf_delta
        nf_exp_nf_delta = nftranspose * exp_nf_delta
        sum1 = numpy.sum(exp_nf_delta * A, axis=0)
        sum2 = numpy.sum(nf_exp_nf_delta * A, axis=0)

        # Avoid division by zero.
        for fid in unattested:
            sum2[fid] += 1

        # Update the deltas.
        deltas -= (ffreq_empirical - sum1) / -sum2

        # We can stop once we converge.
        n_error = numpy.sum(abs(ffreq_empirical - sum1)) / numpy.sum(abs(deltas))
        if n_error < NEWTON_CONVERGE:
            return deltas
    # Newton's method did not converge within MAX_NEWTON iterations;
    # return the best deltas found so far.
    return deltas
def log_likelihood(classifier, gold):
    """Log of the mean probability assigned to the gold labels."""
    pdists = classifier.prob_classify_many([featureset for featureset, _ in gold])
    total = 0.0
    count = 0
    for (_, label), pdist in zip(gold, pdists):
        total += pdist.prob(label)
        count += 1
    return math.log(total / count)
def accuracy(classifier, gold):
    """Fraction of examples labeled correctly; 0 for an empty set."""
    results = classifier.classify_many([featureset for featureset, _ in gold])
    n_correct = 0
    n_total = 0
    for (_, label), predicted in zip(gold, results):
        n_total += 1
        if label == predicted:
            n_correct += 1
    return n_correct / n_total if n_total else 0
class CutoffChecker:
    """
    A helper class that implements cutoff checks based on number of
    iterations and log likelihood.

    Accuracy cutoffs are also implemented, but they're almost never
    a good idea to use.
    """

    def __init__(self, cutoffs):
        # Copy first, then normalize the copy.  (Bug fix: the original
        # mutated the caller's dict *after* copying, so self.cutoffs
        # never saw the normalized values.)
        self.cutoffs = cutoffs.copy()
        if "min_ll" in self.cutoffs:
            # Log likelihood is never positive; force the cutoff <= 0.
            self.cutoffs["min_ll"] = -abs(self.cutoffs["min_ll"])
        if "min_lldelta" in self.cutoffs:
            self.cutoffs["min_lldelta"] = abs(self.cutoffs["min_lldelta"])
        self.ll = None  # log likelihood seen at the previous check
        self.acc = None  # accuracy seen at the previous check
        self.iter = 1  # current iteration number

    def check(self, classifier, train_toks):
        """
        Return True when training should stop: iteration limit hit,
        NaN log likelihood, or a log-likelihood / accuracy cutoff met.
        """
        cutoffs = self.cutoffs
        self.iter += 1
        if "max_iter" in cutoffs and self.iter >= cutoffs["max_iter"]:
            return True  # iteration cutoff.

        # NOTE(review): originally nltk.classify.util.log_likelihood,
        # i.e. the module-level helper defined above; call it directly.
        new_ll = log_likelihood(classifier, train_toks)
        if math.isnan(new_ll):
            return True

        if "min_ll" in cutoffs or "min_lldelta" in cutoffs:
            if "min_ll" in cutoffs and new_ll >= cutoffs["min_ll"]:
                return True  # log likelihood cutoff
            if (
                "min_lldelta" in cutoffs
                and self.ll
                and ((new_ll - self.ll) <= abs(cutoffs["min_lldelta"]))
            ):
                return True  # log likelihood delta cutoff
            self.ll = new_ll

        if "max_acc" in cutoffs or "min_accdelta" in cutoffs:
            # Bug fix: the original computed log_likelihood here,
            # making the accuracy cutoffs compare the wrong statistic.
            new_acc = accuracy(classifier, train_toks)
            if "max_acc" in cutoffs and new_acc >= cutoffs["max_acc"]:
                return True  # accuracy cutoff
            if (
                "min_accdelta" in cutoffs
                and self.acc
                and ((new_acc - self.acc) <= abs(cutoffs["min_accdelta"]))
            ):
                return True  # accuracy delta cutoff
            self.acc = new_acc

        return False  # no cutoff reached.
The provided code snippet includes the necessary dependencies for implementing the `train_maxent_classifier_with_iis` function. Write a Python function `def train_maxent_classifier_with_iis(train_toks, trace=3, encoding=None, labels=None, **cutoffs)` to solve the following problem:
Train a new ``ConditionalExponentialClassifier``, using the given training samples, using the Improved Iterative Scaling algorithm. This ``ConditionalExponentialClassifier`` will encode the model that maximizes entropy from all the models that are empirically consistent with ``train_toks``. :see: ``train_maxent_classifier()`` for parameter descriptions.
Here is the function:
def train_maxent_classifier_with_iis(
    train_toks, trace=3, encoding=None, labels=None, **cutoffs
):
    """
    Train a new ``ConditionalExponentialClassifier``, using the given
    training samples, using the Improved Iterative Scaling algorithm.
    This ``ConditionalExponentialClassifier`` will encode the model
    that maximizes entropy from all the models that are empirically
    consistent with ``train_toks``.

    :see: ``train_maxent_classifier()`` for parameter descriptions.
    """
    cutoffs.setdefault("max_iter", 100)
    cutoffchecker = CutoffChecker(cutoffs)

    # Construct an encoding from the training data.
    if encoding is None:
        encoding = BinaryMaxentFeatureEncoding.train(train_toks, labels=labels)

    # Count how many times each feature occurs in the training data.
    empirical_ffreq = calculate_empirical_fcount(train_toks, encoding) / len(train_toks)

    # Find the nf map, and related variables nfarray and nfident.
    # nf is the sum of the features for a given labeled text.
    # nfmap compresses this sparse set of values to a dense list.
    # nfarray performs the reverse operation.  nfident is
    # nfarray multiplied by an identity matrix.
    nfmap = calculate_nfmap(train_toks, encoding)
    nfarray = numpy.array(sorted(nfmap, key=nfmap.__getitem__), "d")
    nftranspose = numpy.reshape(nfarray, (len(nfarray), 1))

    # Check for any features that are not attested in train_toks.
    unattested = set(numpy.nonzero(empirical_ffreq == 0)[0])

    # Build the classifier.  Start with weight=0 for each attested
    # feature, and weight=-infinity for each unattested feature.
    weights = numpy.zeros(len(empirical_ffreq), "d")
    for fid in unattested:
        # numpy.NINF was removed in NumPy 2.0; -numpy.inf is equivalent.
        weights[fid] = -numpy.inf
    classifier = ConditionalExponentialClassifier(encoding, weights)

    if trace > 0:
        print(" ==> Training (%d iterations)" % cutoffs["max_iter"])
    if trace > 2:
        print()
        print(" Iteration Log Likelihood Accuracy")
        print(" ---------------------------------------")

    # Train the classifier.
    try:
        while True:
            if trace > 2:
                ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
                acc = cutoffchecker.acc or accuracy(classifier, train_toks)
                iternum = cutoffchecker.iter
                print(" %9d %14.5f %9.3f" % (iternum, ll, acc))

            # Calculate the deltas for this iteration, using Newton's method.
            deltas = calculate_deltas(
                train_toks,
                classifier,
                unattested,
                empirical_ffreq,
                nfmap,
                nfarray,
                nftranspose,
                encoding,
            )

            # Use the deltas to update our weights.
            weights = classifier.weights()
            weights += deltas
            classifier.set_weights(weights)

            # Check the log-likelihood & accuracy cutoffs.
            if cutoffchecker.check(classifier, train_toks):
                break

    except KeyboardInterrupt:
        print(" Training stopped: keyboard interrupt")
    # (Removed a no-op ``except: raise`` clause: re-raising everything
    # unchanged is already the default behavior.)

    if trace > 2:
        ll = log_likelihood(classifier, train_toks)
        acc = accuracy(classifier, train_toks)
        print(f" Final {ll:14.5f} {acc:9.3f}")

    # Return the classifier.
    return classifier
170,855 | try:
import numpy
except ImportError:
pass
import os
import tempfile
from collections import defaultdict
from nltk.classify.api import ClassifierI
from nltk.classify.megam import call_megam, parse_megam_weights, write_megam_file
from nltk.classify.tadm import call_tadm, parse_tadm_weights, write_tadm_file
from nltk.classify.util import CutoffChecker, accuracy, log_likelihood
from nltk.data import gzip_open_unicode
from nltk.probability import DictionaryProbDist
from nltk.util import OrderedDict
class MaxentClassifier(ClassifierI):
"""
A maximum entropy classifier (also known as a "conditional
exponential classifier"). This classifier is parameterized by a
set of "weights", which are used to combine the joint-features
that are generated from a featureset by an "encoding". In
particular, the encoding maps each ``(featureset, label)`` pair to
a vector. The probability of each label is then computed using
the following equation::
dotprod(weights, encode(fs,label))
prob(fs|label) = ---------------------------------------------------
sum(dotprod(weights, encode(fs,l)) for l in labels)
Where ``dotprod`` is the dot product::
dotprod(a,b) = sum(x*y for (x,y) in zip(a,b))
"""
    def __init__(self, encoding, weights, logarithmic=True):
        """
        Construct a new maxent classifier model.  Typically, new
        classifier models are created using the ``train()`` method.

        :type encoding: MaxentFeatureEncodingI
        :param encoding: An encoding that is used to convert the
            featuresets that are given to the ``classify`` method into
            joint-feature vectors, which are used by the maxent
            classifier model.

        :type weights: list of float
        :param weights: The feature weight vector for this classifier.

        :type logarithmic: bool
        :param logarithmic: If false, then use non-logarithmic weights.
        """
        self._encoding = encoding
        self._weights = weights
        self._logarithmic = logarithmic
        # Exactly one weight is required for every joint-feature the
        # encoding can produce.
        assert encoding.length() == len(weights)
    def labels(self):
        """Return the list of labels this classifier can assign
        (delegates to the feature encoding)."""
        return self._encoding.labels()
    def set_weights(self, new_weights):
        """
        Set the feature weight vector for this classifier.

        :param new_weights: The new feature weight vector; must supply
            exactly one weight per joint-feature in the encoding.
        :type new_weights: list of float
        """
        self._weights = new_weights
        # Keep the weight vector aligned with the encoding's features.
        assert self._encoding.length() == len(new_weights)
    def weights(self):
        """
        :return: The feature weight vector for this classifier.
        :rtype: list of float
        """
        # Returned by reference: GIS/IIS training mutates this array
        # in place before calling set_weights().
        return self._weights
    def classify(self, featureset):
        """Return the single most probable label for ``featureset``."""
        return self.prob_classify(featureset).max()
def prob_classify(self, featureset):
prob_dict = {}
for label in self._encoding.labels():
feature_vector = self._encoding.encode(featureset, label)
if self._logarithmic:
total = 0.0
for (f_id, f_val) in feature_vector:
total += self._weights[f_id] * f_val
prob_dict[label] = total
else:
prod = 1.0
for (f_id, f_val) in feature_vector:
prod *= self._weights[f_id] ** f_val
prob_dict[label] = prod
# Normalize the dictionary to give a probability distribution
return DictionaryProbDist(prob_dict, log=self._logarithmic, normalize=True)
    def explain(self, featureset, columns=4):
        """
        Print a table showing the effect of each of the features in
        the given feature set, and how they combine to determine the
        probabilities of each label for that featureset.

        :param columns: Maximum number of label columns to display
            (the most probable labels are shown first).
        """
        # Width reserved for the feature-description column.
        descr_width = 50
        TEMPLATE = " %-" + str(descr_width - 2) + "s%s%8.3f"
        pdist = self.prob_classify(featureset)
        # Show only the `columns` most probable labels.
        labels = sorted(pdist.samples(), key=pdist.prob, reverse=True)
        labels = labels[:columns]
        print(
            " Feature".ljust(descr_width)
            + "".join("%8s" % (("%s" % l)[:7]) for l in labels)
        )
        print(" " + "-" * (descr_width - 2 + 8 * len(labels)))
        sums = defaultdict(int)
        for i, label in enumerate(labels):
            feature_vector = self._encoding.encode(featureset, label)
            # List the strongest (largest |weight|) features first.
            feature_vector.sort(
                key=lambda fid__: abs(self._weights[fid__[0]]), reverse=True
            )
            for (f_id, f_val) in feature_vector:
                # Score contribution: sum term in log space, factor otherwise.
                if self._logarithmic:
                    score = self._weights[f_id] * f_val
                else:
                    score = self._weights[f_id] ** f_val
                descr = self._encoding.describe(f_id)
                descr = descr.split(" and label is ")[0] # hack
                descr += " (%s)" % f_val # hack
                if len(descr) > 47:
                    descr = descr[:44] + "..."
                print(TEMPLATE % (descr, i * 8 * " ", score))
                sums[label] += score
        print(" " + "-" * (descr_width - 1 + 8 * len(labels)))
        print(
            " TOTAL:".ljust(descr_width) + "".join("%8.3f" % sums[l] for l in labels)
        )
        print(
            " PROBS:".ljust(descr_width)
            + "".join("%8.3f" % pdist.prob(l) for l in labels)
        )
def most_informative_features(self, n=10):
"""
Generates the ranked list of informative features from most to least.
"""
if hasattr(self, "_most_informative_features"):
return self._most_informative_features[:n]
else:
self._most_informative_features = sorted(
list(range(len(self._weights))),
key=lambda fid: abs(self._weights[fid]),
reverse=True,
)
return self._most_informative_features[:n]
def show_most_informative_features(self, n=10, show="all"):
"""
:param show: all, neg, or pos (for negative-only or positive-only)
:type show: str
:param n: The no. of top features
:type n: int
"""
# Use None the full list of ranked features.
fids = self.most_informative_features(None)
if show == "pos":
fids = [fid for fid in fids if self._weights[fid] > 0]
elif show == "neg":
fids = [fid for fid in fids if self._weights[fid] < 0]
for fid in fids[:n]:
print(f"{self._weights[fid]:8.3f} {self._encoding.describe(fid)}")
def __repr__(self):
return "<ConditionalExponentialClassifier: %d labels, %d features>" % (
len(self._encoding.labels()),
self._encoding.length(),
)
#: A list of the algorithm names that are accepted for the
#: ``train()`` method's ``algorithm`` parameter.  Matching in
#: ``train()`` is case-insensitive (the argument is lowercased).
ALGORITHMS = ["GIS", "IIS", "MEGAM", "TADM"]
# @classmethod restored: this is invoked as ``MaxentClassifier.train(...)``
# (e.g. ``names_demo(MaxentClassifier.train)``); without the decorator the
# first positional argument would bind to ``cls`` instead of ``train_toks``.
@classmethod
def train(
    cls,
    train_toks,
    algorithm=None,
    trace=3,
    encoding=None,
    labels=None,
    gaussian_prior_sigma=0,
    **cutoffs,
):
    """
    Train a new maxent classifier based on the given corpus of
    training samples.  This classifier will have its weights
    chosen to maximize entropy while remaining empirically
    consistent with the training corpus.

    :rtype: MaxentClassifier
    :return: The new maxent classifier

    :type train_toks: list
    :param train_toks: Training data, represented as a list of
        pairs, the first member of which is a featureset,
        and the second of which is a classification label.
    :type algorithm: str
    :param algorithm: A case-insensitive string, specifying which
        algorithm should be used to train the classifier:
        ``'GIS'``, ``'IIS'`` (the default), ``'megam'`` (requires the
        external megam binary), or ``'tadm'``.
    :type trace: int
    :param trace: The level of diagnostic tracing output to produce.
        Higher values produce more verbose output.
    :type encoding: MaxentFeatureEncodingI
    :param encoding: A feature encoding, used to convert featuresets
        into feature vectors.  If none is specified, then a
        ``BinaryMaxentFeatureEncoding`` will be built based on the
        features that are attested in the training corpus.
    :type labels: list(str)
    :param labels: The set of possible labels.  If none is given, then
        the set of all labels attested in the training data will be
        used instead.
    :param gaussian_prior_sigma: The sigma value for a gaussian
        prior on model weights.  Currently, this is supported by
        ``megam``.  For other algorithms, its value is ignored.
    :param cutoffs: Arguments specifying various conditions under
        which the training should be halted.  (Some of the cutoff
        conditions are not supported by some algorithms.)

        - ``max_iter=v``: Terminate after ``v`` iterations.
        - ``min_ll=v``: Terminate after the negative average
          log-likelihood drops under ``v``.
        - ``min_lldelta=v``: Terminate if a single iteration improves
          log likelihood by less than ``v``.
    """
    if algorithm is None:
        algorithm = "iis"

    # Reject unknown cutoff keywords before dispatching.
    _valid_cutoffs = (
        "max_iter",
        "min_ll",
        "min_lldelta",
        "max_acc",
        "min_accdelta",
        "count_cutoff",
        "norm",
        "explicit",
        "bernoulli",
    )
    for key in cutoffs:
        if key not in _valid_cutoffs:
            raise TypeError("Unexpected keyword arg %r" % key)

    algorithm = algorithm.lower()
    if algorithm == "iis":
        return train_maxent_classifier_with_iis(
            train_toks, trace, encoding, labels, **cutoffs
        )
    if algorithm == "gis":
        return train_maxent_classifier_with_gis(
            train_toks, trace, encoding, labels, **cutoffs
        )
    if algorithm == "megam":
        return train_maxent_classifier_with_megam(
            train_toks, trace, encoding, labels, gaussian_prior_sigma, **cutoffs
        )
    if algorithm == "tadm":
        # TADM takes everything as keyword arguments; reuse the cutoffs
        # dict as the kwargs container (matches historical behavior).
        kwargs = cutoffs
        kwargs["trace"] = trace
        kwargs["encoding"] = encoding
        kwargs["labels"] = labels
        kwargs["gaussian_prior_sigma"] = gaussian_prior_sigma
        return TadmMaxentClassifier.train(train_toks, **kwargs)
    raise ValueError("Unknown algorithm %s" % algorithm)
class BinaryMaxentFeatureEncoding(MaxentFeatureEncodingI):
    """
    A feature encoding that generates vectors containing binary
    joint-features of the form:

    |  joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
    |                      {
    |                      { 0 otherwise

    Where ``fname`` is the name of an input-feature, ``fval`` is a value
    for that input-feature, and ``label`` is a label.

    Typically, these features are constructed based on a training
    corpus, using the ``train()`` method.  This method will create one
    feature for each combination of ``fname``, ``fval``, and ``label``
    that occurs at least once in the training corpus.

    The ``unseen_features`` parameter can be used to add "unseen-value
    features", which are used whenever an input feature has a value
    that was not encountered in the training corpus.  These features
    have the form:

    |  joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
    |                      {      and l == label
    |                      {
    |                      { 0 otherwise

    Where ``is_unseen(fname, fval)`` is true if the encoding does not
    contain any joint features that are true when ``fs[fname]==fval``.

    The ``alwayson_features`` parameter can be used to add "always-on
    features", which have the form::

    |  joint_feat(fs, l) = { 1 if (l == label)
    |                      {
    |                      { 0 otherwise

    These always-on features allow the maxent model to directly model
    the prior probabilities of each label.
    """

    def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
        """
        :param labels: A list of the "known labels" for this encoding.

        :param mapping: A dictionary mapping from ``(fname,fval,label)``
            tuples to corresponding joint-feature indexes.  These
            indexes must be the set of integers from 0...len(mapping).
            If ``mapping[fname,fval,label]=id``, then
            ``self.encode(..., fname:fval, ..., label)[id]`` is 1;
            otherwise, it is 0.

        :param unseen_features: If true, then include unseen value
            features in the generated joint-feature vectors.

        :param alwayson_features: If true, then include always-on
            features in the generated joint-feature vectors.
        """
        if set(mapping.values()) != set(range(len(mapping))):
            raise ValueError(
                "Mapping values must be exactly the "
                "set of integers from 0...len(mapping)"
            )

        self._labels = list(labels)  # A list of attested labels.
        self._mapping = mapping  # dict mapping from (fname,fval,label) -> fid
        self._length = len(mapping)  # Length of generated joint-feature vectors.
        self._alwayson = None  # dict mapping from label -> fid
        self._unseen = None  # dict mapping from fname -> fid

        # Always-on features get the fid range immediately after the mapping.
        if alwayson_features:
            self._alwayson = {
                label: i + self._length for (i, label) in enumerate(labels)
            }
            self._length += len(self._alwayson)

        # Unseen-value features come after the always-on features (if any).
        if unseen_features:
            fnames = {fname for (fname, fval, label) in mapping}
            self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)}
            self._length += len(fnames)

    def encode(self, featureset, label):
        # Inherit docs.
        encoding = []

        # Convert input-features to joint-features:
        for fname, fval in featureset.items():

            # Known feature name & value:
            if (fname, fval, label) in self._mapping:
                encoding.append((self._mapping[fname, fval, label], 1))

            # Otherwise, we might want to fire an "unseen-value feature".
            elif self._unseen:
                # Have we seen this fname/fval combination with any label?
                for label2 in self._labels:
                    if (fname, fval, label2) in self._mapping:
                        break  # we've seen this fname/fval combo
                # We haven't -- fire the unseen-value feature
                else:
                    if fname in self._unseen:
                        encoding.append((self._unseen[fname], 1))

        # Add always-on features:
        if self._alwayson and label in self._alwayson:
            encoding.append((self._alwayson[label], 1))

        return encoding

    def describe(self, f_id):
        # Inherit docs.
        if not isinstance(f_id, int):
            raise TypeError("describe() expected an int")
        try:
            self._inv_mapping
        except AttributeError:
            # Lazily build (and cache) the fid -> (fname, fval, label) table.
            self._inv_mapping = [-1] * len(self._mapping)
            for (info, i) in self._mapping.items():
                self._inv_mapping[i] = info

        if f_id < len(self._mapping):
            (fname, fval, label) = self._inv_mapping[f_id]
            return f"{fname}=={fval!r} and label is {label!r}"
        elif self._alwayson and f_id in self._alwayson.values():
            for (label, f_id2) in self._alwayson.items():
                if f_id == f_id2:
                    return "label is %r" % label
        elif self._unseen and f_id in self._unseen.values():
            for (fname, f_id2) in self._unseen.items():
                if f_id == f_id2:
                    return "%s is unseen" % fname
        else:
            raise ValueError("Bad feature id")

    def labels(self):
        # Inherit docs.
        return self._labels

    def length(self):
        # Inherit docs.
        return self._length

    # @classmethod restored: callers invoke this as
    # ``BinaryMaxentFeatureEncoding.train(train_toks, ...)``; without the
    # decorator ``train_toks`` would bind to ``cls`` and the call would break.
    @classmethod
    def train(cls, train_toks, count_cutoff=0, labels=None, **options):
        """
        Construct and return new feature encoding, based on a given
        training corpus ``train_toks``.  See the class description
        ``BinaryMaxentFeatureEncoding`` for a description of the
        joint-features that will be included in this encoding.

        :type train_toks: list(tuple(dict, str))
        :param train_toks: Training data, represented as a list of
            pairs, the first member of which is a feature dictionary,
            and the second of which is a classification label.

        :type count_cutoff: int
        :param count_cutoff: A cutoff value that is used to discard
            rare joint-features.  If a joint-feature's value is 1
            fewer than ``count_cutoff`` times in the training corpus,
            then that joint-feature is not included in the generated
            encoding.

        :type labels: list
        :param labels: A list of labels that should be used by the
            classifier.  If not specified, then the set of labels
            attested in ``train_toks`` will be used.

        :param options: Extra parameters for the constructor, such as
            ``unseen_features`` and ``alwayson_features``.
        """
        mapping = {}  # maps (fname, fval, label) -> fid
        seen_labels = set()  # The set of labels we've encountered
        count = defaultdict(int)  # maps (fname, fval) -> count

        for (tok, label) in train_toks:
            if labels and label not in labels:
                raise ValueError("Unexpected label %s" % label)
            seen_labels.add(label)

            # Record each of the features.
            for (fname, fval) in tok.items():

                # If a count cutoff is given, then only add a joint
                # feature once the corresponding (fname, fval, label)
                # tuple exceeds that cutoff.
                count[fname, fval] += 1
                if count[fname, fval] >= count_cutoff:
                    if (fname, fval, label) not in mapping:
                        mapping[fname, fval, label] = len(mapping)

        if labels is None:
            labels = seen_labels
        return cls(labels, mapping, **options)
def write_megam_file(train_toks, encoding, stream, bernoulli=True, explicit=True):
    """
    Generate an input file for ``megam`` based on the given corpus of
    classified tokens.

    :type train_toks: list(tuple(dict, str))
    :param train_toks: Training data, represented as a list of
        pairs, the first member of which is a feature dictionary,
        and the second of which is a classification label.

    :type encoding: MaxentFeatureEncodingI
    :param encoding: A feature encoding, used to convert featuresets
        into feature vectors.  May optionally implement a cost() method
        in order to assign different costs to different class predictions.

    :type stream: stream
    :param stream: The stream to which the megam input file should be
        written.

    :param bernoulli: If true, then use the 'bernoulli' format.  I.e.,
        all joint features have binary values, and are listed iff they
        are true.  Otherwise, list feature values explicitly.  If
        ``bernoulli=False``, then you must call ``megam`` with the
        ``-fvals`` option.

    :param explicit: If true, then use the 'explicit' format.  I.e.,
        list the features that would fire for any of the possible
        labels, for each token.  If ``explicit=True``, then you must
        call ``megam`` with the ``-explicit`` option.
    """
    # Look up the set of labels.
    known_labels = encoding.labels()
    label_index = {lab: i for (i, lab) in enumerate(known_labels)}
    has_costs = hasattr(encoding, "cost")

    # Write the file, which contains one line per instance.
    for featureset, label in train_toks:
        # Line prefix: per-label costs when the encoding supports them
        # (weighted multiclass), otherwise the gold label's number.
        if has_costs:
            stream.write(
                ":".join(
                    str(encoding.cost(featureset, label, lab))
                    for lab in known_labels
                )
            )
        else:
            stream.write("%d" % label_index[label])

        if explicit:
            # Explicit format: list the features that would fire for
            # each of the candidate labels.
            for lab in known_labels:
                stream.write(" #")
                _write_megam_features(
                    encoding.encode(featureset, lab), stream, bernoulli
                )
        else:
            # Implicit format: only the features for the actual label.
            _write_megam_features(
                encoding.encode(featureset, label), stream, bernoulli
            )

        # End of the instance.
        stream.write("\n")
def parse_megam_weights(s, features_count, explicit=True):
    """
    Given the stdout output generated by ``megam`` when training a
    model, return a ``numpy`` array containing the corresponding weight
    vector.  This function does not currently handle bias features.
    """
    if numpy is None:
        raise ValueError("This function requires that numpy be installed")
    assert explicit, "non-explicit not supported yet"
    weights = numpy.zeros(features_count, "d")
    # Each non-blank line is "<feature id> <weight>".
    for line in s.strip().split("\n"):
        if not line.strip():
            continue
        fid, weight = line.split()
        weights[int(fid)] = float(weight)
    return weights
def call_megam(args):
    """
    Call the ``megam`` binary with the given arguments.
    """
    if isinstance(args, str):
        raise TypeError("args should be a list of strings")
    # Locate the binary lazily on first use.
    if _megam_bin is None:
        config_megam()

    # Call megam via a subprocess
    proc = subprocess.Popen([_megam_bin] + args, stdout=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()

    # Check the return code.
    if proc.returncode != 0:
        print()
        print(stderr)
        raise OSError("megam command failed!")

    return stdout if isinstance(stdout, str) else stdout.decode("utf-8")
The provided code snippet includes necessary dependencies for implementing the `train_maxent_classifier_with_megam` function. Write a Python function `def train_maxent_classifier_with_megam( train_toks, trace=3, encoding=None, labels=None, gaussian_prior_sigma=0, **kwargs )` to solve the following problem:
Train a new ``ConditionalExponentialClassifier``, using the given training samples, using the external ``megam`` library. This ``ConditionalExponentialClassifier`` will encode the model that maximizes entropy from all the models that are empirically consistent with ``train_toks``. :see: ``train_maxent_classifier()`` for parameter descriptions. :see: ``nltk.classify.megam``
Here is the function:
def train_maxent_classifier_with_megam(
    train_toks, trace=3, encoding=None, labels=None, gaussian_prior_sigma=0, **kwargs
):
    """
    Train a new ``ConditionalExponentialClassifier``, using the given
    training samples, using the external ``megam`` library.  This
    ``ConditionalExponentialClassifier`` will encode the model that
    maximizes entropy from all the models that are empirically
    consistent with ``train_toks``.

    :see: ``train_maxent_classifier()`` for parameter descriptions.
    :see: ``nltk.classify.megam``
    """
    # megam file-format flags; both default on, overridable via kwargs.
    explicit = True
    bernoulli = True
    if "explicit" in kwargs:
        explicit = kwargs["explicit"]
    if "bernoulli" in kwargs:
        bernoulli = kwargs["bernoulli"]

    # Construct an encoding from the training data.
    if encoding is None:
        # Count cutoff can also be controlled by megam with the -minfc
        # option. Not sure where the best place for it is.
        count_cutoff = kwargs.get("count_cutoff", 0)
        encoding = BinaryMaxentFeatureEncoding.train(
            train_toks, count_cutoff, labels=labels, alwayson_features=True
        )
    elif labels is not None:
        raise ValueError("Specify encoding or labels, not both")

    # Write a training file for megam.
    try:
        fd, trainfile_name = tempfile.mkstemp(prefix="nltk-")
        with open(trainfile_name, "w") as trainfile:
            write_megam_file(
                train_toks, encoding, trainfile, explicit=explicit, bernoulli=bernoulli
            )
        os.close(fd)
    except (OSError, ValueError) as e:
        raise ValueError("Error while creating megam training file: %s" % e) from e

    # Run megam on the training file.  Option order matters to megam.
    options = []
    options += ["-nobias", "-repeat", "10"]
    if explicit:
        options += ["-explicit"]
    if not bernoulli:
        options += ["-fvals"]
    if gaussian_prior_sigma:
        # Lambda is just the precision of the Gaussian prior, i.e. it's the
        # inverse variance, so the parameter conversion is 1.0/sigma**2.
        # See https://users.umiacs.umd.edu/~hal/docs/daume04cg-bfgs.pdf
        inv_variance = 1.0 / gaussian_prior_sigma**2
    else:
        inv_variance = 0
    options += ["-lambda", "%.2f" % inv_variance, "-tune"]
    if trace < 3:
        options += ["-quiet"]
    if "max_iter" in kwargs:
        options += ["-maxi", "%s" % kwargs["max_iter"]]
    if "ll_delta" in kwargs:
        # [xx] this is actually a perplexity delta, not a log
        # likelihood delta
        options += ["-dpp", "%s" % abs(kwargs["ll_delta"])]
    if hasattr(encoding, "cost"):
        options += ["-multilabel"]  # each possible label has a cost
    options += ["multiclass", trainfile_name]
    stdout = call_megam(options)
    # print('./megam_i686.opt ', ' '.join(options))

    # Delete the training file
    try:
        os.remove(trainfile_name)
    except OSError as e:
        print(f"Warning: unable to delete {trainfile_name}: {e}")

    # Parse the generated weight vector.
    weights = parse_megam_weights(stdout, encoding.length(), explicit)

    # Convert from base-e to base-2 weights.
    weights *= numpy.log2(numpy.e)

    # Build the classifier
    return MaxentClassifier(encoding, weights)
170,856 | import os
import tempfile
from collections import defaultdict
from nltk.classify.api import ClassifierI
from nltk.classify.megam import call_megam, parse_megam_weights, write_megam_file
from nltk.classify.tadm import call_tadm, parse_tadm_weights, write_tadm_file
from nltk.classify.util import CutoffChecker, accuracy, log_likelihood
from nltk.data import gzip_open_unicode
from nltk.probability import DictionaryProbDist
from nltk.util import OrderedDict
class MaxentClassifier(ClassifierI):
def __init__(self, encoding, weights, logarithmic=True):
def labels(self):
def set_weights(self, new_weights):
def weights(self):
def classify(self, featureset):
def prob_classify(self, featureset):
def explain(self, featureset, columns=4):
def most_informative_features(self, n=10):
def show_most_informative_features(self, n=10, show="all"):
def __repr__(self):
def train(
cls,
train_toks,
algorithm=None,
trace=3,
encoding=None,
labels=None,
gaussian_prior_sigma=0,
**cutoffs,
):
def names_demo(trainer, features=names_demo_features):
def demo():
from nltk.classify.util import names_demo
classifier = names_demo(MaxentClassifier.train) | null |
170,857 | import subprocess
import sys
from nltk.internals import find_binary
try:
import numpy
except ImportError:
pass
The provided code snippet includes necessary dependencies for implementing the `parse_tadm_weights` function. Write a Python function `def parse_tadm_weights(paramfile)` to solve the following problem:
Given the stdout output generated by ``tadm`` when training a model, return a ``numpy`` array containing the corresponding weight vector.
Here is the function:
def parse_tadm_weights(paramfile):
    """
    Given the stdout output generated by ``tadm`` when training a
    model, return a ``numpy`` array containing the corresponding weight
    vector.
    """
    # One weight per line; whitespace-stripped before conversion.
    weights = [float(line.strip()) for line in paramfile]
    return numpy.array(weights, "d")
170,858 | import subprocess
import sys
from nltk.internals import find_binary
_tadm_bin = None
def config_tadm(bin=None):
    """Locate the ``tadm`` binary and cache its path in ``_tadm_bin``.

    :param bin: An explicit path to the ``tadm`` binary; if None, the
        binary is searched for on the system (honouring the ``TADM``
        environment variable).
    """
    global _tadm_bin
    _tadm_bin = find_binary(
        "tadm", bin, env_vars=["TADM"], binary_names=["tadm"], url="http://tadm.sf.net"
    )
The provided code snippet includes necessary dependencies for implementing the `call_tadm` function. Write a Python function `def call_tadm(args)` to solve the following problem:
Call the ``tadm`` binary with the given arguments.
Here is the function:
def call_tadm(args):
    """
    Call the ``tadm`` binary with the given arguments.
    """
    if isinstance(args, str):
        raise TypeError("args should be a list of strings")
    # Locate the binary lazily on first use.
    if _tadm_bin is None:
        config_tadm()

    # Call tadm via a subprocess
    proc = subprocess.Popen([_tadm_bin] + args, stdout=sys.stdout)
    (stdout, stderr) = proc.communicate()

    # Check the return code.
    if proc.returncode != 0:
        print()
        print(stderr)
        raise OSError("tadm command failed!")
170,859 | import subprocess
import sys
from nltk.internals import find_binary
def write_tadm_file(train_toks, encoding, stream):
    """
    Generate an input file for ``tadm`` based on the given corpus of
    classified tokens.

    :type train_toks: list(tuple(dict, str))
    :param train_toks: Training data, represented as a list of
        pairs, the first member of which is a feature dictionary,
        and the second of which is a classification label.

    :type encoding: TadmEventMaxentFeatureEncoding
    :param encoding: A feature encoding, used to convert featuresets
        into feature vectors.

    :type stream: stream
    :param stream: The stream to which the ``tadm`` input file should be
        written.
    """
    # See the following for a file format description:
    #
    # https://sf.net/forum/forum.php?thread_id=1391502&forum_id=473054
    # https://sf.net/forum/forum.php?thread_id=1675097&forum_id=473054
    labels = encoding.labels()
    for featureset, gold in train_toks:
        # Each event block starts with the number of candidate labels...
        stream.write("%d\n" % len(labels))
        # ...followed by one line per candidate label:
        #   <is-gold> <n-pairs> <fid val> <fid val> ...
        for candidate in labels:
            vec = encoding.encode(featureset, candidate)
            pairs = " ".join("%d %d" % pair for pair in vec)
            stream.write("%d %d %s\n" % (int(gold == candidate), len(vec), pairs))
class TadmEventMaxentFeatureEncoding(BinaryMaxentFeatureEncoding):
    """A feature encoding in the event-file format used by ``tadm``.

    Unlike ``BinaryMaxentFeatureEncoding``, the ``(feature, label)``
    mapping is grown lazily as new pairs are encoded, and feature values
    are mapped through a separate value mapping (ints map to themselves).
    """

    def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
        # Insertion order of the mapping is significant for tadm output.
        self._mapping = OrderedDict(mapping)
        self._label_mapping = OrderedDict()
        BinaryMaxentFeatureEncoding.__init__(
            self, labels, self._mapping, unseen_features, alwayson_features
        )

    def encode(self, featureset, label):
        # NOTE: encoding mutates the mappings -- unseen (feature, label)
        # pairs and unseen non-int values are assigned fresh ids on the fly.
        encoding = []
        for feature, value in featureset.items():
            if (feature, label) not in self._mapping:
                self._mapping[(feature, label)] = len(self._mapping)
            if value not in self._label_mapping:
                if not isinstance(value, int):
                    self._label_mapping[value] = len(self._label_mapping)
                else:
                    # Integer values represent themselves.
                    self._label_mapping[value] = value
            encoding.append(
                (self._mapping[(feature, label)], self._label_mapping[value])
            )
        return encoding

    def labels(self):
        return self._labels

    def describe(self, fid):
        # Reverse lookup by linear scan; returns None for unknown fids.
        for (feature, label) in self._mapping:
            if self._mapping[(feature, label)] == fid:
                return (feature, label)

    def length(self):
        return len(self._mapping)

    # @classmethod restored: callers invoke this as
    # ``TadmEventMaxentFeatureEncoding.train(tokens)``; without the
    # decorator ``train_toks`` would bind to ``cls`` and the call would break.
    @classmethod
    def train(cls, train_toks, count_cutoff=0, labels=None, **options):
        """Build an encoding covering every (feature, label) combination
        attested in ``train_toks`` (plus any labels supplied explicitly)."""
        mapping = OrderedDict()
        if not labels:
            labels = []

        # This gets read twice, so compute the values in case it's lazy.
        train_toks = list(train_toks)

        # First pass: collect the label set.
        for (featureset, label) in train_toks:
            if label not in labels:
                labels.append(label)

        # Second pass: one mapping entry per (feature, label) pair.
        for (featureset, label) in train_toks:
            for label in labels:
                for feature in featureset:
                    if (feature, label) not in mapping:
                        mapping[(feature, label)] = len(mapping)

        return cls(labels, mapping, **options)
def encoding_demo():
    """Demo: print a tadm event file and the feature mapping for a toy corpus."""
    import sys

    from nltk.classify.maxent import TadmEventMaxentFeatureEncoding

    # Three toy instances with overlapping feature names and two labels.
    tokens = [
        ({"f0": 1, "f1": 1, "f3": 1}, "A"),
        ({"f0": 1, "f2": 1, "f4": 1}, "B"),
        ({"f0": 2, "f2": 1, "f3": 1, "f4": 1}, "A"),
    ]
    encoding = TadmEventMaxentFeatureEncoding.train(tokens)
    write_tadm_file(tokens, encoding, sys.stdout)
    print()
    # Show which (feature, label) pair each feature id stands for.
    for i in range(encoding.length()):
        print("%s --> %d" % (encoding.describe(i), i))
    print()
170,860 | from collections import defaultdict
from nltk.classify.api import ClassifierI
from nltk.probability import FreqDist, MLEProbDist, entropy
def f(x):
    # Demo trainer: a binary decision tree with verbose training output.
    return DecisionTreeClassifier.train(x, binary=True, verbose=True)
def binary_names_demo_features(name):
    """Extract features from *name* for the gender-classification demo.

    The feature map contains an always-on bias feature, vowel tests on
    the first and last characters, and per-letter count / membership /
    prefix / suffix features for each lowercase ASCII letter.
    """
    features = {
        "alwayson": True,
        "startswith(vowel)": name[0].lower() in "aeiouy",
        "endswith(vowel)": name[-1].lower() in "aeiouy",
    }
    for letter in "abcdefghijklmnopqrstuvwxyz":
        features["count(%s)" % letter] = name.lower().count(letter)
        features["has(%s)" % letter] = letter in name.lower()
        features["startswith(%s)" % letter] = letter == name[0].lower()
        features["endswith(%s)" % letter] = letter == name[-1].lower()
    return features
def names_demo(trainer, features=names_demo_features):
    """Train and evaluate *trainer* on the gender-labelled names corpus.

    :param trainer: A callable mapping a list of ``(featureset, label)``
        pairs to a classifier.
    :param features: Feature-extraction function applied to each name.
    :return: The trained classifier.
    """
    import random

    from nltk.corpus import names

    # Construct a list of classified names, using the names corpus.
    namelist = [(name, "male") for name in names.words("male.txt")] + [
        (name, "female") for name in names.words("female.txt")
    ]

    # Randomly split the names into a test & train set.
    # Fixed seed so the split is reproducible across runs.
    random.seed(123456)
    random.shuffle(namelist)
    train = namelist[:5000]
    test = namelist[5000:5500]

    # Train up a classifier.
    print("Training classifier...")
    classifier = trainer([(features(n), g) for (n, g) in train])

    # Run the classifier on the test data.
    print("Testing classifier...")
    acc = accuracy(classifier, [(features(n), g) for (n, g) in test])
    print("Accuracy: %6.4f" % acc)

    # For classifiers that can find probabilities, show the log
    # likelihood and some sample probability distributions.
    try:
        test_featuresets = [features(n) for (n, g) in test]
        pdists = classifier.prob_classify_many(test_featuresets)
        ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)]
        print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test)))
        print()
        print("Unseen Names P(Male) P(Female)\n" + "-" * 40)
        for ((name, gender), pdist) in list(zip(test, pdists))[:5]:
            # The '*' marks the gold label's probability column.
            if gender == "male":
                fmt = " %-15s *%6.4f %6.4f"
            else:
                fmt = " %-15s %6.4f *%6.4f"
            print(fmt % (name, pdist.prob("male"), pdist.prob("female")))
    except NotImplementedError:
        # Classifier does not support probability output; skip this report.
        pass

    # Return the classifier
    return classifier
def demo():
    """Demo: train a binary decision tree on the names corpus and print it."""
    from nltk.classify.util import binary_names_demo_features, names_demo

    classifier = names_demo(
        f, binary_names_demo_features  # DecisionTreeClassifier.train,
    )
    print(classifier.pretty_format(depth=7))
    print(classifier.pseudocode(depth=7))
170,861 | from collections import defaultdict
from nltk.classify.api import ClassifierI
from nltk.probability import DictionaryProbDist, ELEProbDist, FreqDist, sum_logs
class NaiveBayesClassifier(ClassifierI):
def __init__(self, label_probdist, feature_probdist):
def labels(self):
def classify(self, featureset):
def prob_classify(self, featureset):
def show_most_informative_features(self, n=10):
def labelprob(l):
def most_informative_features(self, n=100):
def train(cls, labeled_featuresets, estimator=ELEProbDist):
def names_demo(trainer, features=names_demo_features):
def demo():
from nltk.classify.util import names_demo
classifier = names_demo(NaiveBayesClassifier.train)
classifier.show_most_informative_features() | null |
170,862 | import os
import re
import subprocess
import tempfile
import time
import zipfile
from sys import stdin
from nltk.classify.api import ClassifierI
from nltk.internals import config_java, java
from nltk.probability import DictionaryProbDist
# Path to weka.jar, cached by config_weka(); None until configured.
_weka_classpath = None
# Directories searched (in order) for weka.jar when no explicit classpath
# is given; config_weka() prepends $WEKAHOME when it is set.
_weka_search = [
    ".",
    "/usr/share/weka",
    "/usr/local/share/weka",
    "/usr/lib/weka",
    "/usr/local/lib/weka",
]
def _check_weka_version(jar):
try:
zf = zipfile.ZipFile(jar)
except (SystemExit, KeyboardInterrupt):
raise
except:
return None
try:
try:
return zf.read("weka/core/version.txt")
except KeyError:
return None
finally:
zf.close()
def config_java(bin=None, options=None, verbose=False):
    """
    Configure nltk's java interface, by letting nltk know where it can
    find the Java binary, and what extra options (if any) should be
    passed to Java when it is run.

    :param bin: The full path to the Java binary. If not specified,
        then nltk will search the system for a Java binary; and if
        one is not found, it will raise a ``LookupError`` exception.
    :type bin: str
    :param options: A list of options that should be passed to the
        Java binary when it is called. A common value is
        ``'-Xmx512m'``, which tells Java binary to increase
        the maximum heap size to 512 megabytes. If no options are
        specified, then do not modify the options list.
    :type options: list(str)
    """
    global _java_bin, _java_options
    _java_bin = find_binary(
        "java",
        bin,
        env_vars=["JAVAHOME", "JAVA_HOME"],
        verbose=verbose,
        binary_names=["java.exe"],
    )

    if options is not None:
        # Accept either a whitespace-separated string or a list of options.
        if isinstance(options, str):
            options = options.split()
        _java_options = list(options)
def config_weka(classpath=None):
    """Locate ``weka.jar`` and cache its path in ``_weka_classpath``.

    Uses *classpath* if given; otherwise searches ``$WEKAHOME`` followed
    by the directories in ``_weka_search``.

    :raises LookupError: If no ``weka.jar`` can be found.
    """
    global _weka_classpath

    # Make sure java's configured first.
    config_java()

    if classpath is not None:
        _weka_classpath = classpath

    if _weka_classpath is None:
        searchpath = _weka_search
        if "WEKAHOME" in os.environ:
            searchpath.insert(0, os.environ["WEKAHOME"])

        for path in searchpath:
            if os.path.exists(os.path.join(path, "weka.jar")):
                _weka_classpath = os.path.join(path, "weka.jar")
                version = _check_weka_version(_weka_classpath)
                if version:
                    print(f"[Found Weka: {_weka_classpath} (version {version})]")
                else:
                    print("[Found Weka: %s]" % _weka_classpath)
                # NOTE(review): the version is checked a second time here and
                # the result discarded -- looks redundant; confirm intent.
                _check_weka_version(_weka_classpath)

    if _weka_classpath is None:
        raise LookupError(
            "Unable to find weka.jar! Use config_weka() "
            "or set the WEKAHOME environment variable. "
            "For more information about Weka, please see "
            "https://www.cs.waikato.ac.nz/ml/weka/"
        )
170,863 | import os
import re
import subprocess
import tempfile
import time
import zipfile
from sys import stdin
from nltk.classify.api import ClassifierI
from nltk.internals import config_java, java
from nltk.probability import DictionaryProbDist
class WekaClassifier(ClassifierI):
def __init__(self, formatter, model_filename):
self._formatter = formatter
self._model = model_filename
def prob_classify_many(self, featuresets):
return self._classify_many(featuresets, ["-p", "0", "-distribution"])
def classify_many(self, featuresets):
return self._classify_many(featuresets, ["-p", "0"])
def _classify_many(self, featuresets, options):
# Make sure we can find java & weka.
config_weka()
temp_dir = tempfile.mkdtemp()
try:
# Write the test data file.
test_filename = os.path.join(temp_dir, "test.arff")
self._formatter.write(test_filename, featuresets)
# Call weka to classify the data.
cmd = [
"weka.classifiers.bayes.NaiveBayes",
"-l",
self._model,
"-T",
test_filename,
] + options
(stdout, stderr) = java(
cmd,
classpath=_weka_classpath,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# Check if something went wrong:
if stderr and not stdout:
if "Illegal options: -distribution" in stderr:
raise ValueError(
"The installed version of weka does "
"not support probability distribution "
"output."
)
else:
raise ValueError("Weka failed to generate output:\n%s" % stderr)
# Parse weka's output.
return self.parse_weka_output(stdout.decode(stdin.encoding).split("\n"))
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
def parse_weka_distribution(self, s):
probs = [float(v) for v in re.split("[*,]+", s) if v.strip()]
probs = dict(zip(self._formatter.labels(), probs))
return DictionaryProbDist(probs)
def parse_weka_output(self, lines):
# Strip unwanted text from stdout
for i, line in enumerate(lines):
if line.strip().startswith("inst#"):
lines = lines[i:]
break
if lines[0].split() == ["inst#", "actual", "predicted", "error", "prediction"]:
return [line.split()[2].split(":")[1] for line in lines[1:] if line.strip()]
elif lines[0].split() == [
"inst#",
"actual",
"predicted",
"error",
"distribution",
]:
return [
self.parse_weka_distribution(line.split()[-1])
for line in lines[1:]
if line.strip()
]
# is this safe:?
elif re.match(r"^0 \w+ [01]\.[0-9]* \?\s*$", lines[0]):
return [line.split()[1] for line in lines if line.strip()]
else:
for line in lines[:10]:
print(line)
raise ValueError(
"Unhandled output format -- your version "
"of weka may not be supported.\n"
" Header: %s" % lines[0]
)
# [xx] full list of classifiers (some may be abstract?):
# ADTree, AODE, BayesNet, ComplementNaiveBayes, ConjunctiveRule,
# DecisionStump, DecisionTable, HyperPipes, IB1, IBk, Id3, J48,
# JRip, KStar, LBR, LeastMedSq, LinearRegression, LMT, Logistic,
# LogisticBase, M5Base, MultilayerPerceptron,
# MultipleClassifiersCombiner, NaiveBayes, NaiveBayesMultinomial,
# NaiveBayesSimple, NBTree, NNge, OneR, PaceRegression, PART,
# PreConstructedLinearModel, Prism, RandomForest,
# RandomizableClassifier, RandomTree, RBFNetwork, REPTree, Ridor,
# RuleNode, SimpleLinearRegression, SimpleLogistic,
# SingleClassifierEnhancer, SMO, SMOreg, UserClassifier, VFI,
# VotedPerceptron, Winnow, ZeroR
_CLASSIFIER_CLASS = {
"naivebayes": "weka.classifiers.bayes.NaiveBayes",
"C4.5": "weka.classifiers.trees.J48",
"log_regression": "weka.classifiers.functions.Logistic",
"svm": "weka.classifiers.functions.SMO",
"kstar": "weka.classifiers.lazy.KStar",
"ripper": "weka.classifiers.rules.JRip",
}
def train(
    cls,
    model_filename,
    featuresets,
    classifier="naivebayes",
    options=None,
    quiet=True,
):
    """
    Train a new weka classifier, saving its model to ``model_filename``.

    :param model_filename: Path where weka writes the trained model.
    :param featuresets: Training data as (featureset, label) pairs.
    :param classifier: Either a short name from ``_CLASSIFIER_CLASS``
        (e.g. "C4.5") or a fully qualified weka class name.
    :param options: Extra command-line options passed through to weka.
        The default was changed from the mutable ``[]`` to ``None``
        (same behavior, avoids the shared-mutable-default pitfall).
    :param quiet: If true, discard weka's stdout.
    :return: A ``WekaClassifier`` wrapping the trained model.
    :raises ValueError: If ``classifier`` is not recognised.
    """
    # Make sure we can find java & weka.
    config_weka()

    # Build an ARFF formatter.
    formatter = ARFF_Formatter.from_train(featuresets)

    temp_dir = tempfile.mkdtemp()
    try:
        # Write the training data file.
        train_filename = os.path.join(temp_dir, "train.arff")
        formatter.write(train_filename, featuresets)

        # Resolve the classifier: accept both short aliases and the
        # fully qualified weka class names themselves.
        if classifier in cls._CLASSIFIER_CLASS:
            javaclass = cls._CLASSIFIER_CLASS[classifier]
        elif classifier in cls._CLASSIFIER_CLASS.values():
            javaclass = classifier
        else:
            raise ValueError("Unknown classifier %s" % classifier)

        # Train the weka model.
        cmd = [javaclass, "-d", model_filename, "-t", train_filename]
        cmd += list(options or [])
        if quiet:
            stdout = subprocess.PIPE
        else:
            stdout = None
        java(cmd, classpath=_weka_classpath, stdout=stdout)

        # Return the new classifier.
        return WekaClassifier(formatter, model_filename)
    finally:
        # Always remove the temporary ARFF directory, even on failure.
        for f in os.listdir(temp_dir):
            os.remove(os.path.join(temp_dir, f))
        os.rmdir(temp_dir)
def make_classifier(featuresets, model_filename="/tmp/name.model"):
    """
    Train a C4.5 (weka J48) classifier on ``featuresets``.

    :param featuresets: Training data as (featureset, label) pairs.
    :param model_filename: Where to save the trained model.  The default
        is kept for backward compatibility, but a fixed, predictable
        /tmp path is unsafe on shared hosts -- prefer passing a path
        from ``tempfile.mkstemp()``.
    :return: The trained ``WekaClassifier``.
    """
    return WekaClassifier.train(model_filename, featuresets, "C4.5")
170,864 | from matplotlib import pylab
from nltk.corpus import gutenberg
from nltk.text import Text
def plot_word_freq_dist(text):
    """
    Plot the cumulative frequency (as a percentage of all tokens) of the
    50 most common tokens in ``text``.

    :param text: The text to analyse; must provide ``vocab()`` returning
        a frequency distribution and a ``name`` attribute
        (e.g. ``nltk.text.Text``).
    """
    # Local import keeps this file's module-level dependencies unchanged.
    from itertools import accumulate

    fd = text.vocab()
    samples = [item for item, _ in fd.most_common(50)]
    values = [fd[sample] for sample in samples]
    # Running totals as a percentage of all tokens; accumulate() replaces
    # the quadratic sum(values[:i + 1]) re-summation for every index.
    values = [total * 100.0 / fd.N() for total in accumulate(values)]
    pylab.title(text.name)
    pylab.xlabel("Samples")
    pylab.ylabel("Cumulative Percentage")
    pylab.plot(values)
    pylab.xticks(range(len(samples)), [str(s) for s in samples], rotation=90)
    pylab.show()
# Lazily-loaded Project Gutenberg corpus: matches every *.txt entry whose
# name does not start with '.', decoded as Latin-1.
# NOTE(review): LazyCorpusLoader / PlaintextCorpusReader are not imported in
# this fragment -- confirm the expected nltk.corpus imports are present.
gutenberg: PlaintextCorpusReader = LazyCorpusLoader(
    "gutenberg", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1"
)
class Text:
    """
    A wrapper around a sequence of simple (string) tokens, which is
    intended to support initial exploration of texts (via the
    interactive console).  Its methods perform a variety of analyses
    on the text's contexts (e.g., counting, concordancing, collocation
    discovery), and display the results.  If you wish to write a
    program which makes use of these analyses, then you should bypass
    the ``Text`` class, and use the appropriate analysis function or
    class directly instead.

    A ``Text`` is typically initialized from a given document or
    corpus.  E.g.:

    >>> import nltk.corpus
    >>> from nltk.text import Text
    >>> moby = Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt'))
    """

    # This defeats lazy loading, but makes things faster.  This
    # *shouldn't* be necessary because the corpus view *should* be
    # doing intelligent caching, but without this it's running slow.
    # Look into whether the caching is working correctly.
    _COPY_TOKENS = True

    def __init__(self, tokens, name=None):
        """
        Create a Text object.

        :param tokens: The source text.
        :type tokens: sequence of str
        :param name: An optional display name; if omitted, one is derived
            from the first tokens of the text.
        """
        if self._COPY_TOKENS:
            tokens = list(tokens)
        self.tokens = tokens

        if name:
            self.name = name
        elif "]" in tokens[:20]:
            # Corpus files often open with a bracketed title such as
            # "[ Moby Dick by Herman Melville 1851 ]"; use it as the name.
            end = tokens[:20].index("]")
            self.name = " ".join(str(tok) for tok in tokens[1:end])
        else:
            # Fall back to the first 8 tokens with an ellipsis.
            self.name = " ".join(str(tok) for tok in tokens[:8]) + "..."

    # ////////////////////////////////////////////////////////////
    # Support item & slice access
    # ////////////////////////////////////////////////////////////

    def __getitem__(self, i):
        return self.tokens[i]

    def __len__(self):
        return len(self.tokens)

    # ////////////////////////////////////////////////////////////
    # Interactive console methods
    # ////////////////////////////////////////////////////////////

    def concordance(self, word, width=79, lines=25):
        """
        Prints a concordance for ``word`` with the specified context window.
        Word matching is not case-sensitive.

        :param word: The target word or phrase (a list of strings)
        :type word: str or list
        :param width: The width of each line, in characters (default=79)
        :type width: int
        :param lines: The number of lines to display (default=25)
        :type lines: int

        :seealso: ``ConcordanceIndex``
        """
        # Build (and cache) the case-insensitive index on first use.
        if "_concordance_index" not in self.__dict__:
            self._concordance_index = ConcordanceIndex(
                self.tokens, key=lambda s: s.lower()
            )

        return self._concordance_index.print_concordance(word, width, lines)

    def concordance_list(self, word, width=79, lines=25):
        """
        Generate a concordance for ``word`` with the specified context window.
        Word matching is not case-sensitive.

        :param word: The target word or phrase (a list of strings)
        :type word: str or list
        :param width: The width of each line, in characters (default=79)
        :type width: int
        :param lines: The number of lines to display (default=25)
        :type lines: int

        :seealso: ``ConcordanceIndex``
        """
        if "_concordance_index" not in self.__dict__:
            self._concordance_index = ConcordanceIndex(
                self.tokens, key=lambda s: s.lower()
            )
        return self._concordance_index.find_concordance(word, width)[:lines]

    def collocation_list(self, num=20, window_size=2):
        """
        Return collocations derived from the text, ignoring stopwords.

        >>> from nltk.book import text4
        >>> text4.collocation_list()[:2]
        [('United', 'States'), ('fellow', 'citizens')]

        :param num: The maximum number of collocations to return.
        :type num: int
        :param window_size: The number of tokens spanned by a collocation (default=2)
        :type window_size: int
        :rtype: list(tuple(str, str))
        """
        # The result is cached, keyed on (num, window_size); changing either
        # argument invalidates the cache and recomputes.
        if not (
            "_collocations" in self.__dict__
            and self._num == num
            and self._window_size == window_size
        ):
            self._num = num
            self._window_size = window_size

            # print("Building collocations list")
            from nltk.corpus import stopwords

            ignored_words = stopwords.words("english")
            finder = BigramCollocationFinder.from_words(self.tokens, window_size)
            # Require at least 2 occurrences and drop short/stop words.
            finder.apply_freq_filter(2)
            finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
            bigram_measures = BigramAssocMeasures()
            self._collocations = list(
                finder.nbest(bigram_measures.likelihood_ratio, num)
            )
        return self._collocations

    def collocations(self, num=20, window_size=2):
        """
        Print collocations derived from the text, ignoring stopwords.

        >>> from nltk.book import text4
        >>> text4.collocations() # doctest: +NORMALIZE_WHITESPACE
        United States; fellow citizens; years ago; four years; Federal
        Government; General Government; American people; Vice President; God
        bless; Chief Justice; one another; fellow Americans; Old World;
        Almighty God; Fellow citizens; Chief Magistrate; every citizen; Indian
        tribes; public debt; foreign nations

        :param num: The maximum number of collocations to print.
        :type num: int
        :param window_size: The number of tokens spanned by a collocation (default=2)
        :type window_size: int
        """
        collocation_strings = [
            w1 + " " + w2 for w1, w2 in self.collocation_list(num, window_size)
        ]
        print(tokenwrap(collocation_strings, separator="; "))

    def count(self, word):
        """
        Count the number of times this word appears in the text.
        """
        return self.tokens.count(word)

    def index(self, word):
        """
        Find the index of the first occurrence of the word in the text.
        """
        return self.tokens.index(word)

    def readability(self, method):
        # code from nltk_contrib.readability
        raise NotImplementedError

    def similar(self, word, num=20):
        """
        Distributional similarity: find other words which appear in the
        same contexts as the specified word; list most similar words first.

        :param word: The word used to seed the similarity search
        :type word: str
        :param num: The number of words to generate (default=20)
        :type num: int
        :seealso: ContextIndex.similar_words()
        """
        if "_word_context_index" not in self.__dict__:
            # print('Building word-context index...')
            self._word_context_index = ContextIndex(
                self.tokens, filter=lambda x: x.isalpha(), key=lambda s: s.lower()
            )

        # words = self._word_context_index.similar_words(word, num)

        word = word.lower()
        wci = self._word_context_index._word_to_contexts
        if word in wci.conditions():
            contexts = set(wci[word])
            # Rank candidates by how many contexts they share with `word`.
            fd = Counter(
                w
                for w in wci.conditions()
                for c in wci[w]
                if c in contexts and not w == word
            )
            words = [w for w, _ in fd.most_common(num)]
            print(tokenwrap(words))
        else:
            print("No matches")

    def common_contexts(self, words, num=20):
        """
        Find contexts where the specified words appear; list
        most frequent common contexts first.

        :param words: The words used to seed the similarity search
        :type words: str
        :param num: The number of words to generate (default=20)
        :type num: int
        :seealso: ContextIndex.common_contexts()
        """
        if "_word_context_index" not in self.__dict__:
            # print('Building word-context index...')
            self._word_context_index = ContextIndex(
                self.tokens, key=lambda s: s.lower()
            )

        try:
            fd = self._word_context_index.common_contexts(words, True)
            if not fd:
                print("No common contexts were found")
            else:
                ranked_contexts = [w for w, _ in fd.most_common(num)]
                print(tokenwrap(w1 + "_" + w2 for w1, w2 in ranked_contexts))

        except ValueError as e:
            print(e)

    def dispersion_plot(self, words):
        """
        Produce a plot showing the distribution of the words through the text.
        Requires pylab to be installed.

        :param words: The words to be plotted
        :type words: list(str)
        :seealso: nltk.draw.dispersion_plot()
        """
        from nltk.draw import dispersion_plot

        dispersion_plot(self, words)

    def _train_default_ngram_lm(self, tokenized_sents, n=3):
        # Build an order-n maximum-likelihood language model, padded so the
        # model can generate sentence boundaries (<s>, </s>).
        train_data, padded_sents = padded_everygram_pipeline(n, tokenized_sents)
        model = MLE(order=n)
        model.fit(train_data, padded_sents)
        return model

    def generate(self, length=100, text_seed=None, random_seed=42):
        """
        Print random text, generated using a trigram language model.
        See also `help(nltk.lm)`.

        :param length: The length of text to generate (default=100)
        :type length: int

        :param text_seed: Generation can be conditioned on preceding context.
        :type text_seed: list(str)

        :param random_seed: A random seed or an instance of `random.Random`. If provided,
            makes the random sampling part of generation reproducible. (default=42)
        :type random_seed: int
        """
        # Create the model when using it the first time.
        # NOTE(review): _tokenized_sents is rebuilt on every call even though
        # the trigram model itself is cached -- confirm this is intended.
        self._tokenized_sents = [
            sent.split(" ") for sent in sent_tokenize(" ".join(self.tokens))
        ]
        if not hasattr(self, "_trigram_model"):
            print("Building ngram index...", file=sys.stderr)
            self._trigram_model = self._train_default_ngram_lm(
                self._tokenized_sents, n=3
            )

        generated_tokens = []

        assert length > 0, "The `length` must be more than 0."
        # Keep sampling (with a fresh seed each round) until enough tokens
        # have been collected; sentence markers are skipped/stop a round.
        while len(generated_tokens) < length:
            for idx, token in enumerate(
                self._trigram_model.generate(
                    length, text_seed=text_seed, random_seed=random_seed
                )
            ):
                if token == "<s>":
                    continue
                if token == "</s>":
                    break
                generated_tokens.append(token)
            random_seed += 1

        prefix = " ".join(text_seed) + " " if text_seed else ""
        output_str = prefix + tokenwrap(generated_tokens[:length])
        print(output_str)
        return output_str

    def plot(self, *args):
        """
        See documentation for FreqDist.plot()
        :seealso: nltk.prob.FreqDist.plot()
        """
        return self.vocab().plot(*args)

    def vocab(self):
        """
        :seealso: nltk.prob.FreqDist
        """
        if "_vocab" not in self.__dict__:
            # print("Building vocabulary index...")
            self._vocab = FreqDist(self)
        return self._vocab

    def findall(self, regexp):
        """
        Find instances of the regular expression in the text.
        The text is a list of tokens, and a regexp pattern to match
        a single token must be surrounded by angle brackets.  E.g.

        >>> from nltk.book import text1, text5, text9
        >>> text5.findall("<.*><.*><bro>")
        you rule bro; telling you bro; u twizted bro
        >>> text1.findall("<a>(<.*>)<man>")
        monied; nervous; dangerous; white; white; white; pious; queer; good;
        mature; white; Cape; great; wise; wise; butterless; white; fiendish;
        pale; furious; better; certain; complete; dismasted; younger; brave;
        brave; brave; brave
        >>> text9.findall("<th.*>{3,}")
        thread through those; the thought that; that the thing; the thing
        that; that that thing; through these than through; them that the;
        through the thick; them that they; thought that the

        :param regexp: A regular expression
        :type regexp: str
        """
        if "_token_searcher" not in self.__dict__:
            self._token_searcher = TokenSearcher(self)

        hits = self._token_searcher.findall(regexp)
        hits = [" ".join(h) for h in hits]
        print(tokenwrap(hits, "; "))

    # ////////////////////////////////////////////////////////////
    # Helper Methods
    # ////////////////////////////////////////////////////////////

    # Matches a word-like token or sentence-final punctuation.
    _CONTEXT_RE = re.compile(r"\w+|[\.\!\?]")

    def _context(self, tokens, i):
        """
        One left & one right token, both case-normalized.  Skip over
        non-sentence-final punctuation.  Used by the ``ContextIndex``
        that is created for ``similar()`` and ``common_contexts()``.
        """
        # Left context
        j = i - 1
        while j >= 0 and not self._CONTEXT_RE.match(tokens[j]):
            j -= 1
        # NOTE(review): if no word-like token exists to the left, j ends at
        # -1 and tokens[-1] (the *last* token) is returned instead of
        # '*START*' (the guard tests j != 0, not j < 0).  Also the docstring
        # promises case-normalization but no .lower() is applied here --
        # confirm against upstream nltk.text.Text._context.
        left = tokens[j] if j != 0 else "*START*"

        # Right context
        j = i + 1
        while j < len(tokens) and not self._CONTEXT_RE.match(tokens[j]):
            j += 1
        right = tokens[j] if j != len(tokens) else "*END*"

        return (left, right)

    # ////////////////////////////////////////////////////////////
    # String Display
    # ////////////////////////////////////////////////////////////

    def __str__(self):
        return "<Text: %s>" % self.name

    def __repr__(self):
        return "<Text: %s>" % self.name
def app():
    """Demo entry point: plot the cumulative word-frequency distribution
    of Moby Dick from the Gutenberg corpus."""
    moby = Text(gutenberg.words("melville-moby_dick.txt"))
    plot_word_freq_dist(moby)
170,865 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
The provided code snippet includes necessary dependencies for implementing the `get_unique_counter_from_url` function. Write a Python function `def get_unique_counter_from_url(sp)` to solve the following problem:
Extract the unique counter from the URL if it has one. Otherwise return None.
Here is the function:
def get_unique_counter_from_url(sp):
    """
    Extract the unique counter from the URL if it has one.  Otherwise
    return None.

    The counter is the integer following the last URL-encoded '#'
    ("%23") in the string.
    """
    marker = "%23"
    pos = sp.rfind(marker)
    if pos == -1:
        return None
    return int(sp[pos + len(marker):])
170,866 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
def _italic(txt):
return "<i>%s</i>" % txt | null |
170,867 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
def _li(txt):
return "<li>%s</li>" % txt | null |
170,868 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
# Skeleton of every page served by the browser: the header's %s slot takes
# the current word (used in the <title>), the trailer closes the document.
html_header = """
<!DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'
'http://www.w3.org/TR/html4/strict.dtd'>
<html>
<head>
<meta name='generator' content=
'HTML Tidy for Windows (vers 14 February 2006), see www.w3.org'>
<meta http-equiv='Content-Type' content=
'text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: %s</title></head>
<body bgcolor='#F5F5F5' text='#000000'>
"""
html_trailer = """
</body>
</html>
"""
The provided code snippet includes necessary dependencies for implementing the `pg` function. Write a Python function `def pg(word, body)` to solve the following problem:
Return a HTML page of NLTK Browser format constructed from the word and body :param word: The word that the body corresponds to :type word: str :param body: The HTML body corresponding to the word :type body: str :return: a HTML page for the word-body combination :rtype: str
Here is the function:
def pg(word, body):
    """
    Return a HTML page of NLTK Browser format constructed from the
    word and body

    :param word: The word that the body corresponds to
    :type word: str
    :param body: The HTML body corresponding to the word
    :type body: str
    :return: a HTML page for the word-body combination
    :rtype: str
    """
    header = html_header % word
    return header + body + html_trailer
170,869 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
def _ul(txt):
return "<ul>" + txt + "</ul>" | null |
170,870 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
def _bold(txt):
return "<b>%s</b>" % txt
def _center(txt):
return "<center>%s</center>" % txt
The provided code snippet includes necessary dependencies for implementing the `_abbc` function. Write a Python function `def _abbc(txt)` to solve the following problem:
abbc = asterisks, breaks, bold, center
Here is the function:
def _abbc(txt):
    """
    abbc = asterisks, breaks, bold, center
    """
    stars = "*" * 10
    banner = "{}{} {} {}".format("<br>" * 10, stars, txt, stars)
    return _center(_bold(banner))
170,871 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
The provided code snippet includes necessary dependencies for implementing the `_get_synset` function. Write a Python function `def _get_synset(synset_key)` to solve the following problem:
The synset key is the unique name of the synset, this can be retrieved via synset.name()
Here is the function:
def _get_synset(synset_key):
    """
    The synset key is the unique name of the synset, this can be
    retrieved via synset.name()
    """
    # Delegates the lookup to the wordnet corpus reader; presumably raises
    # WordNetError for an unknown key -- confirm against wn.synset().
    return wn.synset(synset_key)
170,872 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
class Reference:
    """
    A reference to a page that may be generated by page_word
    """

    def __init__(self, word, synset_relations=None):
        """
        Build a reference to a new page.

        :param word: the word or words (separated by commas) for which to
            search for synsets
        :param synset_relations: a dictionary of synset keys to sets of
            synset relation identifiers to unfold a list of synset
            relations for.
        """
        self.word = word
        # Create a fresh dict per instance: the previous mutable default
        # argument (``dict()``) was shared by every Reference, so the
        # mutations done by toggle_synset()/toggle_synset_relation()
        # silently leaked between instances.
        self.synset_relations = {} if synset_relations is None else synset_relations

    def encode(self):
        """
        Encode this reference into a string to be used in a URL.
        """
        # This uses a tuple rather than an object since the python
        # pickle representation is much smaller and there is no need
        # to represent the complete object.
        string = pickle.dumps((self.word, self.synset_relations), -1)
        return base64.urlsafe_b64encode(string).decode()

    def decode(string):
        """
        Decode a reference encoded with Reference.encode

        Note: invoked as ``Reference.decode(s)``; it acts as a static
        method even though it is declared without the decorator.
        """
        string = base64.urlsafe_b64decode(string.encode())
        word, synset_relations = RestrictedUnpickler(io.BytesIO(string)).load()
        return Reference(word, synset_relations)

    def toggle_synset_relation(self, synset, relation):
        """
        Toggle the display of the relations for the given synset and
        relation type.

        This function will throw a KeyError if the synset is currently
        not being displayed.
        """
        if relation in self.synset_relations[synset.name()]:
            self.synset_relations[synset.name()].remove(relation)
        else:
            self.synset_relations[synset.name()].add(relation)

        return self

    def toggle_synset(self, synset):
        """
        Toggle displaying of the relation types for the given synset
        """
        if synset.name() in self.synset_relations:
            del self.synset_relations[synset.name()]
        else:
            self.synset_relations[synset.name()] = set()

        return self
def page_from_reference(href):
    """
    Returns a tuple of the HTML page built and the new current word

    :param href: The reference to be resolved
    :type href: Reference
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    """
    word = href.word
    pos_forms = defaultdict(list)
    # Normalise the comma-separated search terms: lowercase, spaces -> '_'.
    words = word.split(",")
    words = [w for w in [w.strip().lower().replace(" ", "_") for w in words] if w != ""]
    if len(words) == 0:
        # No words were found.
        # NOTE(review): the message is returned in the *word* slot of the
        # (page, word) tuple -- confirm callers display it as intended.
        return "", "Please specify a word to search for."

    # This looks up multiple words at once.  This is probably not
    # necessary and may lead to problems.
    for w in words:
        # Collect the base form(s) of each word for every part of speech.
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)

    body = ""
    for pos, pos_str, name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + "\n"
            for w in pos_forms[pos]:
                # Not all words of exc files are in the database, skip
                # to the next word if a KeyError is raised.
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        body = "The word or words '%s' were not found in the dictionary." % word
    return body, word
The provided code snippet includes necessary dependencies for implementing the `page_from_word` function. Write a Python function `def page_from_word(word)` to solve the following problem:
Return a HTML page for the given word. :type word: str :param word: The currently active word :return: A tuple (page,word), where page is the new current HTML page to be sent to the browser and word is the new current word :rtype: A tuple (str,str)
Here is the function:
def page_from_word(word):
    """
    Return a HTML page for the given word.

    :type word: str
    :param word: The currently active word
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    """
    ref = Reference(word)
    return page_from_reference(ref)
170,873 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
class Reference:
    """
    A reference to a page that may be generated by page_word
    """

    def __init__(self, word, synset_relations=None):
        """
        Build a reference to a new page.

        :param word: the word or words (separated by commas) for which to
            search for synsets
        :param synset_relations: a dictionary of synset keys to sets of
            synset relation identifiers to unfold a list of synset
            relations for.
        """
        self.word = word
        # Create a fresh dict per instance: the previous mutable default
        # argument (``dict()``) was shared by every Reference, so the
        # mutations done by toggle_synset()/toggle_synset_relation()
        # silently leaked between instances.
        self.synset_relations = {} if synset_relations is None else synset_relations

    def encode(self):
        """
        Encode this reference into a string to be used in a URL.
        """
        # This uses a tuple rather than an object since the python
        # pickle representation is much smaller and there is no need
        # to represent the complete object.
        string = pickle.dumps((self.word, self.synset_relations), -1)
        return base64.urlsafe_b64encode(string).decode()

    def decode(string):
        """
        Decode a reference encoded with Reference.encode

        Note: invoked as ``Reference.decode(s)``; it acts as a static
        method even though it is declared without the decorator.
        """
        string = base64.urlsafe_b64decode(string.encode())
        word, synset_relations = RestrictedUnpickler(io.BytesIO(string)).load()
        return Reference(word, synset_relations)

    def toggle_synset_relation(self, synset, relation):
        """
        Toggle the display of the relations for the given synset and
        relation type.

        This function will throw a KeyError if the synset is currently
        not being displayed.
        """
        if relation in self.synset_relations[synset.name()]:
            self.synset_relations[synset.name()].remove(relation)
        else:
            self.synset_relations[synset.name()].add(relation)

        return self

    def toggle_synset(self, synset):
        """
        Toggle displaying of the relation types for the given synset
        """
        if synset.name() in self.synset_relations:
            del self.synset_relations[synset.name()]
        else:
            self.synset_relations[synset.name()] = set()

        return self
def page_from_reference(href):
    """
    Returns a tuple of the HTML page built and the new current word

    :param href: The reference to be resolved
    :type href: Reference
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    """
    word = href.word
    pos_forms = defaultdict(list)
    # Normalise the comma-separated search terms: lowercase, spaces -> '_'.
    words = word.split(",")
    words = [w for w in [w.strip().lower().replace(" ", "_") for w in words] if w != ""]
    if len(words) == 0:
        # No words were found.
        # NOTE(review): the message is returned in the *word* slot of the
        # (page, word) tuple -- confirm callers display it as intended.
        return "", "Please specify a word to search for."

    # This looks up multiple words at once.  This is probably not
    # necessary and may lead to problems.
    for w in words:
        # Collect the base form(s) of each word for every part of speech.
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)

    body = ""
    for pos, pos_str, name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + "\n"
            for w in pos_forms[pos]:
                # Not all words of exc files are in the database, skip
                # to the next word if a KeyError is raised.
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        body = "The word or words '%s' were not found in the dictionary." % word
    return body, word
The provided code snippet includes necessary dependencies for implementing the `page_from_href` function. Write a Python function `def page_from_href(href)` to solve the following problem:
Returns a tuple of the HTML page built and the new current word :param href: The hypertext reference to be solved :type href: str :return: A tuple (page,word), where page is the new current HTML page to be sent to the browser and word is the new current word :rtype: A tuple (str,str)
Here is the function:
def page_from_href(href):
    """
    Returns a tuple of the HTML page built and the new current word

    :param href: The hypertext reference to be solved
    :type href: str
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    """
    decoded = Reference.decode(href)
    return page_from_reference(decoded)
170,874 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
def get_static_web_help_page():
    """
    Return the static web help page.
    """
    # Fixed HTML document; no substitutions are performed.
    return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
        Copyright (C) 2001-2023 NLTK Project
        Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
        URL: <https://www.nltk.org/>
        For license information, see LICENSE.TXT -->
     <head>
          <meta http-equiv='Content-Type' content='text/html; charset=us-ascii'>
          <title>NLTK Wordnet Browser display of: * Help *</title>
     </head>
<body bgcolor='#F5F5F5' text='#000000'>
<h2>NLTK Wordnet Browser Help</h2>
<p>The NLTK Wordnet Browser is a tool to use in browsing the Wordnet database. It tries to behave like the Wordnet project's web browser but the difference is that the NLTK Wordnet Browser uses a local Wordnet database.
<p><b>You are using the Javascript client part of the NLTK Wordnet BrowseServer.</b> We assume your browser is in tab sheets enabled mode.</p>
<p>For background information on Wordnet, see the Wordnet project home page: <a href="https://wordnet.princeton.edu/"><b> https://wordnet.princeton.edu/</b></a>. For more information on the NLTK project, see the project home:
<a href="https://www.nltk.org/"><b>https://www.nltk.org/</b></a>. To get an idea of what the Wordnet version used by this browser includes choose <b>Show Database Info</b> from the <b>View</b> submenu.</p>
<h3>Word search</h3>
<p>The word to be searched is typed into the <b>New Word</b> field and the search started with Enter or by clicking the <b>Search</b> button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.</p>
<p>In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing <b>fLIeS</b> as an obscure example gives one <a href="MfLIeS">this</a>. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination.</p>
<p>The result of a search is a display of one or more
<b>synsets</b> for every part of speech in which a form of the
search word was found to occur. A synset is a set of words
having the same sense or meaning. Each word in a synset that is
underlined is a hyperlink which can be clicked to trigger an
automatic search for that word.</p>
<p>Every synset has a hyperlink <b>S:</b> at the start of its
display line. Clicking that symbol shows you the name of every
<b>relation</b> that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.</p>
<p>It is also possible to give two or more words or collocations to be searched at the same time separating them with a comma like this <a href="Mcheer up,clear up">cheer up,clear up</a>, for example. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination. As you could see the search result includes the synsets found in the same order than the forms were given in the search field.</p>
<p>
There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink <b>W:</b> at their beginning. Clicking this link shows more info on the word in question.</p>
<h3>The Buttons</h3>
<p>The <b>Search</b> and <b>Help</b> buttons need no more explanation. </p>
<p>The <b>Show Database Info</b> button shows a collection of Wordnet database statistics.</p>
<p>The <b>Shutdown the Server</b> button is shown for the first client of the BrowServer program i.e. for the client that is automatically launched when the BrowServer is started but not for the succeeding clients in order to protect the server from accidental shutdowns.
</p></body>
</html>
"""
def get_static_index_page(with_shutdown):
    """
    Build the static top-level frameset page.

    :param with_shutdown: if True, reference the upper-frame variant that
        carries a server-shutdown link; otherwise the plain variant.
    :return: the complete HTML document as a string.
    """
    template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
<HTML>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2023 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT -->
<HEAD>
<TITLE>NLTK Wordnet Browser</TITLE>
</HEAD>
<frameset rows="7%%,93%%">
<frame src="%s" name="header">
<frame src="start_page" name="body">
</frameset>
</HTML>
"""
    # Choose which upper-frame page the header frame should load.
    upper_link = "upper.html" if with_shutdown else "upper_2.html"
    return template % upper_link
def get_static_upper_page(with_shutdown):
    """
    Return the upper frame page.

    If *with_shutdown* is True, a 'Shutdown' link is also included so the
    first client can stop the server; subsequent clients get the variant
    without it.
    """
    template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2023 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<title>Untitled Document</title>
</head>
<body>
<form method="GET" action="search" target="body">
Current Word:&nbsp;<input type="text" id="currentWord" size="10" disabled>
Next Word:&nbsp;<input type="text" id="nextWord" name="nextWord" size="10">
<input name="searchButton" type="submit" value="Search">
</form>
<a target="body" href="web_help.html">Help</a>
%s
</body>
</html>
"""
    # Only the first (auto-launched) client is offered a shutdown link.
    shutdown_link = (
        '<a href="SHUTDOWN THE SERVER">Shutdown</a>' if with_shutdown else ""
    )
    return template % shutdown_link
The provided code snippet includes necessary dependencies for implementing the `get_static_page_by_path` function. Write a Python function `def get_static_page_by_path(path)` to solve the following problem:
Return a static HTML page from the path given.
Here is the function:
def get_static_page_by_path(path):
    """
    Return a static HTML page from the path given.

    :raises FileNotFoundError: if no static page corresponds to ``path``.
    """
    # Lazy dispatch table: every value is a zero-argument callable, so a
    # page builder only runs for the path actually requested.
    pages = {
        "index_2.html": lambda: get_static_index_page(False),
        "index.html": lambda: get_static_index_page(True),
        "NLTK Wordnet Browser Database Info.html": lambda: (
            "Display of Wordnet Database Statistics is not supported"
        ),
        "upper_2.html": lambda: get_static_upper_page(False),
        "upper.html": lambda: get_static_upper_page(True),
        "web_help.html": lambda: get_static_web_help_page(),
        "wx_help.html": lambda: get_static_wx_help_page(),
    }
    if path not in pages:
        raise FileNotFoundError()
    return pages[path]()
170,875 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
The provided code snippet includes necessary dependencies for implementing the `get_static_welcome_message` function. Write a Python function `def get_static_welcome_message()` to solve the following problem:
Get the static welcome page.
Here is the function:
def get_static_welcome_message():
    """
    Return the static welcome/help page shown when the browser starts.
    """
    # The page is a fixed HTML fragment; it is kept verbatim so the
    # rendered help text stays identical across releases.
    welcome_html = """
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Next Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
"""
    return welcome_html
170,876 | import base64
import copy
import getopt
import io
import os
import pickle
import sys
import threading
import time
import webbrowser
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from urllib.parse import unquote_plus
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Lemma, Synset
server_mode = None
def wnb(port=8000, runBrowser=True, logfilename=None):
    """
    Run NLTK Wordnet Browser Server.

    :param port: The port number for the server to listen on, defaults to
        8000
    :type port: int
    :param runBrowser: True to start a web browser and point it at the web
        server.
    :type runBrowser: bool
    :param logfilename: If given, the name of a file to append log lines to;
        when None, no logging is done.
    :type logfilename: str
    """
    # The webbrowser module is unpredictable: it typically blocks if it uses
    # a console web browser, and doesn't block if it uses a GUI web browser,
    # so we need to force it to have a clear, consistent behaviour.
    #
    # Normally the server should run for as long as the user wants; ideally
    # they would control this from the UI by closing the window or tab, or by
    # clicking a 'Shutdown' button that stops the server and closes the
    # browser.  Both of these are unfeasible.
    #
    # The next best alternative is to start the server, have it close when
    # it receives SIGTERM (default), and run the browser as well.  The user
    # may have to shut down both programs.
    #
    # Since webbrowser may block, and the web server will block, we must run
    # them in separate threads.
    global server_mode, logfile
    server_mode = not runBrowser

    # Set up logging.
    if logfilename:
        try:
            logfile = open(logfilename, "a", 1)  # 1 means 'line buffering'
        except OSError as e:
            # BUG FIX: file.write() takes a single string argument, so the
            # message must be %-formatted before being written (the original
            # passed printf-style arguments and raised TypeError here).
            sys.stderr.write("Couldn't open %s for writing: %s" % (logfilename, e))
            sys.exit(1)
    else:
        logfile = None

    # Compute URL and start web browser
    url = "http://localhost:" + str(port)

    server_ready = None
    browser_thread = None

    if runBrowser:
        server_ready = threading.Event()
        browser_thread = startBrowser(url, server_ready)

    # Start the server.
    server = HTTPServer(("", port), MyServerHandler)
    if logfile:
        logfile.write("NLTK Wordnet browser server running serving: %s\n" % url)
    if runBrowser:
        # Let the browser thread know it may start polling the URL.
        server_ready.set()

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass

    if runBrowser:
        browser_thread.join()

    if logfile:
        logfile.close()
def usage():
    """
    Display the command line help message.
    """
    # The module docstring doubles as the CLI usage text.
    print(__doc__)
# Annotation for sys.argv (bound above via ``from sys import argv``).
# A module-level annotation is evaluated at import time, so use the builtin
# generic: ``typing.List`` is not imported in this module and would raise
# NameError here.
argv: list[str]
def app():
    """
    Parse the command-line options and start the Wordnet browser server,
    plus (unless server mode was requested) a web browser pointed at it.
    """
    # Parse and interpret options.
    (opts, _) = getopt.getopt(
        argv[1:], "l:p:sh", ["logfile=", "port=", "server-mode", "help"]
    )

    port = 8000
    server_mode = False
    help_mode = False
    logfilename = None

    for opt, value in opts:
        if opt in ("-l", "--logfile"):
            logfilename = str(value)
        elif opt in ("-p", "--port"):
            port = int(value)
        elif opt in ("-s", "--server-mode"):
            server_mode = True
        elif opt in ("-h", "--help"):
            help_mode = True

    if help_mode:
        usage()
    else:
        wnb(port, not server_mode, logfilename)
170,877 | import itertools
import re
from tkinter import SEL_FIRST, SEL_LAST, Frame, Label, PhotoImage, Scrollbar, Text, Tk
# Window title and the initial contents of the find/replace entry fields.
windowTitle = "Finding (and Replacing) Nemo"
initialFind = r"n(.*?)e(.*?)m(.*?)o"
initialRepl = r"M\1A\2K\3I"
# Sample text the demo searches (standard lorem-ipsum filler).
initialText = """\
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
"""
class FindZone(Zone):
def addTags(self, m):
def substitute(self, *args):
class ReplaceZone(Zone):
def addTags(self, m):
def substitute(self):
def launchRefresh(_):
def app():
    """
    Launch the 'Finding (and Replacing) Nemo' regexp demo window.

    Builds the Tk root, a regexp "find" zone that refreshes its
    highlighting as the mouse selection changes, a "replace" zone, and a
    global key binding that re-runs the highlighting after each keystroke.
    """
    global root, sz, rz, rex0
    root = Tk()
    root.resizable(height=False, width=True)
    root.title(windowTitle)
    root.minsize(width=250, height=0)
    # Search zone: refresh highlighting on every mouse selection event.
    sz = FindZone("find", initialFind, initialText)
    sz.fld.bind("<Button-1>", launchRefresh)
    sz.fld.bind("<ButtonRelease-1>", launchRefresh)
    sz.fld.bind("<B1-Motion>", launchRefresh)
    # Start with an empty compiled selection pattern.
    sz.rexSel = re.compile("")
    rz = ReplaceZone("repl", initialRepl, "")
    # Matches backreferences (\1, \2, ...) in the replacement template,
    # skipping escaped backslashes.
    rex0 = re.compile(r"(?<!\\)\\([0-9]+)")
    root.bind_all("<Key>", launchRefresh)
    launchRefresh(None)
    root.mainloop()
170,878 | import random
import re
import textwrap
import time
from tkinter import (
Button,
Canvas,
Checkbutton,
Frame,
IntVar,
Label,
Menu,
Scrollbar,
Text,
Tk,
)
from tkinter.filedialog import askopenfilename, asksaveasfilename
from tkinter.font import Font
from nltk.chunk import ChunkScore, RegexpChunkParser
from nltk.chunk.regexp import RegexpChunkRule
from nltk.corpus import conll2000, treebank_chunk
from nltk.draw.util import ShowText
from nltk.tree import Tree
from nltk.util import in_idle
class RegexpChunkApp:
"""
A graphical tool for exploring the regular expression based chunk
parser ``nltk.chunk.RegexpChunkParser``.
See ``HELP`` for instructional text.
"""
##/////////////////////////////////////////////////////////////////
## Help Text
##/////////////////////////////////////////////////////////////////
#: A dictionary mapping from part of speech tags to descriptions,
#: which is used in the help text. (This should probably live with
#: the conll and/or treebank corpus instead.)
TAGSET = {
"CC": "Coordinating conjunction",
"PRP$": "Possessive pronoun",
"CD": "Cardinal number",
"RB": "Adverb",
"DT": "Determiner",
"RBR": "Adverb, comparative",
"EX": "Existential there",
"RBS": "Adverb, superlative",
"FW": "Foreign word",
"RP": "Particle",
"JJ": "Adjective",
"TO": "to",
"JJR": "Adjective, comparative",
"UH": "Interjection",
"JJS": "Adjective, superlative",
"VB": "Verb, base form",
"LS": "List item marker",
"VBD": "Verb, past tense",
"MD": "Modal",
"NNS": "Noun, plural",
"NN": "Noun, singular or masps",
"VBN": "Verb, past participle",
"VBZ": "Verb,3rd ps. sing. present",
"NNP": "Proper noun, singular",
"NNPS": "Proper noun plural",
"WDT": "wh-determiner",
"PDT": "Predeterminer",
"WP": "wh-pronoun",
"POS": "Possessive ending",
"WP$": "Possessive wh-pronoun",
"PRP": "Personal pronoun",
"WRB": "wh-adverb",
"(": "open parenthesis",
")": "close parenthesis",
"``": "open quote",
",": "comma",
"''": "close quote",
".": "period",
"#": "pound sign (currency marker)",
"$": "dollar sign (currency marker)",
"IN": "Preposition/subord. conjunction",
"SYM": "Symbol (mathematical or scientific)",
"VBG": "Verb, gerund/present participle",
"VBP": "Verb, non-3rd ps. sing. present",
":": "colon",
}
#: Contents for the help box. This is a list of tuples, one for
#: each help page, where each tuple has four elements:
#: - A title (displayed as a tab)
#: - A string description of tabstops (see Tkinter.Text for details)
#: - The text contents for the help page. You can use expressions
#: like <red>...</red> to colorize the text; see ``HELP_AUTOTAG``
#: for a list of tags you can use for colorizing.
HELP = [
(
"Help",
"20",
"Welcome to the regular expression chunk-parser grammar editor. "
"You can use this editor to develop and test chunk parser grammars "
"based on NLTK's RegexpChunkParser class.\n\n"
# Help box.
"Use this box ('Help') to learn more about the editor; click on the "
"tabs for help on specific topics:"
"<indent>\n"
"Rules: grammar rule types\n"
"Regexps: regular expression syntax\n"
"Tags: part of speech tags\n</indent>\n"
# Grammar.
"Use the upper-left box ('Grammar') to edit your grammar. "
"Each line of your grammar specifies a single 'rule', "
"which performs an action such as creating a chunk or merging "
"two chunks.\n\n"
# Dev set.
"The lower-left box ('Development Set') runs your grammar on the "
"development set, and displays the results. "
"Your grammar's chunks are <highlight>highlighted</highlight>, and "
"the correct (gold standard) chunks are "
"<underline>underlined</underline>. If they "
"match, they are displayed in <green>green</green>; otherwise, "
"they are displayed in <red>red</red>. The box displays a single "
"sentence from the development set at a time; use the scrollbar or "
"the next/previous buttons view additional sentences.\n\n"
# Performance
"The lower-right box ('Evaluation') tracks the performance of "
"your grammar on the development set. The 'precision' axis "
"indicates how many of your grammar's chunks are correct; and "
"the 'recall' axis indicates how many of the gold standard "
"chunks your system generated. Typically, you should try to "
"design a grammar that scores high on both metrics. The "
"exact precision and recall of the current grammar, as well "
"as their harmonic mean (the 'f-score'), are displayed in "
"the status bar at the bottom of the window.",
),
(
"Rules",
"10",
"<h1>{...regexp...}</h1>"
"<indent>\nChunk rule: creates new chunks from words matching "
"regexp.</indent>\n\n"
"<h1>}...regexp...{</h1>"
"<indent>\nStrip rule: removes words matching regexp from existing "
"chunks.</indent>\n\n"
"<h1>...regexp1...}{...regexp2...</h1>"
"<indent>\nSplit rule: splits chunks that match regexp1 followed by "
"regexp2 in two.</indent>\n\n"
"<h1>...regexp...{}...regexp...</h1>"
"<indent>\nMerge rule: joins consecutive chunks that match regexp1 "
"and regexp2</indent>\n",
),
(
"Regexps",
"10 60",
# "Regular Expression Syntax Summary:\n\n"
"<h1>Pattern\t\tMatches...</h1>\n"
"<hangindent>"
"\t<<var>T</var>>\ta word with tag <var>T</var> "
"(where <var>T</var> may be a regexp).\n"
"\t<var>x</var>?\tan optional <var>x</var>\n"
"\t<var>x</var>+\ta sequence of 1 or more <var>x</var>'s\n"
"\t<var>x</var>*\ta sequence of 0 or more <var>x</var>'s\n"
"\t<var>x</var>|<var>y</var>\t<var>x</var> or <var>y</var>\n"
"\t.\tmatches any character\n"
"\t(<var>x</var>)\tTreats <var>x</var> as a group\n"
"\t# <var>x...</var>\tTreats <var>x...</var> "
"(to the end of the line) as a comment\n"
"\t\\<var>C</var>\tmatches character <var>C</var> "
"(useful when <var>C</var> is a special character "
"like + or #)\n"
"</hangindent>"
"\n<h1>Examples:</h1>\n"
"<hangindent>"
"\t<regexp><NN></regexp>\n"
'\t\tMatches <match>"cow/NN"</match>\n'
'\t\tMatches <match>"green/NN"</match>\n'
"\t<regexp><VB.*></regexp>\n"
'\t\tMatches <match>"eating/VBG"</match>\n'
'\t\tMatches <match>"ate/VBD"</match>\n'
"\t<regexp><IN><DT><NN></regexp>\n"
'\t\tMatches <match>"on/IN the/DT car/NN"</match>\n'
"\t<regexp><RB>?<VBD></regexp>\n"
'\t\tMatches <match>"ran/VBD"</match>\n'
'\t\tMatches <match>"slowly/RB ate/VBD"</match>\n'
r"\t<regexp><\#><CD> # This is a comment...</regexp>\n"
'\t\tMatches <match>"#/# 100/CD"</match>\n'
"</hangindent>",
),
(
"Tags",
"10 60",
"<h1>Part of Speech Tags:</h1>\n"
+ "<hangindent>"
+ "<<TAGSET>>"
+ "</hangindent>\n", # this gets auto-substituted w/ self.TAGSET
),
]
HELP_AUTOTAG = [
("red", dict(foreground="#a00")),
("green", dict(foreground="#080")),
("highlight", dict(background="#ddd")),
("underline", dict(underline=True)),
("h1", dict(underline=True)),
("indent", dict(lmargin1=20, lmargin2=20)),
("hangindent", dict(lmargin1=0, lmargin2=60)),
("var", dict(foreground="#88f")),
("regexp", dict(foreground="#ba7")),
("match", dict(foreground="#6a6")),
]
##/////////////////////////////////////////////////////////////////
## Config Parameters
##/////////////////////////////////////////////////////////////////
_EVAL_DELAY = 1
"""If the user has not pressed any key for this amount of time (in
seconds), and the current grammar has not been evaluated, then
the eval demon will evaluate it."""
_EVAL_CHUNK = 15
"""The number of sentences that should be evaluated by the eval
demon each time it runs."""
_EVAL_FREQ = 0.2
"""The frequency (in seconds) at which the eval demon is run"""
_EVAL_DEMON_MIN = 0.02
"""The minimum amount of time that the eval demon should take each time
it runs -- if it takes less than this time, _EVAL_CHUNK will be
modified upwards."""
_EVAL_DEMON_MAX = 0.04
"""The maximum amount of time that the eval demon should take each time
it runs -- if it takes more than this time, _EVAL_CHUNK will be
modified downwards."""
_GRAMMARBOX_PARAMS = dict(
width=40,
height=12,
background="#efe",
highlightbackground="#efe",
highlightthickness=1,
relief="groove",
border=2,
wrap="word",
)
_HELPBOX_PARAMS = dict(
width=15,
height=15,
background="#efe",
highlightbackground="#efe",
foreground="#555",
highlightthickness=1,
relief="groove",
border=2,
wrap="word",
)
_DEVSETBOX_PARAMS = dict(
width=70,
height=10,
background="#eef",
highlightbackground="#eef",
highlightthickness=1,
relief="groove",
border=2,
wrap="word",
tabs=(30,),
)
_STATUS_PARAMS = dict(background="#9bb", relief="groove", border=2)
_FONT_PARAMS = dict(family="helvetica", size=-20)
_FRAME_PARAMS = dict(background="#777", padx=2, pady=2, border=3)
_EVALBOX_PARAMS = dict(
background="#eef",
highlightbackground="#eef",
highlightthickness=1,
relief="groove",
border=2,
width=300,
height=280,
)
_BUTTON_PARAMS = dict(
background="#777", activebackground="#777", highlightbackground="#777"
)
_HELPTAB_BG_COLOR = "#aba"
_HELPTAB_FG_COLOR = "#efe"
_HELPTAB_FG_PARAMS = dict(background="#efe")
_HELPTAB_BG_PARAMS = dict(background="#aba")
_HELPTAB_SPACER = 6
def normalize_grammar(self, grammar):
    """
    Return a canonical form of *grammar*: comments removed, runs of
    whitespace collapsed, and any unescaped ``$`` backslash-escaped.
    """
    # Drop everything from an unescaped '#' to the end of each line.
    without_comments = re.sub(r"((\\.|[^#])*)(#.*)?", r"\1", grammar)
    # Collapse repeated spaces, then whitespace following a newline.
    collapsed = re.sub(" +", " ", without_comments)
    collapsed = re.sub(r"\n\s+", r"\n", collapsed)
    normalized = collapsed.strip()
    # [xx] Hack: automatically backslash $!
    return re.sub(r"([^\\])\$", r"\1\\$", normalized)
def __init__(
    self,
    devset_name="conll2000",
    devset=None,
    grammar="",
    chunk_label="NP",
    tagset=None,
):
    """
    :param devset_name: The name of the development set; used for
        display & for save files.  If either the name 'treebank'
        or the name 'conll2000' is used, and devset is None, then
        devset will be set automatically.
    :param devset: A list of chunked sentences
    :param grammar: The initial grammar to display.
    :param chunk_label: The node label the chunker assigns and the
        scorer checks (e.g. 'NP').
    :param tagset: Dictionary from tags to string descriptions, used
        for the help page.  Defaults to ``self.TAGSET``.
    """
    self._chunk_label = chunk_label

    if tagset is None:
        tagset = self.TAGSET
    self.tagset = tagset

    # Named development sets:
    if devset is None:
        if devset_name == "conll2000":
            devset = conll2000.chunked_sents("train.txt")  # [:100]
        elif devset_name == "treebank":
            # BUG FIX: the original tested ``devset == "treebank"``, but
            # ``devset`` is always None in this branch, so the treebank
            # corpus could never be selected; the *name* must be tested.
            devset = treebank_chunk.chunked_sents()  # [:100]
        else:
            raise ValueError("Unknown development set %s" % devset_name)

    self.chunker = None  # the chunker built from the grammar string
    self.grammar = grammar  # the unparsed grammar string
    self.normalized_grammar = None  # normalized version of self.grammar
    self.grammar_changed = 0  # the last time() the grammar was changed
    self.devset = devset  # development set: a list of chunked sentences
    self.devset_name = devset_name  # name of the devset (for save files)
    # Index into the devset of the first instance currently being viewed.
    self.devset_index = -1

    self._last_keypress = 0  # time() when a key was most recently pressed
    # (grammar, precision, recall, fscore) tuples for grammars already tried.
    self._history = []
    # While scrolling through previous grammars, tracks which one is shown.
    self._history_index = 0

    self._eval_grammar = None  # grammar currently evaluated by the demon
    self._eval_normalized_grammar = None  # normalized copy of _eval_grammar
    # Index of the next devset sentence the eval demon should look at.
    self._eval_index = 0
    # ChunkScore tracking the current grammar's devset score.
    self._eval_score = ChunkScore(chunk_label=chunk_label)

    # Set up the main window.
    top = self.top = Tk()
    top.geometry("+50+50")
    top.title("Regexp Chunk Parser App")
    top.bind("<Control-q>", self.destroy)

    # Variable that restricts how much of the devset we look at.
    self._devset_size = IntVar(top)
    self._devset_size.set(100)

    # Set up all the tkinter widgets
    self._init_fonts(top)
    self._init_widgets(top)
    self._init_bindings(top)
    self._init_menubar(top)
    self.grammarbox.focus()

    # If a grammar was given, then display it.
    if grammar:
        self.grammarbox.insert("end", grammar + "\n")
        self.grammarbox.mark_set("insert", "1.0")

    # Display the first item in the development set
    self.show_devset(0)
    self.update()
def _init_bindings(self, top):
    """Install keyboard shortcuts and widget event handlers."""
    # Window-level shortcuts: devset navigation, trace toggle, live
    # re-evaluation on typing, and grammar save/load.
    for sequence, handler in (
        ("<Control-n>", self._devset_next),
        ("<Control-p>", self._devset_prev),
        ("<Control-t>", self.toggle_show_trace),
        ("<KeyPress>", self.update),
        ("<Control-s>", lambda e: self.save_grammar()),
        ("<Control-o>", lambda e: self.load_grammar()),
    ):
        top.bind(sequence, handler)
    # The navigation shortcuts must also work while the grammar box has
    # keyboard focus.
    self.grammarbox.bind("<Control-t>", self.toggle_show_trace)
    self.grammarbox.bind("<Control-n>", self._devset_next)
    self.grammarbox.bind("<Control-p>", self._devset_prev)
    # Redraw the eval graph when the window size changes
    self.evalbox.bind("<Configure>", self._eval_plot)
def _init_fonts(self, top):
    """Create the main and small helvetica fonts, sized via self._size."""
    # What's our font size (default=same as sysfont)
    self._size = IntVar(top)
    self._size.set(20)
    base = self._size.get()
    # Negative Tk font sizes are measured in pixels.
    self._font = Font(family="helvetica", size=-base)
    self._smallfont = Font(family="helvetica", size=-(int(base * 14 // 20)))
def _init_menubar(self, parent):
    """Build the File / View / Development-Set / Help menus."""
    menubar = Menu(parent)

    filemenu = Menu(menubar, tearoff=0)
    filemenu.add_command(label="Reset Application", underline=0, command=self.reset)
    filemenu.add_command(
        label="Save Current Grammar",
        underline=0,
        accelerator="Ctrl-s",
        command=self.save_grammar,
    )
    filemenu.add_command(
        label="Load Grammar",
        underline=0,
        accelerator="Ctrl-o",
        command=self.load_grammar,
    )
    filemenu.add_command(
        label="Save Grammar History", underline=13, command=self.save_history
    )
    filemenu.add_command(
        label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q"
    )
    menubar.add_cascade(label="File", underline=0, menu=filemenu)

    # One radio button per font size; the value is the pixel size that
    # self.resize reads back from self._size.
    viewmenu = Menu(menubar, tearoff=0)
    for label, size in (
        ("Tiny", 10),
        ("Small", 16),
        ("Medium", 20),
        ("Large", 24),
        ("Huge", 34),
    ):
        viewmenu.add_radiobutton(
            label=label,
            variable=self._size,
            underline=0,
            value=size,
            command=self.resize,
        )
    menubar.add_cascade(label="View", underline=0, menu=viewmenu)

    # One radio button per development-set truncation size.
    devsetmenu = Menu(menubar, tearoff=0)
    for size in (50, 100, 200, 500):
        devsetmenu.add_radiobutton(
            label="%d sentences" % size,
            variable=self._devset_size,
            value=size,
            command=self.set_devset_size,
        )
    menubar.add_cascade(label="Development-Set", underline=0, menu=devsetmenu)

    helpmenu = Menu(menubar, tearoff=0)
    helpmenu.add_command(label="About", underline=0, command=self.about)
    menubar.add_cascade(label="Help", underline=0, menu=helpmenu)

    parent.config(menu=menubar)
def toggle_show_trace(self, *e):
if self._showing_trace:
self.show_devset()
else:
self.show_trace()
return "break"
_SCALE_N = 5 # center on the last 5 examples.
_DRAW_LINES = False
def _eval_plot(self, *e, **config):
    """
    Redraw the precision/recall scatter plot of ``self._history`` on the
    ``self.evalbox`` canvas.

    Each history entry is drawn as a dot (recall on the x axis, precision
    on the y axis); the entry at ``self._history_index`` is highlighted
    and its scores are written to the status bar.  Accepts optional
    ``width``/``height`` keyword overrides (used when the canvas has not
    been mapped yet); extra positional args let this double as a Tkinter
    <Configure> event handler.
    """
    width = config.get("width", self.evalbox.winfo_width())
    height = config.get("height", self.evalbox.winfo_height())

    # Clear the canvas
    self.evalbox.delete("all")

    # Draw the precision & recall labels.
    tag = self.evalbox.create_text(
        10, height // 2 - 10, justify="left", anchor="w", text="Precision"
    )
    # Plot area: to the right of the y label, above the x label.
    left, right = self.evalbox.bbox(tag)[2] + 5, width - 10
    tag = self.evalbox.create_text(
        left + (width - left) // 2,
        height - 10,
        anchor="s",
        text="Recall",
        justify="center",
    )
    top, bot = 10, self.evalbox.bbox(tag)[1] - 10

    # Draw masks for clipping the plot (rectangles lowered below the data
    # hide anything drawn outside the axes).
    bg = self._EVALBOX_PARAMS["background"]
    self.evalbox.lower(
        self.evalbox.create_rectangle(0, 0, left - 1, 5000, fill=bg, outline=bg)
    )
    self.evalbox.lower(
        self.evalbox.create_rectangle(0, bot + 1, 5000, 5000, fill=bg, outline=bg)
    )

    # Calculate the plot's scale.  With autoscale on, zoom to the range
    # covered by the last _SCALE_N history entries (padded by 0.01);
    # otherwise use the full [0, 1] range on both axes.
    # NOTE(review): self._autoscale / self._eval_lines appear to be
    # checkbox variables created elsewhere in this class -- confirm.
    if self._autoscale.get() and len(self._history) > 1:
        max_precision = max_recall = 0
        min_precision = min_recall = 1
        for i in range(1, min(len(self._history), self._SCALE_N + 1)):
            grammar, precision, recall, fmeasure = self._history[-i]
            min_precision = min(precision, min_precision)
            min_recall = min(recall, min_recall)
            max_precision = max(precision, max_precision)
            max_recall = max(recall, max_recall)
        #             if max_precision-min_precision > max_recall-min_recall:
        #                 min_recall -= (max_precision-min_precision)/2
        #                 max_recall += (max_precision-min_precision)/2
        #             else:
        #                 min_precision -= (max_recall-min_recall)/2
        #                 max_precision += (max_recall-min_recall)/2
        #             if min_recall < 0:
        #                 max_recall -= min_recall
        #                 min_recall = 0
        #             if min_precision < 0:
        #                 max_precision -= min_precision
        #                 min_precision = 0
        min_precision = max(min_precision - 0.01, 0)
        min_recall = max(min_recall - 0.01, 0)
        max_precision = min(max_precision + 0.01, 1)
        max_recall = min(max_recall + 0.01, 1)
    else:
        min_precision = min_recall = 0
        max_precision = max_recall = 1

    # Draw the axis lines & grid lines (one per 10% in data coordinates).
    for i in range(11):
        x = left + (right - left) * (
            (i / 10.0 - min_recall) / (max_recall - min_recall)
        )
        y = bot - (bot - top) * (
            (i / 10.0 - min_precision) / (max_precision - min_precision)
        )
        if left < x < right:
            self.evalbox.create_line(x, top, x, bot, fill="#888")
        if top < y < bot:
            self.evalbox.create_line(left, y, right, y, fill="#888")
    self.evalbox.create_line(left, top, left, bot)
    self.evalbox.create_line(left, bot, right, bot)

    # Display the plot's scale (axis endpoint percentages).
    self.evalbox.create_text(
        left - 3,
        bot,
        justify="right",
        anchor="se",
        text="%d%%" % (100 * min_precision),
    )
    self.evalbox.create_text(
        left - 3,
        top,
        justify="right",
        anchor="ne",
        text="%d%%" % (100 * max_precision),
    )
    self.evalbox.create_text(
        left,
        bot + 3,
        justify="center",
        anchor="nw",
        text="%d%%" % (100 * min_recall),
    )
    self.evalbox.create_text(
        right,
        bot + 3,
        justify="center",
        anchor="ne",
        text="%d%%" % (100 * max_recall),
    )

    # Display the scores.
    prev_x = prev_y = None
    for i, (_, precision, recall, fscore) in enumerate(self._history):
        x = left + (right - left) * (
            (recall - min_recall) / (max_recall - min_recall)
        )
        y = bot - (bot - top) * (
            (precision - min_precision) / (max_precision - min_precision)
        )
        if i == self._history_index:
            # Currently-selected grammar: bright green dot on top, plus a
            # textual summary in the status bar.
            self.evalbox.create_oval(
                x - 2, y - 2, x + 2, y + 2, fill="#0f0", outline="#000"
            )
            self.status["text"] = (
                "Precision: %.2f%%\t" % (precision * 100)
                + "Recall: %.2f%%\t" % (recall * 100)
                + "F-score: %.2f%%" % (fscore * 100)
            )
        else:
            # Other grammars: pale dots lowered beneath the selected one.
            self.evalbox.lower(
                self.evalbox.create_oval(
                    x - 2, y - 2, x + 2, y + 2, fill="#afa", outline="#8c8"
                )
            )
        # Optionally connect consecutive history points with lines.
        if prev_x is not None and self._eval_lines.get():
            self.evalbox.lower(
                self.evalbox.create_line(prev_x, prev_y, x, y, fill="#8c8")
            )
        prev_x, prev_y = x, y
_eval_demon_running = False
def _eval_demon(self):
    """
    Incrementally evaluate the current grammar on the development set.

    Runs every ``_EVAL_FREQ`` seconds (via ``self.top.after``), scoring
    ``_EVAL_CHUNK`` sentences per pass so the GUI stays responsive.  When
    the whole (truncated) devset has been scored, the grammar and its
    precision/recall/f-score are appended to ``self._history`` and the
    plot is redrawn; otherwise progress is shown in the status bar and
    another pass is scheduled.
    """
    if self.top is None:
        return
    if self.chunker is None:
        self._eval_demon_running = False
        return

    # Note our starting time.
    t0 = time.time()

    # If the user is still typing, then wait for them to finish before
    # evaluating the (not-yet-stable) grammar.
    if (
        time.time() - self._last_keypress < self._EVAL_DELAY
        and self.normalized_grammar != self._eval_normalized_grammar
    ):
        self._eval_demon_running = True
        return self.top.after(int(self._EVAL_FREQ * 1000), self._eval_demon)

    # If the grammar changed, restart the evaluation.
    if self.normalized_grammar != self._eval_normalized_grammar:
        # Check if we've seen this grammar already.  If so, then
        # just reuse the old evaluation values instead of re-scoring.
        for (g, p, r, f) in self._history:
            if self.normalized_grammar == self.normalize_grammar(g):
                self._history.append((g, p, r, f))
                self._history_index = len(self._history) - 1
                self._eval_plot()
                self._eval_demon_running = False
                self._eval_normalized_grammar = None
                return
        self._eval_index = 0
        self._eval_score = ChunkScore(chunk_label=self._chunk_label)
        self._eval_grammar = self.grammar
        self._eval_normalized_grammar = self.normalized_grammar

    # If the grammar is empty, then don't bother evaluating it, or
    # recording it in history -- the score will just be 0.
    if self.normalized_grammar.strip() == "":
        # self._eval_index = self._devset_size.get()
        self._eval_demon_running = False
        return

    # Score the next set of examples
    for gold in self.devset[
        self._eval_index : min(
            self._eval_index + self._EVAL_CHUNK, self._devset_size.get()
        )
    ]:
        guess = self._chunkparse(gold.leaves())
        self._eval_score.score(gold, guess)

    # update our index in the devset.
    self._eval_index += self._EVAL_CHUNK

    # Check if we're done
    if self._eval_index >= self._devset_size.get():
        # Whole devset scored: record the result and redraw the plot.
        self._history.append(
            (
                self._eval_grammar,
                self._eval_score.precision(),
                self._eval_score.recall(),
                self._eval_score.f_measure(),
            )
        )
        self._history_index = len(self._history) - 1
        self._eval_plot()
        self._eval_demon_running = False
        self._eval_normalized_grammar = None
    else:
        # More to do: report progress, adapt the chunk size to the time
        # this pass took, and schedule the next pass.
        progress = 100 * self._eval_index / self._devset_size.get()
        self.status["text"] = "Evaluating on Development Set (%d%%)" % progress
        self._eval_demon_running = True
        self._adaptively_modify_eval_chunk(time.time() - t0)
        self.top.after(int(self._EVAL_FREQ * 1000), self._eval_demon)
def _adaptively_modify_eval_chunk(self, t):
"""
Modify _EVAL_CHUNK to try to keep the amount of time that the
eval demon takes between _EVAL_DEMON_MIN and _EVAL_DEMON_MAX.
:param t: The amount of time that the eval demon took.
"""
if t > self._EVAL_DEMON_MAX and self._EVAL_CHUNK > 5:
self._EVAL_CHUNK = min(
self._EVAL_CHUNK - 1,
max(
int(self._EVAL_CHUNK * (self._EVAL_DEMON_MAX / t)),
self._EVAL_CHUNK - 10,
),
)
elif t < self._EVAL_DEMON_MIN:
self._EVAL_CHUNK = max(
self._EVAL_CHUNK + 1,
min(
int(self._EVAL_CHUNK * (self._EVAL_DEMON_MIN / t)),
self._EVAL_CHUNK + 10,
),
)
    def _init_widgets(self, top):
        """Construct and lay out all widgets of the chunk-parser window:
        grammar box, help box, devset box, evaluation plot, buttons, and
        status bar."""
        frame0 = Frame(top, **self._FRAME_PARAMS)
        frame0.grid_columnconfigure(0, weight=4)
        frame0.grid_columnconfigure(3, weight=2)
        frame0.grid_rowconfigure(1, weight=1)
        frame0.grid_rowconfigure(5, weight=1)
        # The grammar
        self.grammarbox = Text(frame0, font=self._font, **self._GRAMMARBOX_PARAMS)
        self.grammarlabel = Label(
            frame0,
            font=self._font,
            text="Grammar:",
            highlightcolor="black",
            background=self._GRAMMARBOX_PARAMS["background"],
        )
        self.grammarlabel.grid(column=0, row=0, sticky="SW")
        self.grammarbox.grid(column=0, row=1, sticky="NEWS")
        # Scroll bar for grammar
        grammar_scrollbar = Scrollbar(frame0, command=self.grammarbox.yview)
        grammar_scrollbar.grid(column=1, row=1, sticky="NWS")
        self.grammarbox.config(yscrollcommand=grammar_scrollbar.set)
        # grammar buttons
        bg = self._FRAME_PARAMS["background"]
        frame3 = Frame(frame0, background=bg)
        frame3.grid(column=0, row=2, sticky="EW")
        Button(
            frame3,
            text="Prev Grammar",
            command=self._history_prev,
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        Button(
            frame3,
            text="Next Grammar",
            command=self._history_next,
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        # Help box
        self.helpbox = Text(frame0, font=self._smallfont, **self._HELPBOX_PARAMS)
        self.helpbox.grid(column=3, row=1, sticky="NEWS")
        self.helptabs = {}
        bg = self._FRAME_PARAMS["background"]
        helptab_frame = Frame(frame0, background=bg)
        helptab_frame.grid(column=3, row=0, sticky="SW")
        for i, (tab, tabstops, text) in enumerate(self.HELP):
            label = Label(helptab_frame, text=tab, font=self._smallfont)
            label.grid(column=i * 2, row=0, sticky="S")
            # help_frame.grid_columnconfigure(i, weight=1)
            # label.pack(side='left')
            label.bind("<ButtonPress>", lambda e, tab=tab: self.show_help(tab))
            self.helptabs[tab] = label
            Frame(
                helptab_frame, height=1, width=self._HELPTAB_SPACER, background=bg
            ).grid(column=i * 2 + 1, row=0)
        self.helptabs[self.HELP[0][0]].configure(font=self._font)
        self.helpbox.tag_config("elide", elide=True)
        for (tag, params) in self.HELP_AUTOTAG:
            self.helpbox.tag_config("tag-%s" % tag, **params)
        self.show_help(self.HELP[0][0])
        # Scroll bar for helpbox
        help_scrollbar = Scrollbar(frame0, command=self.helpbox.yview)
        self.helpbox.config(yscrollcommand=help_scrollbar.set)
        help_scrollbar.grid(column=4, row=1, sticky="NWS")
        # The dev set
        frame4 = Frame(frame0, background=self._FRAME_PARAMS["background"])
        self.devsetbox = Text(frame4, font=self._font, **self._DEVSETBOX_PARAMS)
        self.devsetbox.pack(expand=True, fill="both")
        self.devsetlabel = Label(
            frame0,
            font=self._font,
            text="Development Set:",
            justify="right",
            background=self._DEVSETBOX_PARAMS["background"],
        )
        self.devsetlabel.grid(column=0, row=4, sticky="SW")
        frame4.grid(column=0, row=5, sticky="NEWS")
        # dev set scrollbars
        self.devset_scroll = Scrollbar(frame0, command=self._devset_scroll)
        self.devset_scroll.grid(column=1, row=5, sticky="NWS")
        self.devset_xscroll = Scrollbar(
            frame4, command=self.devsetbox.xview, orient="horiz"
        )
        self.devsetbox["xscrollcommand"] = self.devset_xscroll.set
        self.devset_xscroll.pack(side="bottom", fill="x")
        # dev set buttons
        bg = self._FRAME_PARAMS["background"]
        frame1 = Frame(frame0, background=bg)
        frame1.grid(column=0, row=7, sticky="EW")
        Button(
            frame1,
            text="Prev Example (Ctrl-p)",
            command=self._devset_prev,
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        Button(
            frame1,
            text="Next Example (Ctrl-n)",
            command=self._devset_next,
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        self.devset_button = Button(
            frame1,
            text="Show example",
            command=self.show_devset,
            state="disabled",
            **self._BUTTON_PARAMS,
        )
        self.devset_button.pack(side="right")
        self.trace_button = Button(
            frame1, text="Show trace", command=self.show_trace, **self._BUTTON_PARAMS
        )
        self.trace_button.pack(side="right")
        # evaluation box
        self.evalbox = Canvas(frame0, **self._EVALBOX_PARAMS)
        label = Label(
            frame0,
            font=self._font,
            text="Evaluation:",
            justify="right",
            background=self._EVALBOX_PARAMS["background"],
        )
        label.grid(column=3, row=4, sticky="SW")
        self.evalbox.grid(column=3, row=5, sticky="NEWS", columnspan=2)
        # evaluation box buttons
        bg = self._FRAME_PARAMS["background"]
        frame2 = Frame(frame0, background=bg)
        frame2.grid(column=3, row=7, sticky="EW")
        self._autoscale = IntVar(self.top)
        self._autoscale.set(False)
        Checkbutton(
            frame2,
            variable=self._autoscale,
            command=self._eval_plot,
            text="Zoom",
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        self._eval_lines = IntVar(self.top)
        self._eval_lines.set(False)
        Checkbutton(
            frame2,
            variable=self._eval_lines,
            command=self._eval_plot,
            text="Lines",
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        Button(frame2, text="History", **self._BUTTON_PARAMS).pack(side="right")
        # The status label
        self.status = Label(frame0, font=self._font, **self._STATUS_PARAMS)
        self.status.grid(column=0, row=9, sticky="NEW", padx=3, pady=2, columnspan=5)
        # Help box & devset box can't be edited.
        self.helpbox["state"] = "disabled"
        self.devsetbox["state"] = "disabled"
        # Spacers
        bg = self._FRAME_PARAMS["background"]
        Frame(frame0, height=10, width=0, background=bg).grid(column=0, row=3)
        Frame(frame0, height=0, width=10, background=bg).grid(column=2, row=0)
        Frame(frame0, height=6, width=0, background=bg).grid(column=0, row=8)
        # pack the frame.
        frame0.pack(fill="both", expand=True)
        # Set up colors for the devset box
        self.devsetbox.tag_config("true-pos", background="#afa", underline="True")
        self.devsetbox.tag_config("false-neg", underline="True", foreground="#800")
        self.devsetbox.tag_config("false-pos", background="#faa")
        self.devsetbox.tag_config("trace", foreground="#666", wrap="none")
        self.devsetbox.tag_config("wrapindent", lmargin2=30, wrap="none")
        self.devsetbox.tag_config("error", foreground="#800")
        # And for the grammarbox
        self.grammarbox.tag_config("error", background="#fec")
        self.grammarbox.tag_config("comment", foreground="#840")
        self.grammarbox.tag_config("angle", foreground="#00f")
        self.grammarbox.tag_config("brace", foreground="#0a0")
        self.grammarbox.tag_config("hangindent", lmargin1=0, lmargin2=40)
_showing_trace = False
    def show_trace(self, *e):
        """
        Show a rule-by-rule trace for the current devset example: the tag
        sequence is re-displayed once per grammar rule, with chunk
        true-positives/false-negatives/false-positives highlighted at each
        stage.
        """
        self._showing_trace = True
        self.trace_button["state"] = "disabled"
        self.devset_button["state"] = "normal"
        self.devsetbox["state"] = "normal"
        # self.devsetbox['wrap'] = 'none'
        self.devsetbox.delete("1.0", "end")
        self.devsetlabel["text"] = "Development Set (%d/%d)" % (
            (self.devset_index + 1, self._devset_size.get())
        )
        if self.chunker is None:
            self.devsetbox.insert("1.0", "Trace: waiting for a valid grammar.")
            self.devsetbox.tag_add("error", "1.0", "end")
            return  # can't do anything more
        gold_tree = self.devset[self.devset_index]
        rules = self.chunker.rules()
        # Calculate the tag sequence
        tagseq = "\t"
        charnum = [1]
        for wordnum, (word, pos) in enumerate(gold_tree.leaves()):
            tagseq += "%s " % pos
            charnum.append(len(tagseq))
        # Every trace line shows the same tag sequence, so each row of the
        # char-position map is identical.
        self.charnum = {
            (i, j): charnum[j]
            for i in range(len(rules) + 1)
            for j in range(len(charnum))
        }
        self.linenum = {i: i * 2 + 2 for i in range(len(rules) + 1)}
        for i in range(len(rules) + 1):
            if i == 0:
                self.devsetbox.insert("end", "Start:\n")
                self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c")
            else:
                self.devsetbox.insert("end", "Apply %s:\n" % rules[i - 1])
                self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c")
            # Display the tag sequence.
            self.devsetbox.insert("end", tagseq + "\n")
            self.devsetbox.tag_add("wrapindent", "end -2c linestart", "end -2c")
            # Run a partial parser, and extract gold & test chunks
            # NOTE(review): the partial parser `chunker` (rules[:i]) is built
            # but never used -- `_chunkparse` parses with self.chunker (the
            # full grammar), so every stage shows the final parse.  Confirm
            # whether the partial chunker was meant to be used here.
            chunker = RegexpChunkParser(rules[:i])
            test_tree = self._chunkparse(gold_tree.leaves())
            gold_chunks = self._chunks(gold_tree)
            test_chunks = self._chunks(test_tree)
            # Compare them.
            for chunk in gold_chunks.intersection(test_chunks):
                self._color_chunk(i, chunk, "true-pos")
            for chunk in gold_chunks - test_chunks:
                self._color_chunk(i, chunk, "false-neg")
            for chunk in test_chunks - gold_chunks:
                self._color_chunk(i, chunk, "false-pos")
        self.devsetbox.insert("end", "Finished.\n")
        self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c")
        # This is a hack, because the x-scrollbar isn't updating its
        # position right -- I'm not sure what the underlying cause is
        # though.  (This is on OS X w/ python 2.5)
        self.top.after(100, self.devset_xscroll.set, 0, 0.3)
    def show_help(self, tab):
        """Display help-tab ``tab`` in the help box, dimming the other tab
        labels and converting the inline ``<tag>...</tag>`` markup to Tk
        text tags."""
        self.helpbox["state"] = "normal"
        self.helpbox.delete("1.0", "end")
        for (name, tabstops, text) in self.HELP:
            if name == tab:
                # Substitute the tagset table into the help text.
                text = text.replace(
                    "<<TAGSET>>",
                    "\n".join(
                        "\t%s\t%s" % item
                        for item in sorted(
                            list(self.tagset.items()),
                            key=lambda t_w: re.match(r"\w+", t_w[0])
                            and (0, t_w[0])
                            or (1, t_w[0]),
                        )
                    ),
                )
                self.helptabs[name].config(**self._HELPTAB_FG_PARAMS)
                self.helpbox.config(tabs=tabstops)
                self.helpbox.insert("1.0", text + "\n" * 20)
                C = "1.0 + %d chars"
                # Hide the markup itself with the 'elide' tag and style the
                # enclosed text with the corresponding 'tag-*' tag.
                for (tag, params) in self.HELP_AUTOTAG:
                    pattern = f"(?s)(<{tag}>)(.*?)(</{tag}>)"
                    for m in re.finditer(pattern, text):
                        self.helpbox.tag_add("elide", C % m.start(1), C % m.end(1))
                        self.helpbox.tag_add(
                            "tag-%s" % tag, C % m.start(2), C % m.end(2)
                        )
                        self.helpbox.tag_add("elide", C % m.start(3), C % m.end(3))
            else:
                self.helptabs[name].config(**self._HELPTAB_BG_PARAMS)
        self.helpbox["state"] = "disabled"
def _history_prev(self, *e):
self._view_history(self._history_index - 1)
return "break"
def _history_next(self, *e):
self._view_history(self._history_index + 1)
return "break"
    def _view_history(self, index):
        """
        Display history entry ``index``: restore its grammar text, rebuild
        the chunker from it, and refresh the score plot and devset display.
        ``index`` is clamped to the valid range.
        """
        # Bounds & sanity checking:
        index = max(0, min(len(self._history) - 1, index))
        if not self._history:
            return
        # Already viewing the requested history item?
        if index == self._history_index:
            return
        # Show the requested grammar.  It will get added to _history
        # only if they edit it (causing self.update() to get run.)
        self.grammarbox["state"] = "normal"
        self.grammarbox.delete("1.0", "end")
        self.grammarbox.insert("end", self._history[index][0])
        self.grammarbox.mark_set("insert", "1.0")
        self._history_index = index
        self._syntax_highlight_grammar(self._history[index][0])
        # Record the normalized grammar & regenerate the chunker.
        self.normalized_grammar = self.normalize_grammar(self._history[index][0])
        if self.normalized_grammar:
            rules = [
                RegexpChunkRule.fromstring(line)
                for line in self.normalized_grammar.split("\n")
            ]
        else:
            rules = []
        self.chunker = RegexpChunkParser(rules)
        # Show the score.
        self._eval_plot()
        # Update the devset box
        self._highlight_devset()
        if self._showing_trace:
            self.show_trace()
        # Update the grammar label
        if self._history_index < len(self._history) - 1:
            self.grammarlabel["text"] = "Grammar {}/{}:".format(
                self._history_index + 1,
                len(self._history),
            )
        else:
            self.grammarlabel["text"] = "Grammar:"
def _devset_next(self, *e):
self._devset_scroll("scroll", 1, "page")
return "break"
def _devset_prev(self, *e):
self._devset_scroll("scroll", -1, "page")
return "break"
def destroy(self, *e):
if self.top is None:
return
self.top.destroy()
self.top = None
def _devset_scroll(self, command, *args):
N = 1 # size of a page -- one sentence.
showing_trace = self._showing_trace
if command == "scroll" and args[1].startswith("unit"):
self.show_devset(self.devset_index + int(args[0]))
elif command == "scroll" and args[1].startswith("page"):
self.show_devset(self.devset_index + N * int(args[0]))
elif command == "moveto":
self.show_devset(int(float(args[0]) * self._devset_size.get()))
else:
assert 0, f"bad scroll command {command} {args}"
if showing_trace:
self.show_trace()
    def show_devset(self, index=None):
        """
        Display devset sentence ``index`` (default: the current one) in the
        devset box, with the current chunker's output highlighted.
        ``index`` is clamped to the devset size.
        """
        if index is None:
            index = self.devset_index
        # Bounds checking
        index = min(max(0, index), self._devset_size.get() - 1)
        if index == self.devset_index and not self._showing_trace:
            return
        self.devset_index = index
        self._showing_trace = False
        self.trace_button["state"] = "normal"
        self.devset_button["state"] = "disabled"
        # Clear the text box.
        self.devsetbox["state"] = "normal"
        self.devsetbox["wrap"] = "word"
        self.devsetbox.delete("1.0", "end")
        self.devsetlabel["text"] = "Development Set (%d/%d)" % (
            (self.devset_index + 1, self._devset_size.get())
        )
        # Add the sentences
        sample = self.devset[self.devset_index : self.devset_index + 1]
        self.charnum = {}
        self.linenum = {0: 1}
        for sentnum, sent in enumerate(sample):
            linestr = ""
            # Record each word's start/end offsets so _color_chunk can
            # locate chunks later.
            for wordnum, (word, pos) in enumerate(sent.leaves()):
                self.charnum[sentnum, wordnum] = len(linestr)
                linestr += f"{word}/{pos} "
                self.charnum[sentnum, wordnum + 1] = len(linestr)
            self.devsetbox.insert("end", linestr[:-1] + "\n\n")
        # Highlight chunks in the dev set
        if self.chunker is not None:
            self._highlight_devset()
        self.devsetbox["state"] = "disabled"
        # Update the scrollbar
        first = self.devset_index / self._devset_size.get()
        last = (self.devset_index + 2) / self._devset_size.get()
        self.devset_scroll.set(first, last)
def _chunks(self, tree):
chunks = set()
wordnum = 0
for child in tree:
if isinstance(child, Tree):
if child.label() == self._chunk_label:
chunks.add((wordnum, wordnum + len(child)))
wordnum += len(child)
else:
wordnum += 1
return chunks
    def _syntax_highlight_grammar(self, grammar):
        """Color comments, angle brackets, and braces in the grammar box."""
        if self.top is None:
            return
        self.grammarbox.tag_remove("comment", "1.0", "end")
        self.grammarbox.tag_remove("angle", "1.0", "end")
        self.grammarbox.tag_remove("brace", "1.0", "end")
        self.grammarbox.tag_add("hangindent", "1.0", "end")
        for lineno, line in enumerate(grammar.split("\n")):
            if not line.strip():
                continue
            # Group 2 matches an (unescaped) trailing comment, if any.
            m = re.match(r"(\\.|[^#])*(#.*)?", line)
            comment_start = None
            if m.group(2):
                comment_start = m.start(2)
                s = "%d.%d" % (lineno + 1, m.start(2))
                e = "%d.%d" % (lineno + 1, m.end(2))
                self.grammarbox.tag_add("comment", s, e)
            for m in re.finditer("[<>{}]", line):
                # Don't highlight brackets inside the comment.
                if comment_start is not None and m.start() >= comment_start:
                    break
                s = "%d.%d" % (lineno + 1, m.start())
                e = "%d.%d" % (lineno + 1, m.end())
                if m.group() in "<>":
                    self.grammarbox.tag_add("angle", s, e)
                else:
                    self.grammarbox.tag_add("brace", s, e)
def _grammarcheck(self, grammar):
if self.top is None:
return
self.grammarbox.tag_remove("error", "1.0", "end")
self._grammarcheck_errs = []
for lineno, line in enumerate(grammar.split("\n")):
line = re.sub(r"((\\.|[^#])*)(#.*)?", r"\1", line)
line = line.strip()
if line:
try:
RegexpChunkRule.fromstring(line)
except ValueError as e:
self.grammarbox.tag_add(
"error", "%s.0" % (lineno + 1), "%s.0 lineend" % (lineno + 1)
)
self.status["text"] = ""
def update(self, *event):
# Record when update was called (for grammarcheck)
if event:
self._last_keypress = time.time()
# Read the grammar from the Text box.
self.grammar = grammar = self.grammarbox.get("1.0", "end")
# If the grammar hasn't changed, do nothing:
normalized_grammar = self.normalize_grammar(grammar)
if normalized_grammar == self.normalized_grammar:
return
else:
self.normalized_grammar = normalized_grammar
# If the grammar has changed, and we're looking at history,
# then stop looking at history.
if self._history_index < len(self._history) - 1:
self.grammarlabel["text"] = "Grammar:"
self._syntax_highlight_grammar(grammar)
# The grammar has changed; try parsing it. If it doesn't
# parse, do nothing. (flag error location?)
try:
# Note: the normalized grammar has no blank lines.
if normalized_grammar:
rules = [
RegexpChunkRule.fromstring(line)
for line in normalized_grammar.split("\n")
]
else:
rules = []
except ValueError as e:
# Use the un-normalized grammar for error highlighting.
self._grammarcheck(grammar)
self.chunker = None
return
self.chunker = RegexpChunkParser(rules)
self.grammarbox.tag_remove("error", "1.0", "end")
self.grammar_changed = time.time()
# Display the results
if self._showing_trace:
self.show_trace()
else:
self._highlight_devset()
# Start the eval demon
if not self._eval_demon_running:
self._eval_demon()
    def _highlight_devset(self, sample=None):
        """
        Chunk-parse each sentence in ``sample`` (default: the currently
        displayed sentence) and color correct, missed, and spurious chunks.
        """
        if sample is None:
            sample = self.devset[self.devset_index : self.devset_index + 1]
        self.devsetbox.tag_remove("true-pos", "1.0", "end")
        self.devsetbox.tag_remove("false-neg", "1.0", "end")
        self.devsetbox.tag_remove("false-pos", "1.0", "end")
        # Run the grammar on the test cases.
        for sentnum, gold_tree in enumerate(sample):
            # Run the chunk parser
            test_tree = self._chunkparse(gold_tree.leaves())
            # Extract gold & test chunks
            gold_chunks = self._chunks(gold_tree)
            test_chunks = self._chunks(test_tree)
            # Compare them.
            for chunk in gold_chunks.intersection(test_chunks):
                self._color_chunk(sentnum, chunk, "true-pos")
            for chunk in gold_chunks - test_chunks:
                self._color_chunk(sentnum, chunk, "false-neg")
            for chunk in test_chunks - gold_chunks:
                self._color_chunk(sentnum, chunk, "false-pos")
def _chunkparse(self, words):
try:
return self.chunker.parse(words)
except (ValueError, IndexError) as e:
# There's an error somewhere in the grammar, but we're not sure
# exactly where, so just mark the whole grammar as bad.
# E.g., this is caused by: "({<NN>})"
self.grammarbox.tag_add("error", "1.0", "end")
# Treat it as tagging nothing:
return words
def _color_chunk(self, sentnum, chunk, tag):
start, end = chunk
self.devsetbox.tag_add(
tag,
f"{self.linenum[sentnum]}.{self.charnum[sentnum, start]}",
f"{self.linenum[sentnum]}.{self.charnum[sentnum, end] - 1}",
)
def reset(self):
# Clear various variables
self.chunker = None
self.grammar = None
self.normalized_grammar = None
self.grammar_changed = 0
self._history = []
self._history_index = 0
# Update the on-screen display.
self.grammarbox.delete("1.0", "end")
self.show_devset(0)
self.update()
# self._eval_plot()
SAVE_GRAMMAR_TEMPLATE = (
"# Regexp Chunk Parsing Grammar\n"
"# Saved %(date)s\n"
"#\n"
"# Development set: %(devset)s\n"
"# Precision: %(precision)s\n"
"# Recall: %(recall)s\n"
"# F-score: %(fscore)s\n\n"
"%(grammar)s\n"
)
def save_grammar(self, filename=None):
if not filename:
ftypes = [("Chunk Gramamr", ".chunk"), ("All files", "*")]
filename = asksaveasfilename(filetypes=ftypes, defaultextension=".chunk")
if not filename:
return
if self._history and self.normalized_grammar == self.normalize_grammar(
self._history[-1][0]
):
precision, recall, fscore = (
"%.2f%%" % (100 * v) for v in self._history[-1][1:]
)
elif self.chunker is None:
precision = recall = fscore = "Grammar not well formed"
else:
precision = recall = fscore = "Not finished evaluation yet"
with open(filename, "w") as outfile:
outfile.write(
self.SAVE_GRAMMAR_TEMPLATE
% dict(
date=time.ctime(),
devset=self.devset_name,
precision=precision,
recall=recall,
fscore=fscore,
grammar=self.grammar.strip(),
)
)
def load_grammar(self, filename=None):
if not filename:
ftypes = [("Chunk Gramamr", ".chunk"), ("All files", "*")]
filename = askopenfilename(filetypes=ftypes, defaultextension=".chunk")
if not filename:
return
self.grammarbox.delete("1.0", "end")
self.update()
with open(filename) as infile:
grammar = infile.read()
grammar = re.sub(
r"^\# Regexp Chunk Parsing Grammar[\s\S]*" "F-score:.*\n", "", grammar
).lstrip()
self.grammarbox.insert("1.0", grammar)
self.update()
def save_history(self, filename=None):
if not filename:
ftypes = [("Chunk Gramamr History", ".txt"), ("All files", "*")]
filename = asksaveasfilename(filetypes=ftypes, defaultextension=".txt")
if not filename:
return
with open(filename, "w") as outfile:
outfile.write("# Regexp Chunk Parsing Grammar History\n")
outfile.write("# Saved %s\n" % time.ctime())
outfile.write("# Development set: %s\n" % self.devset_name)
for i, (g, p, r, f) in enumerate(self._history):
hdr = (
"Grammar %d/%d (precision=%.2f%%, recall=%.2f%%, "
"fscore=%.2f%%)"
% (i + 1, len(self._history), p * 100, r * 100, f * 100)
)
outfile.write("\n%s\n" % hdr)
outfile.write("".join(" %s\n" % line for line in g.strip().split()))
if not (
self._history
and self.normalized_grammar
== self.normalize_grammar(self._history[-1][0])
):
if self.chunker is None:
outfile.write("\nCurrent Grammar (not well-formed)\n")
else:
outfile.write("\nCurrent Grammar (not evaluated)\n")
outfile.write(
"".join(" %s\n" % line for line in self.grammar.strip().split())
)
def about(self, *e):
ABOUT = "NLTK RegExp Chunk Parser Application\n" + "Written by Edward Loper"
TITLE = "About: Regular Expression Chunk Parser Application"
try:
from tkinter.messagebox import Message
Message(message=ABOUT, title=TITLE).show()
except:
ShowText(self.top, TITLE, ABOUT)
def set_devset_size(self, size=None):
if size is not None:
self._devset_size.set(size)
self._devset_size.set(min(len(self.devset), self._devset_size.get()))
self.show_devset(1)
self.show_devset(0)
# what about history? Evaluated at diff dev set sizes!
def resize(self, size=None):
if size is not None:
self._size.set(size)
size = self._size.get()
self._font.configure(size=-(abs(size)))
self._smallfont.configure(size=min(-10, -(abs(size)) * 14 // 20))
def mainloop(self, *args, **kwargs):
"""
Enter the Tkinter mainloop. This function must be called if
this demo is created from a non-interactive program (e.g.
from a secript); otherwise, the demo will close as soon as
the script completes.
"""
if in_idle():
return
self.top.mainloop(*args, **kwargs)
def app():
    """Create a RegexpChunkApp and run its Tkinter mainloop."""
    application = RegexpChunkApp()
    application.mainloop()
170,879 | from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk
from tkinter.font import Font
from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment
from nltk.draw.util import CanvasFrame, EntryDialog, ShowText, TextWidget
from nltk.parse import SteppingRecursiveDescentParser
from nltk.tree import Tree
from nltk.util import in_idle
class RecursiveDescentApp:
"""
A graphical tool for exploring the recursive descent parser. The tool
displays the parser's tree and the remaining text, and allows the
user to control the parser's operation. In particular, the user
can expand subtrees on the frontier, match tokens on the frontier
against the text, and backtrack. A "step" button simply steps
through the parsing process, performing the operations that
``RecursiveDescentParser`` would use.
"""
    def __init__(self, grammar, sent, trace=0):
        """
        Build the GUI for stepping a ``SteppingRecursiveDescentParser``
        over ``sent`` using ``grammar``.

        :param grammar: The CFG used by the parser.
        :param sent: The list of tokens to parse.
        :param trace: Trace level, passed through to the parser.
        """
        self._sent = sent
        self._parser = SteppingRecursiveDescentParser(grammar, trace)
        # Set up the main window.
        self._top = Tk()
        self._top.title("Recursive Descent Parser Application")
        # Set up key bindings.
        self._init_bindings()
        # Initialize the fonts.
        self._init_fonts(self._top)
        # Animations.  animating_lock is a lock to prevent the demo
        # from performing new operations while it's animating.
        self._animation_frames = IntVar(self._top)
        self._animation_frames.set(5)
        self._animating_lock = 0
        self._autostep = 0
        # The user can hide the grammar.
        self._show_grammar = IntVar(self._top)
        self._show_grammar.set(1)
        # Create the basic frames.
        self._init_menubar(self._top)
        self._init_buttons(self._top)
        self._init_feedback(self._top)
        self._init_grammar(self._top)
        self._init_canvas(self._top)
        # Initialize the parser.
        self._parser.initialize(self._sent)
        # Resize callback
        self._canvas.bind("<Configure>", self._configure)
#########################################
## Initialization Helpers
#########################################
    def _init_fonts(self, root):
        """Create the fonts used by the GUI, sized from the system font."""
        # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
        self._sysfont = Font(font=Button()["font"])
        root.option_add("*Font", self._sysfont)
        # What's our font size (default=same as sysfont)
        self._size = IntVar(root)
        self._size.set(self._sysfont.cget("size"))
        self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get())
        self._font = Font(family="helvetica", size=self._size.get())
        # Negative Tk font sizes are in pixels; keep the sign when bumping.
        if self._size.get() < 0:
            big = self._size.get() - 2
        else:
            big = self._size.get() + 2
        self._bigfont = Font(family="helvetica", weight="bold", size=big)
    def _init_grammar(self, parent):
        """Build the listbox showing the grammar's productions (clicking a
        production applies it)."""
        # Grammar view.
        self._prodframe = listframe = Frame(parent)
        self._prodframe.pack(fill="both", side="left", padx=2)
        self._prodlist_label = Label(
            self._prodframe, font=self._boldfont, text="Available Expansions"
        )
        self._prodlist_label.pack()
        self._prodlist = Listbox(
            self._prodframe,
            selectmode="single",
            relief="groove",
            background="white",
            foreground="#909090",
            font=self._font,
            selectforeground="#004040",
            selectbackground="#c0f0c0",
        )
        self._prodlist.pack(side="right", fill="both", expand=1)
        self._productions = list(self._parser.grammar().productions())
        for production in self._productions:
            self._prodlist.insert("end", (" %s" % production))
        self._prodlist.config(height=min(len(self._productions), 25))
        # Add a scrollbar if there are more than 25 productions.
        if len(self._productions) > 25:
            listscroll = Scrollbar(self._prodframe, orient="vertical")
            self._prodlist.config(yscrollcommand=listscroll.set)
            listscroll.config(command=self._prodlist.yview)
            listscroll.pack(side="left", fill="y")
        # If they select a production, apply it.
        self._prodlist.bind("<<ListboxSelect>>", self._prodlist_select)
    def _init_bindings(self):
        """Bind keyboard shortcuts for parser operations and window control."""
        # Key bindings are a good thing.
        self._top.bind("<Control-q>", self.destroy)
        self._top.bind("<Control-x>", self.destroy)
        self._top.bind("<Escape>", self.destroy)
        self._top.bind("e", self.expand)
        # self._top.bind('<Alt-e>', self.expand)
        # self._top.bind('<Control-e>', self.expand)
        self._top.bind("m", self.match)
        self._top.bind("<Alt-m>", self.match)
        self._top.bind("<Control-m>", self.match)
        self._top.bind("b", self.backtrack)
        self._top.bind("<Alt-b>", self.backtrack)
        self._top.bind("<Control-b>", self.backtrack)
        self._top.bind("<Control-z>", self.backtrack)
        self._top.bind("<BackSpace>", self.backtrack)
        self._top.bind("a", self.autostep)
        # self._top.bind('<Control-a>', self.autostep)
        self._top.bind("<Control-space>", self.autostep)
        self._top.bind("<Control-c>", self.cancel_autostep)
        self._top.bind("<space>", self.step)
        self._top.bind("<Delete>", self.reset)
        self._top.bind("<Control-p>", self.postscript)
        # self._top.bind('<h>', self.help)
        # self._top.bind('<Alt-h>', self.help)
        self._top.bind("<Control-h>", self.help)
        self._top.bind("<F1>", self.help)
        # self._top.bind('<g>', self.toggle_grammar)
        # self._top.bind('<Alt-g>', self.toggle_grammar)
        # self._top.bind('<Control-g>', self.toggle_grammar)
        self._top.bind("<Control-g>", self.edit_grammar)
        self._top.bind("<Control-t>", self.edit_sentence)
    def _init_buttons(self, parent):
        """Build the row of operation buttons (Step, Autostep, Expand,
        Match, Backtrack)."""
        # Set up the frames.
        self._buttonframe = buttonframe = Frame(parent)
        buttonframe.pack(fill="none", side="bottom", padx=3, pady=2)
        Button(
            buttonframe,
            text="Step",
            background="#90c0d0",
            foreground="black",
            command=self.step,
        ).pack(side="left")
        Button(
            buttonframe,
            text="Autostep",
            background="#90c0d0",
            foreground="black",
            command=self.autostep,
        ).pack(side="left")
        Button(
            buttonframe,
            text="Expand",
            underline=0,
            background="#90f090",
            foreground="black",
            command=self.expand,
        ).pack(side="left")
        Button(
            buttonframe,
            text="Match",
            underline=0,
            background="#90f090",
            foreground="black",
            command=self.match,
        ).pack(side="left")
        Button(
            buttonframe,
            text="Backtrack",
            underline=0,
            background="#f0a0a0",
            foreground="black",
            command=self.backtrack,
        ).pack(side="left")
        # Replace autostep...
        # self._autostep_button = Button(buttonframe, text='Autostep',
        #                               underline=0, command=self.autostep)
        # self._autostep_button.pack(side='left')
def _configure(self, event):
self._autostep = 0
(x1, y1, x2, y2) = self._cframe.scrollregion()
y2 = event.height - 6
self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2)
self._redraw()
    def _init_feedback(self, parent):
        """Build the "Last Operation" feedback bar at the window bottom."""
        self._feedbackframe = feedbackframe = Frame(parent)
        feedbackframe.pack(fill="x", side="bottom", padx=3, pady=3)
        self._lastoper_label = Label(
            feedbackframe, text="Last Operation:", font=self._font
        )
        self._lastoper_label.pack(side="left")
        lastoperframe = Frame(feedbackframe, relief="sunken", border=1)
        lastoperframe.pack(fill="x", side="right", expand=1, padx=5)
        self._lastoper1 = Label(
            lastoperframe, foreground="#007070", background="#f0f0f0", font=self._font
        )
        self._lastoper2 = Label(
            lastoperframe,
            anchor="w",
            width=30,
            foreground="#004040",
            background="#f0f0f0",
            font=self._font,
        )
        self._lastoper1.pack(side="left")
        self._lastoper2.pack(side="left", fill="x", expand=1)
    def _init_canvas(self, parent):
        """Build the scrollable canvas that shows the parse tree and text."""
        self._cframe = CanvasFrame(
            parent,
            background="white",
            # width=525, height=250,
            closeenough=10,
            border=2,
            relief="sunken",
        )
        self._cframe.pack(expand=1, fill="both", side="top", pady=2)
        canvas = self._canvas = self._cframe.canvas()
        # Initially, there's no tree or text
        self._tree = None
        self._textwidgets = []
        self._textline = None
    def _init_menubar(self, parent):
        """Build the File/Edit/Apply/View/Animate/Help menu bar."""
        menubar = Menu(parent)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(
            label="Reset Parser", underline=0, command=self.reset, accelerator="Del"
        )
        filemenu.add_command(
            label="Print to Postscript",
            underline=0,
            command=self.postscript,
            accelerator="Ctrl-p",
        )
        filemenu.add_command(
            label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
        )
        menubar.add_cascade(label="File", underline=0, menu=filemenu)
        editmenu = Menu(menubar, tearoff=0)
        editmenu.add_command(
            label="Edit Grammar",
            underline=5,
            command=self.edit_grammar,
            accelerator="Ctrl-g",
        )
        editmenu.add_command(
            label="Edit Text",
            underline=5,
            command=self.edit_sentence,
            accelerator="Ctrl-t",
        )
        menubar.add_cascade(label="Edit", underline=0, menu=editmenu)
        rulemenu = Menu(menubar, tearoff=0)
        rulemenu.add_command(
            label="Step", underline=1, command=self.step, accelerator="Space"
        )
        rulemenu.add_separator()
        rulemenu.add_command(
            label="Match", underline=0, command=self.match, accelerator="Ctrl-m"
        )
        rulemenu.add_command(
            label="Expand", underline=0, command=self.expand, accelerator="Ctrl-e"
        )
        rulemenu.add_separator()
        rulemenu.add_command(
            label="Backtrack", underline=0, command=self.backtrack, accelerator="Ctrl-b"
        )
        menubar.add_cascade(label="Apply", underline=0, menu=rulemenu)
        viewmenu = Menu(menubar, tearoff=0)
        viewmenu.add_checkbutton(
            label="Show Grammar",
            underline=0,
            variable=self._show_grammar,
            command=self._toggle_grammar,
        )
        viewmenu.add_separator()
        # Font-size radio buttons share the self._size IntVar.
        viewmenu.add_radiobutton(
            label="Tiny",
            variable=self._size,
            underline=0,
            value=10,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Small",
            variable=self._size,
            underline=0,
            value=12,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Medium",
            variable=self._size,
            underline=0,
            value=14,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Large",
            variable=self._size,
            underline=0,
            value=18,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Huge",
            variable=self._size,
            underline=0,
            value=24,
            command=self.resize,
        )
        menubar.add_cascade(label="View", underline=0, menu=viewmenu)
        animatemenu = Menu(menubar, tearoff=0)
        # Animation-speed radio buttons share self._animation_frames.
        animatemenu.add_radiobutton(
            label="No Animation", underline=0, variable=self._animation_frames, value=0
        )
        animatemenu.add_radiobutton(
            label="Slow Animation",
            underline=0,
            variable=self._animation_frames,
            value=10,
            accelerator="-",
        )
        animatemenu.add_radiobutton(
            label="Normal Animation",
            underline=0,
            variable=self._animation_frames,
            value=5,
            accelerator="=",
        )
        animatemenu.add_radiobutton(
            label="Fast Animation",
            underline=0,
            variable=self._animation_frames,
            value=2,
            accelerator="+",
        )
        menubar.add_cascade(label="Animate", underline=1, menu=animatemenu)
        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About", underline=0, command=self.about)
        helpmenu.add_command(
            label="Instructions", underline=0, command=self.help, accelerator="F1"
        )
        menubar.add_cascade(label="Help", underline=0, menu=helpmenu)
        parent.config(menu=menubar)
#########################################
## Helper
#########################################
def _get(self, widget, treeloc):
for i in treeloc:
widget = widget.subtrees()[i]
if isinstance(widget, TreeSegmentWidget):
widget = widget.label()
return widget
#########################################
## Main draw procedure
#########################################
    def _redraw(self):
        """Redraw the entire canvas: parse tree, token text, separator
        line, and all highlighting."""
        canvas = self._canvas
        # Delete the old tree, widgets, etc.
        if self._tree is not None:
            self._cframe.destroy_widget(self._tree)
        for twidget in self._textwidgets:
            self._cframe.destroy_widget(twidget)
        if self._textline is not None:
            self._canvas.delete(self._textline)
        # Draw the tree.
        helv = ("helvetica", -self._size.get())
        bold = ("helvetica", -self._size.get(), "bold")
        attribs = {
            "tree_color": "#000000",
            "tree_width": 2,
            "node_font": bold,
            "leaf_font": helv,
        }
        tree = self._parser.tree()
        self._tree = tree_to_treesegment(canvas, tree, **attribs)
        self._cframe.add_widget(self._tree, 30, 5)
        # Draw the text.
        helv = ("helvetica", -self._size.get())
        bottom = y = self._cframe.scrollregion()[3]
        self._textwidgets = [
            TextWidget(canvas, word, font=self._font) for word in self._sent
        ]
        for twidget in self._textwidgets:
            self._cframe.add_widget(twidget, 0, 0)
            twidget.move(0, bottom - twidget.bbox()[3] - 5)
            y = min(y, twidget.bbox()[1])
        # Draw a line over the text, to separate it from the tree.
        self._textline = canvas.create_line(-5000, y - 5, 5000, y - 5, dash=".")
        # Highlight appropriate nodes.
        self._highlight_nodes()
        self._highlight_prodlist()
        # Make sure the text lines up.
        self._position_text()
def _redraw_quick(self):
    """Refresh highlighting and text alignment without rebuilding the
    tree widgets (sufficient after an animation finishes)."""
    # This should be more-or-less sufficient after an animation.
    self._highlight_nodes()
    self._highlight_prodlist()
    self._position_text()
def _highlight_nodes(self):
    """Colour the parser frontier: the first frontier node is green
    and bold (next to be processed); the rest are teal."""
    # Highlight the list of nodes to be checked.
    bold = ("helvetica", -self._size.get(), "bold")
    for treeloc in self._parser.frontier()[:1]:
        self._get(self._tree, treeloc)["color"] = "#20a050"
        self._get(self._tree, treeloc)["font"] = bold
    for treeloc in self._parser.frontier()[1:]:
        self._get(self._tree, treeloc)["color"] = "#008080"
def _highlight_prodlist(self):
    """Refill the production listbox, selecting the currently
    expandable productions and tagging already-tried ones with
    '(TRIED)'."""
    # Highlight the productions that can be expanded.
    # Boy, too bad tkinter doesn't implement Listbox.itemconfig;
    # that would be pretty useful here.
    self._prodlist.delete(0, "end")
    expandable = self._parser.expandable_productions()
    untried = self._parser.untried_expandable_productions()
    productions = self._productions
    for index in range(len(productions)):
        if productions[index] in expandable:
            if productions[index] in untried:
                self._prodlist.insert(index, " %s" % productions[index])
            else:
                self._prodlist.insert(index, " %s (TRIED)" % productions[index])
            self._prodlist.selection_set(index)
        else:
            self._prodlist.insert(index, " %s" % productions[index])
def _position_text(self):
    """Align sentence-text widgets with the tree: matched words are
    placed under their leaves and coloured green; unmatched words are
    greyed out and packed to the right.  On a complete parse, all
    words turn green and matched leaves drop down to the text line."""
    # Line up the text widgets that are matched against the tree
    numwords = len(self._sent)
    num_matched = numwords - len(self._parser.remaining_text())
    leaves = self._tree_leaves()[:num_matched]
    xmax = self._tree.bbox()[0]
    for i in range(0, len(leaves)):
        widget = self._textwidgets[i]
        leaf = leaves[i]
        widget["color"] = "#006040"
        leaf["color"] = "#006040"
        widget.move(leaf.bbox()[0] - widget.bbox()[0], 0)
        xmax = widget.bbox()[2] + 10

    # Line up the text widgets that are not matched against the tree.
    for i in range(len(leaves), numwords):
        widget = self._textwidgets[i]
        widget["color"] = "#a0a0a0"
        widget.move(xmax - widget.bbox()[0], 0)
        xmax = widget.bbox()[2] + 10

    # If we have a complete parse, make everything green :)
    if self._parser.currently_complete():
        for twidget in self._textwidgets:
            twidget["color"] = "#00a000"

    # Move the matched leaves down to the text.
    for i in range(0, len(leaves)):
        widget = self._textwidgets[i]
        leaf = leaves[i]
        dy = widget.bbox()[1] - leaf.bbox()[3] - 10.0
        # Don't let a leaf overlap its parent node label.
        dy = max(dy, leaf.parent().label().bbox()[3] - leaf.bbox()[3] + 10)
        leaf.move(0, dy)
def _tree_leaves(self, tree=None):
    """Return the leaf widgets of *tree* (default: the full displayed
    tree), in left-to-right order."""
    if tree is None:
        tree = self._tree
    # A non-segment widget is itself a leaf.
    if not isinstance(tree, TreeSegmentWidget):
        return [tree]
    leaves = []
    for child in tree.subtrees():
        leaves.extend(self._tree_leaves(child))
    return leaves
#########################################
## Button Callbacks
#########################################
def destroy(self, *e):
    """Stop auto-stepping and close the window; safe to call twice."""
    self._autostep = 0
    if self._top is not None:
        top, self._top = self._top, None
        top.destroy()
def reset(self, *e):
    """Re-initialize the parser with the current sentence and redraw."""
    self._autostep = 0
    self._parser.initialize(self._sent)
    self._lastoper1["text"] = "Reset Application"
    self._lastoper2["text"] = ""
    self._redraw()
def autostep(self, *e):
    """Toggle auto-stepping; when turning it on, start stepping
    immediately (and ensure at least 2 animation frames)."""
    if self._animation_frames.get() == 0:
        self._animation_frames.set(2)
    if self._autostep:
        self._autostep = 0
    else:
        self._autostep = 1
        self._step()
def cancel_autostep(self, *e):
    """Stop auto-stepping without performing any further steps."""
    # self._autostep_button['text'] = 'Autostep'
    self._autostep = 0
# Make sure to stop auto-stepping if we get any user input.
def step(self, *e):
    """Perform one manual parser step (cancels auto-stepping)."""
    self._autostep = 0
    self._step()
def match(self, *e):
    """Perform one manual match (cancels auto-stepping)."""
    self._autostep = 0
    self._match()
def expand(self, *e):
    """Perform one manual expansion (cancels auto-stepping)."""
    self._autostep = 0
    self._expand()
def backtrack(self, *e):
    """Perform one manual backtrack (cancels auto-stepping)."""
    self._autostep = 0
    self._backtrack()
def _step(self):
    """Advance the parser by one operation, preferring expand, then
    match, then backtrack; report 'Finished' when none applies."""
    if self._animating_lock:
        return

    # Try expanding, matching, and backtracking (in that order)
    if self._expand():
        pass
    elif self._parser.untried_match() and self._match():
        pass
    elif self._backtrack():
        pass
    else:
        self._lastoper1["text"] = "Finished"
        self._lastoper2["text"] = ""
        self._autostep = 0

    # Check if we just completed a parse.
    if self._parser.currently_complete():
        self._autostep = 0
        self._lastoper2["text"] += " [COMPLETE PARSE]"
def _expand(self, *e):
    """Try to expand the current frontier node; animate and report the
    production used.  Return True on success, False if every expansion
    has been tried (None if an animation is in progress)."""
    if self._animating_lock:
        return
    old_frontier = self._parser.frontier()
    rv = self._parser.expand()
    if rv is not None:
        self._lastoper1["text"] = "Expand:"
        self._lastoper2["text"] = rv
        self._prodlist.selection_clear(0, "end")
        index = self._productions.index(rv)
        self._prodlist.selection_set(index)
        self._animate_expand(old_frontier[0])
        return True
    else:
        self._lastoper1["text"] = "Expand:"
        self._lastoper2["text"] = "(all expansions tried)"
        return False
def _match(self, *e):
    """Try to match the next input word against the frontier leaf;
    animate on success.  Return True/False (None if animating)."""
    if self._animating_lock:
        return
    old_frontier = self._parser.frontier()
    rv = self._parser.match()
    if rv is not None:
        self._lastoper1["text"] = "Match:"
        self._lastoper2["text"] = rv
        self._animate_match(old_frontier[0])
        return True
    else:
        self._lastoper1["text"] = "Match:"
        self._lastoper2["text"] = "(failed)"
        return False
def _backtrack(self, *e):
    """Undo the parser's last operation; animate removal of either a
    subtree or a matched leaf depending on what is at the frontier.
    Return True on success, False when there is nothing to undo."""
    if self._animating_lock:
        return
    if self._parser.backtrack():
        # Walk down to the element at the new frontier location.
        elt = self._parser.tree()
        for i in self._parser.frontier()[0]:
            elt = elt[i]
        self._lastoper1["text"] = "Backtrack"
        self._lastoper2["text"] = ""
        if isinstance(elt, Tree):
            self._animate_backtrack(self._parser.frontier()[0])
        else:
            self._animate_match_backtrack(self._parser.frontier()[0])
        return True
    else:
        self._autostep = 0
        self._lastoper1["text"] = "Finished"
        self._lastoper2["text"] = ""
        return False
def about(self, *e):
    """Display an 'About' dialog for the application."""
    ABOUT = (
        "NLTK Recursive Descent Parser Application\n" + "Written by Edward Loper"
    )
    TITLE = "About: Recursive Descent Parser Application"
    try:
        from tkinter.messagebox import Message

        Message(message=ABOUT, title=TITLE).show()
    # Was a bare `except:`, which also swallows KeyboardInterrupt and
    # SystemExit; only Tkinter-level failures should trigger the fallback.
    except Exception:
        ShowText(self._top, TITLE, ABOUT)
def help(self, *e):
    """Display the application's help text in a pop-up window."""
    self._autostep = 0
    # The default font's not very legible; try using 'fixed' instead.
    try:
        ShowText(
            self._top,
            "Help: Recursive Descent Parser Application",
            (__doc__ or "").strip(),
            width=75,
            font="fixed",
        )
    # Was a bare `except:` (also traps KeyboardInterrupt/SystemExit);
    # only a failure to use the 'fixed' font should trigger the fallback.
    except Exception:
        ShowText(
            self._top,
            "Help: Recursive Descent Parser Application",
            (__doc__ or "").strip(),
            width=75,
        )
def postscript(self, *e):
    """Print the current canvas contents to a PostScript file."""
    self._autostep = 0
    self._cframe.print_to_file()
def mainloop(self, *args, **kwargs):
    """
    Enter the Tkinter mainloop.  This function must be called if
    this demo is created from a non-interactive program (e.g.
    from a script); otherwise, the demo will close as soon as
    the script completes.
    """
    # IDLE runs its own mainloop, so starting another would block it.
    if in_idle():
        return
    self._top.mainloop(*args, **kwargs)
def resize(self, size=None):
    """Set the display font size and redraw.  Negative Tk font sizes
    select pixel units, hence the -abs(...) below."""
    if size is not None:
        self._size.set(size)
    size = self._size.get()
    self._font.configure(size=-(abs(size)))
    self._boldfont.configure(size=-(abs(size)))
    self._sysfont.configure(size=-(abs(size)))
    self._bigfont.configure(size=-(abs(size + 2)))
    self._redraw()
#########################################
## Expand Production Selection
#########################################
def _toggle_grammar(self, *e):
    """Show or hide the production-list pane, following the
    self._show_grammar checkbox variable."""
    if self._show_grammar.get():
        self._prodframe.pack(
            fill="both", side="left", padx=2, after=self._feedbackframe
        )
        self._lastoper1["text"] = "Show Grammar"
    else:
        self._prodframe.pack_forget()
        self._lastoper1["text"] = "Hide Grammar"
    self._lastoper2["text"] = ""
# def toggle_grammar(self, *e):
# self._show_grammar = not self._show_grammar
# if self._show_grammar:
# self._prodframe.pack(fill='both', expand='y', side='left',
# after=self._feedbackframe)
# self._lastoper1['text'] = 'Show Grammar'
# else:
# self._prodframe.pack_forget()
# self._lastoper1['text'] = 'Hide Grammar'
# self._lastoper2['text'] = ''
def _prodlist_select(self, event):
    """Handle a click in the production listbox: try to expand the
    frontier with the chosen production; on failure, restore the
    selection to the currently expandable productions."""
    selection = self._prodlist.curselection()
    if len(selection) != 1:
        return
    index = int(selection[0])
    old_frontier = self._parser.frontier()
    production = self._parser.expand(self._productions[index])

    if production:
        self._lastoper1["text"] = "Expand:"
        self._lastoper2["text"] = production
        self._prodlist.selection_clear(0, "end")
        self._prodlist.selection_set(index)
        self._animate_expand(old_frontier[0])
    else:
        # Reset the production selections.
        self._prodlist.selection_clear(0, "end")
        for prod in self._parser.expandable_productions():
            index = self._productions.index(prod)
            self._prodlist.selection_set(index)
#########################################
## Animation
#########################################
def _animate_expand(self, treeloc):
    """Replace the node at *treeloc* with its newly-expanded subtree
    and start a fade-in animation (white -> black) for the new
    widgets.  Also makes room among siblings and pushes the sentence
    text down if the tree grew past it."""
    oldwidget = self._get(self._tree, treeloc)
    oldtree = oldwidget.parent()
    # 'top' is true when the expanded node is the root of the display.
    top = not isinstance(oldtree.parent(), TreeSegmentWidget)

    tree = self._parser.tree()
    for i in treeloc:
        tree = tree[i]

    widget = tree_to_treesegment(
        self._canvas,
        tree,
        node_font=self._boldfont,
        leaf_color="white",
        tree_width=2,
        tree_color="white",
        node_color="white",
        leaf_font=self._font,
    )
    widget.label()["color"] = "#20a050"

    # Position the new subtree where the old node was.
    (oldx, oldy) = oldtree.label().bbox()[:2]
    (newx, newy) = widget.label().bbox()[:2]
    widget.move(oldx - newx, oldy - newy)

    if top:
        self._cframe.add_widget(widget, 0, 5)
        widget.move(30 - widget.label().bbox()[0], 0)
        self._tree = widget
    else:
        oldtree.parent().replace_child(oldtree, widget)

    # Move the children over so they don't overlap.
    # Line the children up in a strange way.
    if widget.subtrees():
        dx = (
            oldx
            + widget.label().width() / 2
            - widget.subtrees()[0].bbox()[0] / 2
            - widget.subtrees()[0].bbox()[2] / 2
        )
        for subtree in widget.subtrees():
            subtree.move(dx, 0)

    self._makeroom(widget)

    if top:
        self._cframe.destroy_widget(oldtree)
    else:
        oldtree.destroy()

    # Grey levels for the fade-in frames, lightest first.
    colors = [
        "gray%d" % (10 * int(10 * x / self._animation_frames.get()))
        for x in range(self._animation_frames.get(), 0, -1)
    ]

    # Move the text string down, if necessary.
    dy = widget.bbox()[3] + 30 - self._canvas.coords(self._textline)[1]
    if dy > 0:
        for twidget in self._textwidgets:
            twidget.move(0, dy)
        self._canvas.move(self._textline, 0, dy)

    self._animate_expand_frame(widget, colors)
def _makeroom(self, treeseg):
    """
    Make sure that no sibling tree bbox's overlap.
    """
    parent = treeseg.parent()
    if not isinstance(parent, TreeSegmentWidget):
        return

    index = parent.subtrees().index(treeseg)

    # Handle siblings to the right
    rsiblings = parent.subtrees()[index + 1 :]
    if rsiblings:
        dx = treeseg.bbox()[2] - rsiblings[0].bbox()[0] + 10
        for sibling in rsiblings:
            sibling.move(dx, 0)

    # Handle siblings to the left
    if index > 0:
        lsibling = parent.subtrees()[index - 1]
        dx = max(0, lsibling.bbox()[2] - treeseg.bbox()[0] + 10)
        treeseg.move(dx, 0)

    # Keep working up the tree.
    self._makeroom(parent)
def _animate_expand_frame(self, widget, colors):
    """One frame of the expand fade-in: apply colors[0] to *widget*
    and its subtrees, then schedule the next frame; when colors run
    out, finalize to black, redraw, and continue auto-stepping."""
    if len(colors) > 0:
        self._animating_lock = 1
        widget["color"] = colors[0]
        for subtree in widget.subtrees():
            if isinstance(subtree, TreeSegmentWidget):
                subtree.label()["color"] = colors[0]
            else:
                subtree["color"] = colors[0]
        self._top.after(50, self._animate_expand_frame, widget, colors[1:])
    else:
        widget["color"] = "black"
        for subtree in widget.subtrees():
            if isinstance(subtree, TreeSegmentWidget):
                subtree.label()["color"] = "black"
            else:
                subtree["color"] = "black"
        self._redraw_quick()
        widget.label()["color"] = "black"
        self._animating_lock = 0
        if self._autostep:
            self._step()
def _animate_backtrack(self, treeloc):
    """Animate removal of the subtree at *treeloc*: flash red, then
    fade to light grey before the widgets are destroyed."""
    # Flash red first, if we're animating.
    if self._animation_frames.get() == 0:
        colors = []
    else:
        colors = ["#a00000", "#000000", "#a00000"]
    colors += [
        "gray%d" % (10 * int(10 * x / (self._animation_frames.get())))
        for x in range(1, self._animation_frames.get() + 1)
    ]

    # Collect the segment being removed plus all of its child widgets.
    widgets = [self._get(self._tree, treeloc).parent()]
    for subtree in widgets[0].subtrees():
        if isinstance(subtree, TreeSegmentWidget):
            widgets.append(subtree.label())
        else:
            widgets.append(subtree)

    self._animate_backtrack_frame(widgets, colors)
def _animate_backtrack_frame(self, widgets, colors):
    """One frame of the backtrack animation; when the colour list is
    exhausted, detach and destroy the removed child widgets."""
    if len(colors) > 0:
        self._animating_lock = 1
        for widget in widgets:
            widget["color"] = colors[0]
        self._top.after(50, self._animate_backtrack_frame, widgets, colors[1:])
    else:
        for widget in widgets[0].subtrees():
            widgets[0].remove_child(widget)
            widget.destroy()
        self._redraw_quick()
        self._animating_lock = 0
        if self._autostep:
            self._step()
def _animate_match_backtrack(self, treeloc):
    """Animate un-matching a word: slide the leaf widget back up
    toward its parent node over the configured number of frames."""
    widget = self._get(self._tree, treeloc)
    node = widget.parent().label()
    # Per-frame vertical distance (max(...) guards division by zero
    # when animation is disabled).
    dy = (node.bbox()[3] - widget.bbox()[1] + 14) / max(
        1, self._animation_frames.get()
    )
    self._animate_match_backtrack_frame(self._animation_frames.get(), widget, dy)
def _animate_match(self, treeloc):
    """Animate matching a word: slide the leaf widget down toward the
    sentence text over the configured number of frames."""
    widget = self._get(self._tree, treeloc)

    # Per-frame vertical distance (max(...) guards division by zero
    # when animation is disabled).
    dy = (self._textwidgets[0].bbox()[1] - widget.bbox()[3] - 10.0) / max(
        1, self._animation_frames.get()
    )
    self._animate_match_frame(self._animation_frames.get(), widget, dy)
def _animate_match_frame(self, frame, widget, dy):
    """One frame of the match animation; colour the leaf green and
    redraw when all frames are done."""
    if frame > 0:
        self._animating_lock = 1
        widget.move(0, dy)
        self._top.after(10, self._animate_match_frame, frame - 1, widget, dy)
    else:
        widget["color"] = "#006040"
        self._redraw_quick()
        self._animating_lock = 0
        if self._autostep:
            self._step()
def _animate_match_backtrack_frame(self, frame, widget, dy):
    """One frame of the un-match animation; destroy the leaf widget
    when all frames are done."""
    if frame > 0:
        self._animating_lock = 1
        widget.move(0, dy)
        self._top.after(
            10, self._animate_match_backtrack_frame, frame - 1, widget, dy
        )
    else:
        widget.parent().remove_child(widget)
        widget.destroy()
        self._animating_lock = 0
        if self._autostep:
            self._step()
def edit_grammar(self, *e):
    """Open a CFG editor on the current grammar; set_grammar is the
    callback applied when the user saves."""
    CFGEditor(self._top, self._parser.grammar(), self.set_grammar)
def set_grammar(self, grammar):
    """Install a new grammar into the parser and refresh the
    production listbox."""
    self._parser.set_grammar(grammar)
    self._productions = list(grammar.productions())
    self._prodlist.delete(0, "end")
    for production in self._productions:
        self._prodlist.insert("end", (" %s" % production))
def edit_sentence(self, *e):
    """Prompt for a new sentence; set_sentence is the callback."""
    sentence = " ".join(self._sent)
    title = "Edit Text"
    instr = "Enter a new sentence to parse."
    EntryDialog(self._top, sentence, instr, self.set_sentence, title)
def set_sentence(self, sentence):
    """Replace the sentence (whitespace-tokenized) and reset."""
    self._sent = sentence.split()  # [XX] use tagged?
    self.reset()
class CFG:
    """
    A context-free grammar.  A grammar consists of a start state and
    a set of productions.  The set of terminals and nonterminals is
    implicitly specified by the productions.

    If you need efficient key-based access to productions, you
    can use a subclass to implement it.
    """

    def __init__(self, start, productions, calculate_leftcorners=True):
        """
        Create a new context-free grammar, from the given start state
        and set of ``Production`` instances.

        :param start: The start symbol
        :type start: Nonterminal
        :param productions: The list of productions that defines the grammar
        :type productions: list(Production)
        :param calculate_leftcorners: False if we don't want to calculate the
            leftcorner relation. In that case, some optimized chart parsers won't work.
        :type calculate_leftcorners: bool
        """
        if not is_nonterminal(start):
            raise TypeError(
                "start should be a Nonterminal object,"
                " not a %s" % type(start).__name__
            )

        self._start = start
        self._productions = productions
        self._categories = {prod.lhs() for prod in productions}
        self._calculate_indexes()
        self._calculate_grammar_forms()
        if calculate_leftcorners:
            self._calculate_leftcorners()

    def _calculate_indexes(self):
        """Build production lookup tables: by lhs, by first rhs item,
        empty productions by lhs, and productions by lexical token."""
        self._lhs_index = {}
        self._rhs_index = {}
        self._empty_index = {}
        self._lexical_index = {}
        for prod in self._productions:
            # Left hand side.
            lhs = prod._lhs
            if lhs not in self._lhs_index:
                self._lhs_index[lhs] = []
            self._lhs_index[lhs].append(prod)
            if prod._rhs:
                # First item in right hand side.
                rhs0 = prod._rhs[0]
                if rhs0 not in self._rhs_index:
                    self._rhs_index[rhs0] = []
                self._rhs_index[rhs0].append(prod)
            else:
                # The right hand side is empty.
                self._empty_index[prod.lhs()] = prod
            # Lexical tokens in the right hand side.
            for token in prod._rhs:
                if is_terminal(token):
                    self._lexical_index.setdefault(token, set()).add(prod)

    def _calculate_leftcorners(self):
        """Calculate leftcorner relations, for use in optimized parsing."""
        self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories}
        self._immediate_leftcorner_words = {cat: set() for cat in self._categories}
        for prod in self.productions():
            if len(prod) > 0:
                cat, left = prod.lhs(), prod.rhs()[0]
                if is_nonterminal(left):
                    self._immediate_leftcorner_categories[cat].add(left)
                else:
                    self._immediate_leftcorner_words[cat].add(left)

        lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True)
        self._leftcorners = lc
        self._leftcorner_parents = invert_graph(lc)

        nr_leftcorner_categories = sum(
            map(len, self._immediate_leftcorner_categories.values())
        )
        nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values()))
        if nr_leftcorner_words > nr_leftcorner_categories > 10000:
            # If the grammar is big, the leftcorner-word dictionary will be too large.
            # In that case it is better to calculate the relation on demand.
            self._leftcorner_words = None
            return

        self._leftcorner_words = {}
        for cat in self._leftcorners:
            lefts = self._leftcorners[cat]
            lc = self._leftcorner_words[cat] = set()
            for left in lefts:
                lc.update(self._immediate_leftcorner_words.get(left, set()))

    @classmethod
    def fromstring(cls, input, encoding=None):
        """
        Return the grammar instance corresponding to the input string(s).

        :param input: a grammar, either in the form of a string or as a list of strings.
        """
        start, productions = read_grammar(
            input, standard_nonterm_parser, encoding=encoding
        )
        return cls(start, productions)

    def start(self):
        """
        Return the start symbol of the grammar

        :rtype: Nonterminal
        """
        return self._start

    # tricky to balance readability and efficiency here!
    # can't use set operations as they don't preserve ordering
    def productions(self, lhs=None, rhs=None, empty=False):
        """
        Return the grammar productions, filtered by the left-hand side
        or the first item in the right-hand side.

        :param lhs: Only return productions with the given left-hand side.
        :param rhs: Only return productions with the given first item
            in the right-hand side.
        :param empty: Only return productions with an empty right-hand side.
        :return: A list of productions matching the given constraints.
        :rtype: list(Production)
        """
        if rhs and empty:
            raise ValueError(
                "You cannot select empty and non-empty productions at the same time."
            )

        # no constraints so return everything
        if not lhs and not rhs:
            if not empty:
                return self._productions
            else:
                return self._empty_index.values()

        # only lhs specified so look up its index
        elif lhs and not rhs:
            if not empty:
                return self._lhs_index.get(lhs, [])
            elif lhs in self._empty_index:
                return [self._empty_index[lhs]]
            else:
                return []

        # only rhs specified so look up its index
        elif rhs and not lhs:
            return self._rhs_index.get(rhs, [])

        # intersect
        else:
            return [
                prod
                for prod in self._lhs_index.get(lhs, [])
                if prod in self._rhs_index.get(rhs, [])
            ]

    def leftcorners(self, cat):
        """
        Return the set of all nonterminals that the given nonterminal
        can start with, including itself.

        This is the reflexive, transitive closure of the immediate
        leftcorner relation:  (A > B)  iff  (A -> B beta)

        :param cat: the parent of the leftcorners
        :type cat: Nonterminal
        :return: the set of all leftcorners
        :rtype: set(Nonterminal)
        """
        return self._leftcorners.get(cat, {cat})

    def is_leftcorner(self, cat, left):
        """
        True if left is a leftcorner of cat, where left can be a
        terminal or a nonterminal.

        :param cat: the parent of the leftcorner
        :type cat: Nonterminal
        :param left: the suggested leftcorner
        :type left: Terminal or Nonterminal
        :rtype: bool
        """
        if is_nonterminal(left):
            return left in self.leftcorners(cat)
        elif self._leftcorner_words:
            return left in self._leftcorner_words.get(cat, set())
        else:
            # Big-grammar case: the word relation was not precomputed.
            return any(
                left in self._immediate_leftcorner_words.get(parent, set())
                for parent in self.leftcorners(cat)
            )

    def leftcorner_parents(self, cat):
        """
        Return the set of all nonterminals for which the given category
        is a left corner. This is the inverse of the leftcorner relation.

        :param cat: the suggested leftcorner
        :type cat: Nonterminal
        :return: the set of all parents to the leftcorner
        :rtype: set(Nonterminal)
        """
        return self._leftcorner_parents.get(cat, {cat})

    def check_coverage(self, tokens):
        """
        Check whether the grammar rules cover the given list of tokens.
        If not, then raise an exception.

        :type tokens: list(str)
        """
        missing = [tok for tok in tokens if not self._lexical_index.get(tok)]
        if missing:
            missing = ", ".join(f"{w!r}" for w in missing)
            raise ValueError(
                "Grammar does not cover some of the input words: %r." % missing
            )

    def _calculate_grammar_forms(self):
        """
        Pre-calculate of which form(s) the grammar is.
        """
        prods = self._productions
        self._is_lexical = all(p.is_lexical() for p in prods)
        self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1)
        self._min_len = min(len(p) for p in prods)
        self._max_len = max(len(p) for p in prods)
        self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1)

    def is_lexical(self):
        """
        Return True if all productions are lexicalised.
        """
        return self._is_lexical

    def is_nonlexical(self):
        """
        Return True if all lexical rules are "preterminals", that is,
        unary rules which can be separated in a preprocessing step.

        This means that all productions are of the forms
        A -> B1 ... Bn (n>=0), or A -> "s".

        Note: is_lexical() and is_nonlexical() are not opposites.
        There are grammars which are neither, and grammars which are both.
        """
        return self._is_nonlexical

    def min_len(self):
        """
        Return the right-hand side length of the shortest grammar production.
        """
        return self._min_len

    def max_len(self):
        """
        Return the right-hand side length of the longest grammar production.
        """
        return self._max_len

    def is_nonempty(self):
        """
        Return True if there are no empty productions.
        """
        return self._min_len > 0

    def is_binarised(self):
        """
        Return True if all productions are at most binary.
        Note that there can still be empty and unary productions.
        """
        return self._max_len <= 2

    def is_flexible_chomsky_normal_form(self):
        """
        Return True if all productions are of the forms
        A -> B C, A -> B, or A -> "s".
        """
        return self.is_nonempty() and self.is_nonlexical() and self.is_binarised()

    def is_chomsky_normal_form(self):
        """
        Return True if the grammar is of Chomsky Normal Form, i.e. all productions
        are of the form A -> B C, or A -> "s".
        """
        return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical

    def chomsky_normal_form(self, new_token_padding="@$@", flexible=False):
        """
        Returns a new Grammar that is in chomsky normal

        :param: new_token_padding
            Customise new rule formation during binarisation
        """
        if self.is_chomsky_normal_form():
            return self
        if self.productions(empty=True):
            raise ValueError(
                "Grammar has Empty rules. Cannot deal with them at the moment"
            )

        # check for mixed rules
        for rule in self.productions():
            if rule.is_lexical() and len(rule.rhs()) > 1:
                raise ValueError(
                    f"Cannot handle mixed rule {rule.lhs()} => {rule.rhs()}"
                )

        step1 = CFG.eliminate_start(self)
        step2 = CFG.binarize(step1, new_token_padding)
        if flexible:
            return step2
        step3 = CFG.remove_unitary_rules(step2)
        # Deduplicate productions introduced by the previous steps.
        step4 = CFG(step3.start(), list(set(step3.productions())))
        return step4

    @classmethod
    def remove_unitary_rules(cls, grammar):
        """
        Remove nonlexical unitary rules and convert them to
        lexical
        """
        result = []
        unitary = []
        for rule in grammar.productions():
            if len(rule) == 1 and rule.is_nonlexical():
                unitary.append(rule)
            else:
                result.append(rule)

        while unitary:
            rule = unitary.pop(0)
            for item in grammar.productions(lhs=rule.rhs()[0]):
                new_rule = Production(rule.lhs(), item.rhs())
                if len(new_rule) != 1 or new_rule.is_lexical():
                    result.append(new_rule)
                else:
                    unitary.append(new_rule)

        n_grammar = CFG(grammar.start(), result)
        return n_grammar

    @classmethod
    def binarize(cls, grammar, padding="@$@"):
        """
        Convert all non-binary rules into binary by introducing
        new tokens.
        Example::

            Original:
                A => B C D
            After Conversion:
                A => B A@$@B
                A@$@B => C D
        """
        result = []

        for rule in grammar.productions():
            if len(rule.rhs()) > 2:
                # this rule needs to be broken down
                left_side = rule.lhs()
                for k in range(0, len(rule.rhs()) - 2):
                    tsym = rule.rhs()[k]
                    new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol())
                    new_production = Production(left_side, (tsym, new_sym))
                    left_side = new_sym
                    result.append(new_production)
                last_prd = Production(left_side, rule.rhs()[-2:])
                result.append(last_prd)
            else:
                result.append(rule)

        n_grammar = CFG(grammar.start(), result)
        return n_grammar

    @classmethod
    def eliminate_start(cls, grammar):
        """
        Eliminate start rule in case it appears on RHS
        Example: S -> S0 S1 and S0 -> S1 S
        Then another rule S0_Sigma -> S is added
        """
        start = grammar.start()
        result = []
        need_to_add = None
        for rule in grammar.productions():
            if start in rule.rhs():
                need_to_add = True
            result.append(rule)
        if need_to_add:
            start = Nonterminal("S0_SIGMA")
            result.append(Production(start, [grammar.start()]))
            n_grammar = CFG(start, result)
            return n_grammar
        return grammar

    def __repr__(self):
        return "<Grammar with %d productions>" % len(self._productions)

    def __str__(self):
        result = "Grammar with %d productions" % len(self._productions)
        result += " (start state = %r)" % self._start
        for production in self._productions:
            result += "\n    %s" % production
        return result
The code above provides the dependencies necessary for implementing the `app` function. Write a Python function `def app()` that solves the following problem:
Create a recursive descent parser demo, using a simple grammar and text.
Here is the function:
def app():
    """
    Create a recursive descent parser demo, using a simple grammar and
    text.
    """
    from nltk.grammar import CFG

    # A small toy grammar with the classic PP-attachment ambiguity.
    grammar = CFG.fromstring(
        """
    # Grammatical productions.
        S -> NP VP
        NP -> Det N PP | Det N
        VP -> V NP PP | V NP | V
        PP -> P NP
    # Lexical productions.
        NP -> 'I'
        Det -> 'the' | 'a'
        N -> 'man' | 'park' | 'dog' | 'telescope'
        V -> 'ate' | 'saw'
        P -> 'in' | 'under' | 'with'
    """
    )

    sent = "the dog saw a man in the park".split()

    RecursiveDescentApp(grammar, sent).mainloop()
import queue as q
import threading
from tkinter import (
END,
LEFT,
SUNKEN,
Button,
Frame,
IntVar,
Label,
Menu,
OptionMenu,
Scrollbar,
StringVar,
Text,
Tk,
)
from tkinter.font import Font
from nltk.corpus import (
alpino,
brown,
cess_cat,
cess_esp,
floresta,
indian,
mac_morpho,
machado,
nps_chat,
sinica_treebank,
treebank,
)
from nltk.probability import FreqDist
from nltk.util import in_idle
class CollocationsView:
    """Tkinter GUI that lists collocations found in a selectable corpus.

    Corpus loading happens asynchronously in the model; this view polls
    ``self.queue`` for load events every POLL_INTERVAL milliseconds and
    pages through the model's results.
    """

    _BACKGROUND_COLOUR = "#FFF"  # white

    def __init__(self):
        self.queue = q.Queue()
        self.model = CollocationsModel(self.queue)
        self.top = Tk()
        self._init_top(self.top)
        self._init_menubar()
        self._init_widgets(self.top)
        self.load_corpus(self.model.DEFAULT_CORPUS)
        self.after = self.top.after(POLL_INTERVAL, self._poll)

    def _init_top(self, top):
        """Configure the toplevel window: geometry, title, close handlers."""
        top.geometry("550x650+50+50")
        top.title("NLTK Collocations List")
        top.bind("<Control-q>", self.destroy)
        top.protocol("WM_DELETE_WINDOW", self.destroy)
        top.minsize(550, 650)

    def _init_widgets(self, parent):
        """Assemble the main frame: corpus selector, results, paging, status."""
        self.main_frame = Frame(
            parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1)
        )
        self._init_corpus_select(self.main_frame)
        self._init_results_box(self.main_frame)
        self._init_paging(self.main_frame)
        self._init_status(self.main_frame)
        self.main_frame.pack(fill="both", expand=True)

    def _init_corpus_select(self, parent):
        """Build the corpus drop-down; selecting an entry loads that corpus."""
        innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
        self.var = StringVar(innerframe)
        self.var.set(self.model.DEFAULT_CORPUS)
        Label(
            innerframe,
            justify=LEFT,
            text=" Corpus: ",
            background=self._BACKGROUND_COLOUR,
            padx=2,
            pady=1,
            border=0,
        ).pack(side="left")

        # NOTE(review): the original code also computed
        # `other_corpora = list(...).remove(DEFAULT_CORPUS)`, which is
        # always None (list.remove returns None) and was never used;
        # the dead assignment has been removed.
        om = OptionMenu(
            innerframe,
            self.var,
            self.model.DEFAULT_CORPUS,
            command=self.corpus_selected,
            *self.model.non_default_corpora()
        )
        om["borderwidth"] = 0
        om["highlightthickness"] = 1
        om.pack(side="left")
        innerframe.pack(side="top", fill="x", anchor="n")

    def _init_status(self, parent):
        """Create the status bar shown at the bottom of the window."""
        self.status = Label(
            parent,
            justify=LEFT,
            relief=SUNKEN,
            background=self._BACKGROUND_COLOUR,
            border=0,
            padx=1,
            pady=0,
        )
        self.status.pack(side="top", anchor="sw")

    def _init_menubar(self):
        """Create the File and Edit menus (result-count radio buttons)."""
        self._result_size = IntVar(self.top)
        menubar = Menu(self.top)

        filemenu = Menu(menubar, tearoff=0, borderwidth=0)
        filemenu.add_command(
            label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q"
        )
        menubar.add_cascade(label="File", underline=0, menu=filemenu)

        editmenu = Menu(menubar, tearoff=0)
        rescntmenu = Menu(editmenu, tearoff=0)
        rescntmenu.add_radiobutton(
            label="20",
            variable=self._result_size,
            underline=0,
            value=20,
            command=self.set_result_size,
        )
        rescntmenu.add_radiobutton(
            label="50",
            variable=self._result_size,
            underline=0,
            value=50,
            command=self.set_result_size,
        )
        rescntmenu.add_radiobutton(
            label="100",
            variable=self._result_size,
            underline=0,
            value=100,
            command=self.set_result_size,
        )
        # Default to the second entry (50 results per page).
        rescntmenu.invoke(1)
        editmenu.add_cascade(label="Result Count", underline=0, menu=rescntmenu)

        menubar.add_cascade(label="Edit", underline=0, menu=editmenu)
        self.top.config(menu=menubar)

    def set_result_size(self, **kwargs):
        """Propagate the chosen page size to the model."""
        self.model.result_count = self._result_size.get()

    def _init_results_box(self, parent):
        """Create the scrollable, read-only text widget holding results."""
        innerframe = Frame(parent)
        i1 = Frame(innerframe)
        i2 = Frame(innerframe)
        vscrollbar = Scrollbar(i1, borderwidth=1)
        hscrollbar = Scrollbar(i2, borderwidth=1, orient="horiz")
        self.results_box = Text(
            i1,
            font=Font(family="courier", size="16"),
            state="disabled",
            borderwidth=1,
            yscrollcommand=vscrollbar.set,
            xscrollcommand=hscrollbar.set,
            wrap="none",
            width="40",
            height="20",
            exportselection=1,
        )
        self.results_box.pack(side="left", fill="both", expand=True)
        vscrollbar.pack(side="left", fill="y", anchor="e")
        vscrollbar.config(command=self.results_box.yview)
        hscrollbar.pack(side="left", fill="x", expand=True, anchor="w")
        hscrollbar.config(command=self.results_box.xview)
        # there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!!
        Label(i2, text="   ", background=self._BACKGROUND_COLOUR).pack(
            side="left", anchor="e"
        )
        i1.pack(side="top", fill="both", expand=True, anchor="n")
        i2.pack(side="bottom", fill="x", anchor="s")
        innerframe.pack(side="top", fill="both", expand=True)

    def _init_paging(self, parent):
        """Create the Previous/Next buttons (disabled until a load completes)."""
        innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
        self.prev = prev = Button(
            innerframe,
            text="Previous",
            command=self.previous,
            width="10",
            borderwidth=1,
            highlightthickness=1,
            state="disabled",
        )
        prev.pack(side="left", anchor="center")
        self.next = next = Button(
            innerframe,
            text="Next",
            command=self.__next__,
            width="10",
            borderwidth=1,
            highlightthickness=1,
            state="disabled",
        )
        next.pack(side="right", anchor="center")
        innerframe.pack(side="top", fill="y")
        self.reset_current_page()

    def reset_current_page(self):
        """Mark that no results page is currently displayed."""
        self.current_page = -1

    def _poll(self):
        """Drain one event from the model queue, then reschedule polling."""
        try:
            event = self.queue.get(block=False)
        except q.Empty:
            pass
        else:
            if event == CORPUS_LOADED_EVENT:
                self.handle_corpus_loaded(event)
            elif event == ERROR_LOADING_CORPUS_EVENT:
                self.handle_error_loading_corpus(event)
        self.after = self.top.after(POLL_INTERVAL, self._poll)

    def handle_error_loading_corpus(self, event):
        """Report a failed load and leave the UI in a disabled state."""
        self.status["text"] = "Error in loading " + self.var.get()
        self.unfreeze_editable()
        self.clear_results_box()
        self.freeze_editable()
        self.reset_current_page()

    def handle_corpus_loaded(self, event):
        """Show the first page of results for a freshly loaded corpus."""
        self.status["text"] = self.var.get() + " is loaded"
        self.unfreeze_editable()
        self.clear_results_box()
        self.reset_current_page()
        # self.next()
        collocations = self.model.next(self.current_page + 1)
        self.write_results(collocations)
        self.current_page += 1

    def corpus_selected(self, *args):
        """Drop-down callback: load whichever corpus is now selected."""
        new_selection = self.var.get()
        self.load_corpus(new_selection)

    def previous(self):
        """Display the previous page of collocation results."""
        self.freeze_editable()
        collocations = self.model.prev(self.current_page - 1)
        self.current_page = self.current_page - 1
        self.clear_results_box()
        self.write_results(collocations)
        self.unfreeze_editable()

    def __next__(self):
        """Display the next page of collocation results."""
        self.freeze_editable()
        collocations = self.model.next(self.current_page + 1)
        self.clear_results_box()
        self.write_results(collocations)
        self.current_page += 1
        self.unfreeze_editable()

    def load_corpus(self, selection):
        """Start loading *selection* in the model unless already loaded."""
        if self.model.selected_corpus != selection:
            self.status["text"] = "Loading " + selection + "..."
            self.freeze_editable()
            self.model.load_corpus(selection)

    def freeze_editable(self):
        """Disable the paging buttons (e.g. while a corpus is loading)."""
        self.prev["state"] = "disabled"
        self.next["state"] = "disabled"

    def clear_results_box(self):
        """Empty the results widget (it must be toggled editable first)."""
        self.results_box["state"] = "normal"
        self.results_box.delete("1.0", END)
        self.results_box["state"] = "disabled"

    def fire_event(self, event):
        # Firing an event so that rendering of widgets happen in the mainloop thread
        self.top.event_generate(event, when="tail")

    def destroy(self, *e):
        """Cancel polling and close the window; safe to call twice."""
        if self.top is None:
            return
        self.top.after_cancel(self.after)
        self.top.destroy()
        self.top = None

    def mainloop(self, *args, **kwargs):
        """Enter the Tk mainloop (no-op when running under IDLE)."""
        if in_idle():
            return
        self.top.mainloop(*args, **kwargs)

    def unfreeze_editable(self):
        """Re-enable whichever paging buttons are valid for this page."""
        self.set_paging_button_states()

    def set_paging_button_states(self):
        """Enable/disable Previous/Next based on the current page index."""
        if self.current_page == -1 or self.current_page == 0:
            self.prev["state"] = "disabled"
        else:
            self.prev["state"] = "normal"
        if self.model.is_last_page(self.current_page):
            self.next["state"] = "disabled"
        else:
            self.next["state"] = "normal"

    def write_results(self, results):
        """Append (word1, word2) pairs to the results box, one per line."""
        self.results_box["state"] = "normal"
        row = 1
        for each in results:
            self.results_box.insert(str(row) + ".0", each[0] + " " + each[1] + "\n")
            row += 1
        self.results_box["state"] = "disabled"
def app():
    """Launch the collocations GUI and block until the window is closed."""
    view = CollocationsView()
    view.mainloop()
170,881 | import queue as q
import re
import threading
from tkinter import (
END,
LEFT,
SUNKEN,
Button,
Entry,
Frame,
IntVar,
Label,
Menu,
OptionMenu,
Scrollbar,
StringVar,
Text,
Tk,
)
from tkinter.font import Font
from nltk.corpus import (
alpino,
brown,
cess_cat,
cess_esp,
floresta,
indian,
mac_morpho,
nps_chat,
sinica_treebank,
treebank,
)
from nltk.draw.util import ShowText
from nltk.util import in_idle
class ConcordanceSearchView:
    """Graphical front-end for searching tagged corpora for concordance lines.

    The view delegates corpus loading and searching to a
    ``ConcordanceSearchModel`` running on background threads; completed work
    is reported back through ``self.queue`` and collected by the periodic
    ``_poll`` callback, which dispatches to the ``handle_*`` methods.
    """

    _BACKGROUND_COLOUR = "#FFF"  # white

    # Colour of highlighted results
    _HIGHLIGHT_WORD_COLOUR = "#F00"  # red
    _HIGHLIGHT_WORD_TAG = "HL_WRD_TAG"

    _HIGHLIGHT_LABEL_COLOUR = "#C0C0C0"  # dark grey
    _HIGHLIGHT_LABEL_TAG = "HL_LBL_TAG"

    # Percentage of text left of the scrollbar position
    _FRACTION_LEFT_TEXT = 0.30

    def __init__(self):
        self.queue = q.Queue()
        self.model = ConcordanceSearchModel(self.queue)
        self.top = Tk()
        self._init_top(self.top)
        self._init_menubar()
        self._init_widgets(self.top)
        self.load_corpus(self.model.DEFAULT_CORPUS)
        self.after = self.top.after(POLL_INTERVAL, self._poll)

    def _init_top(self, top):
        """Configure geometry, title, and close bindings of the main window."""
        top.geometry("950x680+50+50")
        top.title("NLTK Concordance Search")
        top.bind("<Control-q>", self.destroy)
        top.protocol("WM_DELETE_WINDOW", self.destroy)
        top.minsize(950, 680)

    def _init_widgets(self, parent):
        """Build the main frame and all of its child widgets."""
        self.main_frame = Frame(
            parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1)
        )
        self._init_corpus_select(self.main_frame)
        self._init_query_box(self.main_frame)
        self._init_results_box(self.main_frame)
        self._init_paging(self.main_frame)
        self._init_status(self.main_frame)
        self.main_frame.pack(fill="both", expand=True)

    def _init_menubar(self):
        """Create the File and Edit menus.

        The ``invoke`` calls below fire the radio-button commands, which
        also establishes the defaults: 50 results per page, 80 characters
        of context before the match and 90 after it.
        """
        self._result_size = IntVar(self.top)
        self._cntx_bf_len = IntVar(self.top)
        self._cntx_af_len = IntVar(self.top)
        menubar = Menu(self.top)

        filemenu = Menu(menubar, tearoff=0, borderwidth=0)
        filemenu.add_command(
            label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q"
        )
        menubar.add_cascade(label="File", underline=0, menu=filemenu)

        editmenu = Menu(menubar, tearoff=0)
        rescntmenu = Menu(editmenu, tearoff=0)
        rescntmenu.add_radiobutton(
            label="20",
            variable=self._result_size,
            underline=0,
            value=20,
            command=self.set_result_size,
        )
        rescntmenu.add_radiobutton(
            label="50",
            variable=self._result_size,
            underline=0,
            value=50,
            command=self.set_result_size,
        )
        rescntmenu.add_radiobutton(
            label="100",
            variable=self._result_size,
            underline=0,
            value=100,
            command=self.set_result_size,
        )
        rescntmenu.invoke(1)  # default: 50 results
        editmenu.add_cascade(label="Result Count", underline=0, menu=rescntmenu)

        cntxmenu = Menu(editmenu, tearoff=0)
        cntxbfmenu = Menu(cntxmenu, tearoff=0)
        cntxbfmenu.add_radiobutton(
            label="60 characters",
            variable=self._cntx_bf_len,
            underline=0,
            value=60,
            command=self.set_cntx_bf_len,
        )
        cntxbfmenu.add_radiobutton(
            label="80 characters",
            variable=self._cntx_bf_len,
            underline=0,
            value=80,
            command=self.set_cntx_bf_len,
        )
        cntxbfmenu.add_radiobutton(
            label="100 characters",
            variable=self._cntx_bf_len,
            underline=0,
            value=100,
            command=self.set_cntx_bf_len,
        )
        cntxbfmenu.invoke(1)  # default: 80 characters before the match
        cntxmenu.add_cascade(label="Before", underline=0, menu=cntxbfmenu)

        cntxafmenu = Menu(cntxmenu, tearoff=0)
        cntxafmenu.add_radiobutton(
            label="70 characters",
            variable=self._cntx_af_len,
            underline=0,
            value=70,
            command=self.set_cntx_af_len,
        )
        cntxafmenu.add_radiobutton(
            label="90 characters",
            variable=self._cntx_af_len,
            underline=0,
            value=90,
            command=self.set_cntx_af_len,
        )
        cntxafmenu.add_radiobutton(
            label="110 characters",
            variable=self._cntx_af_len,
            underline=0,
            value=110,
            command=self.set_cntx_af_len,
        )
        cntxafmenu.invoke(1)  # default: 90 characters after the match
        cntxmenu.add_cascade(label="After", underline=0, menu=cntxafmenu)

        editmenu.add_cascade(label="Context", underline=0, menu=cntxmenu)
        menubar.add_cascade(label="Edit", underline=0, menu=editmenu)

        self.top.config(menu=menubar)

    def set_result_size(self, **kwargs):
        """Menu callback: push the chosen page size into the model."""
        self.model.result_count = self._result_size.get()

    def set_cntx_af_len(self, **kwargs):
        """Menu callback: remember how many characters to show after a match."""
        self._char_after = self._cntx_af_len.get()

    def set_cntx_bf_len(self, **kwargs):
        """Menu callback: remember how many characters to show before a match."""
        self._char_before = self._cntx_bf_len.get()

    def _init_corpus_select(self, parent):
        """Build the corpus selection drop-down (default corpus preselected)."""
        innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
        self.var = StringVar(innerframe)
        self.var.set(self.model.DEFAULT_CORPUS)
        Label(
            innerframe,
            justify=LEFT,
            text=" Corpus: ",
            background=self._BACKGROUND_COLOUR,
            padx=2,
            pady=1,
            border=0,
        ).pack(side="left")

        om = OptionMenu(
            innerframe,
            self.var,
            self.model.DEFAULT_CORPUS,
            command=self.corpus_selected,
            *self.model.non_default_corpora()
        )
        om["borderwidth"] = 0
        om["highlightthickness"] = 1
        om.pack(side="left")
        innerframe.pack(side="top", fill="x", anchor="n")

    def _init_status(self, parent):
        """Build the one-line status label at the bottom of the window."""
        self.status = Label(
            parent,
            justify=LEFT,
            relief=SUNKEN,
            background=self._BACKGROUND_COLOUR,
            border=0,
            padx=1,
            pady=0,
        )
        self.status.pack(side="top", anchor="sw")

    def _init_query_box(self, parent):
        """Build the query entry field and Search button; Return triggers a search."""
        innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
        another = Frame(innerframe, background=self._BACKGROUND_COLOUR)
        self.query_box = Entry(another, width=60)
        self.query_box.pack(side="left", fill="x", pady=25, anchor="center")
        self.search_button = Button(
            another,
            text="Search",
            command=self.search,
            borderwidth=1,
            highlightthickness=1,
        )
        self.search_button.pack(side="left", fill="x", pady=25, anchor="center")
        self.query_box.bind("<KeyPress-Return>", self.search_enter_keypress_handler)
        another.pack()
        innerframe.pack(side="top", fill="x", anchor="n")

    def search_enter_keypress_handler(self, *event):
        """Key binding: pressing Return in the query box starts a search."""
        self.search()

    def _init_results_box(self, parent):
        """Build the scrollable, read-only text widget that shows results."""
        innerframe = Frame(parent)
        i1 = Frame(innerframe)
        i2 = Frame(innerframe)
        vscrollbar = Scrollbar(i1, borderwidth=1)
        hscrollbar = Scrollbar(i2, borderwidth=1, orient="horiz")
        self.results_box = Text(
            i1,
            font=Font(family="courier", size="16"),
            state="disabled",
            borderwidth=1,
            yscrollcommand=vscrollbar.set,
            xscrollcommand=hscrollbar.set,
            wrap="none",
            width="40",
            height="20",
            exportselection=1,
        )
        self.results_box.pack(side="left", fill="both", expand=True)
        # Tags used by write_results() to colour matched words and labels.
        self.results_box.tag_config(
            self._HIGHLIGHT_WORD_TAG, foreground=self._HIGHLIGHT_WORD_COLOUR
        )
        self.results_box.tag_config(
            self._HIGHLIGHT_LABEL_TAG, foreground=self._HIGHLIGHT_LABEL_COLOUR
        )
        vscrollbar.pack(side="left", fill="y", anchor="e")
        vscrollbar.config(command=self.results_box.yview)
        hscrollbar.pack(side="left", fill="x", expand=True, anchor="w")
        hscrollbar.config(command=self.results_box.xview)

        # there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!!
        Label(i2, text="   ", background=self._BACKGROUND_COLOUR).pack(
            side="left", anchor="e"
        )

        i1.pack(side="top", fill="both", expand=True, anchor="n")
        i2.pack(side="bottom", fill="x", anchor="s")
        innerframe.pack(side="top", fill="both", expand=True)

    def _init_paging(self, parent):
        """Build the Previous/Next buttons (disabled until results exist)."""
        innerframe = Frame(parent, background=self._BACKGROUND_COLOUR)
        self.prev = prev = Button(
            innerframe,
            text="Previous",
            command=self.previous,
            width="10",
            borderwidth=1,
            highlightthickness=1,
            state="disabled",
        )
        prev.pack(side="left", anchor="center")
        self.next = next = Button(
            innerframe,
            text="Next",
            command=self.__next__,
            width="10",
            borderwidth=1,
            highlightthickness=1,
            state="disabled",
        )
        next.pack(side="right", anchor="center")
        innerframe.pack(side="top", fill="y")
        self.current_page = 0

    def previous(self):
        """Request the previous page of results from the model (async)."""
        self.clear_results_box()
        self.freeze_editable()
        self.model.prev(self.current_page - 1)

    def __next__(self):
        """Request the next page of results from the model (async)."""
        self.clear_results_box()
        self.freeze_editable()
        self.model.next(self.current_page + 1)

    def about(self, *e):
        """Show a simple About dialog (falls back to a plain text window)."""
        ABOUT = "NLTK Concordance Search Demo\n"
        TITLE = "About: NLTK Concordance Search Demo"
        try:
            from tkinter.messagebox import Message

            Message(message=ABOUT, title=TITLE, parent=self.main_frame).show()
        except Exception:
            # fall back to a plain text window if the messagebox fails
            ShowText(self.top, TITLE, ABOUT)

    def _bind_event_handlers(self):
        """Bind model events to their handlers on the Tk event loop."""
        self.top.bind(CORPUS_LOADED_EVENT, self.handle_corpus_loaded)
        self.top.bind(SEARCH_TERMINATED_EVENT, self.handle_search_terminated)
        self.top.bind(SEARCH_ERROR_EVENT, self.handle_search_error)
        self.top.bind(ERROR_LOADING_CORPUS_EVENT, self.handle_error_loading_corpus)

    def _poll(self):
        """Drain one pending model event (if any) and reschedule this poll."""
        try:
            event = self.queue.get(block=False)
        except q.Empty:
            pass
        else:
            if event == CORPUS_LOADED_EVENT:
                self.handle_corpus_loaded(event)
            elif event == SEARCH_TERMINATED_EVENT:
                self.handle_search_terminated(event)
            elif event == SEARCH_ERROR_EVENT:
                self.handle_search_error(event)
            elif event == ERROR_LOADING_CORPUS_EVENT:
                self.handle_error_loading_corpus(event)
        self.after = self.top.after(POLL_INTERVAL, self._poll)

    def handle_error_loading_corpus(self, event):
        """Report a corpus-load failure and freeze the view."""
        self.status["text"] = "Error in loading " + self.var.get()
        self.unfreeze_editable()
        self.clear_all()
        self.freeze_editable()

    def handle_corpus_loaded(self, event):
        """Announce a loaded corpus and put focus back in the query box."""
        self.status["text"] = self.var.get() + " is loaded"
        self.unfreeze_editable()
        self.clear_all()
        self.query_box.focus_set()

    def handle_search_terminated(self, event):
        """Render finished search results, or report that none were found."""
        # TODO: refactor the model such that it is less state sensitive
        results = self.model.get_results()
        self.write_results(results)
        self.status["text"] = ""
        if len(results) == 0:
            self.status["text"] = "No results found for " + self.model.query
        else:
            self.current_page = self.model.last_requested_page
            self.unfreeze_editable()
        # Scroll so the matched word column is visible.
        self.results_box.xview_moveto(self._FRACTION_LEFT_TEXT)

    def handle_search_error(self, event):
        """Report an invalid query and re-enable the controls."""
        self.status["text"] = "Error in query " + self.model.query
        self.unfreeze_editable()

    def corpus_selected(self, *args):
        """Option-menu callback: load whichever corpus is now selected."""
        new_selection = self.var.get()
        self.load_corpus(new_selection)

    def load_corpus(self, selection):
        """Ask the model to load ``selection`` unless it is already current."""
        if self.model.selected_corpus != selection:
            self.status["text"] = "Loading " + selection + "..."
            self.freeze_editable()
            self.model.load_corpus(selection)

    def search(self):
        """Start an asynchronous search for the query box's contents.

        A blank query is ignored (the view is left editable).
        """
        self.current_page = 0
        self.clear_results_box()
        self.model.reset_results()
        query = self.query_box.get()
        if len(query.strip()) == 0:
            return
        self.status["text"] = "Searching for " + query
        self.freeze_editable()
        self.model.search(query, self.current_page + 1)

    def write_results(self, results):
        """Write one concordance line per result and highlight the match.

        Each result is a ``(sentence, start, end)`` triple where
        ``start``/``end`` delimit the matched (tagged) span in ``sentence``.
        """
        self.results_box["state"] = "normal"
        row = 1
        for each in results:
            sent, pos1, pos2 = each[0].strip(), each[1], each[2]
            if len(sent) != 0:
                # Left-pad short prefixes so every match column lines up.
                if pos1 < self._char_before:
                    sent, pos1, pos2 = self.pad(sent, pos1, pos2)
                sentence = sent[pos1 - self._char_before : pos1 + self._char_after]
                if row != len(results):
                    sentence += "\n"
                self.results_box.insert(str(row) + ".0", sentence)
                word_markers, label_markers = self.words_and_labels(sent, pos1, pos2)
                for marker in word_markers:
                    self.results_box.tag_add(
                        self._HIGHLIGHT_WORD_TAG,
                        str(row) + "." + str(marker[0]),
                        str(row) + "." + str(marker[1]),
                    )
                for marker in label_markers:
                    self.results_box.tag_add(
                        self._HIGHLIGHT_LABEL_TAG,
                        str(row) + "." + str(marker[0]),
                        str(row) + "." + str(marker[1]),
                    )
            row += 1
        self.results_box["state"] = "disabled"

    def words_and_labels(self, sentence, pos1, pos2):
        """Return ``(words, labels)`` highlight spans for the matched region.

        Assumes each token in ``sentence[pos1:pos2]`` has the form
        ``word/label`` (tagged-corpus convention). The returned character
        offsets are relative to the displayed line, i.e. shifted by
        ``self._char_before``.
        """
        search_exp = sentence[pos1:pos2]
        words, labels = [], []
        labeled_words = search_exp.split(" ")
        index = 0
        for each in labeled_words:
            if each == "":
                index += 1
            else:
                word, label = each.split("/")
                words.append(
                    (self._char_before + index, self._char_before + index + len(word))
                )
                index += len(word) + 1
                labels.append(
                    (self._char_before + index, self._char_before + index + len(label))
                )
                index += len(label)
                index += 1
        return words, labels

    def pad(self, sent, hstart, hend):
        """Left-pad ``sent`` with spaces so the match starts at column
        ``self._char_before``; returns the adjusted sentence and offsets."""
        if hstart >= self._char_before:
            return sent, hstart, hend
        d = self._char_before - hstart
        sent = "".join([" "] * d) + sent
        return sent, hstart + d, hend + d

    def destroy(self, *e):
        """Cancel the poll timer and tear down the window; safe to call twice."""
        if self.top is None:
            return
        self.top.after_cancel(self.after)
        self.top.destroy()
        self.top = None

    def clear_all(self):
        """Reset the query box, the model's query state, and the results box."""
        self.query_box.delete(0, END)
        self.model.reset_query()
        self.clear_results_box()

    def clear_results_box(self):
        """Empty the (normally read-only) results widget."""
        self.results_box["state"] = "normal"
        self.results_box.delete("1.0", END)
        self.results_box["state"] = "disabled"

    def freeze_editable(self):
        """Disable all interactive controls while a background task runs."""
        self.query_box["state"] = "disabled"
        self.search_button["state"] = "disabled"
        self.prev["state"] = "disabled"
        self.next["state"] = "disabled"

    def unfreeze_editable(self):
        """Re-enable the query controls and the appropriate paging buttons."""
        self.query_box["state"] = "normal"
        self.search_button["state"] = "normal"
        self.set_paging_button_states()

    def set_paging_button_states(self):
        """Enable/disable Previous/Next according to the current page.

        Pages count from 1 here; 0 means no search has been run yet.
        """
        if self.current_page == 0 or self.current_page == 1:
            self.prev["state"] = "disabled"
        else:
            self.prev["state"] = "normal"
        if self.model.has_more_pages(self.current_page):
            self.next["state"] = "normal"
        else:
            self.next["state"] = "disabled"

    def fire_event(self, event):
        """Queue ``event`` on the Tk mainloop so widget updates happen on
        the GUI thread rather than a worker thread."""
        self.top.event_generate(event, when="tail")

    def mainloop(self, *args, **kwargs):
        """Enter the Tk mainloop, unless running inside IDLE (which has its own)."""
        if in_idle():
            return
        self.top.mainloop(*args, **kwargs)
def app():
    """Launch the concordance-search GUI and block until the window closes."""
    view = ConcordanceSearchView()
    view.mainloop()
170,882 | from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk
from tkinter.font import Font
from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment
from nltk.draw.util import CanvasFrame, EntryDialog, ShowText, TextWidget
from nltk.parse import SteppingShiftReduceParser
from nltk.tree import Tree
from nltk.util import in_idle
class ShiftReduceApp:
"""
A graphical tool for exploring the shift-reduce parser. The tool
displays the parser's stack and the remaining text, and allows the
user to control the parser's operation. In particular, the user
can shift tokens onto the stack, and can perform reductions on the
top elements of the stack. A "step" button simply steps through
the parsing process, performing the operations that
``nltk.parse.ShiftReduceParser`` would use.
"""
def __init__(self, grammar, sent, trace=0):
self._sent = sent
self._parser = SteppingShiftReduceParser(grammar, trace)
# Set up the main window.
self._top = Tk()
self._top.title("Shift Reduce Parser Application")
# Animations. animating_lock is a lock to prevent the demo
# from performing new operations while it's animating.
self._animating_lock = 0
self._animate = IntVar(self._top)
self._animate.set(10) # = medium
# The user can hide the grammar.
self._show_grammar = IntVar(self._top)
self._show_grammar.set(1)
# Initialize fonts.
self._init_fonts(self._top)
# Set up key bindings.
self._init_bindings()
# Create the basic frames.
self._init_menubar(self._top)
self._init_buttons(self._top)
self._init_feedback(self._top)
self._init_grammar(self._top)
self._init_canvas(self._top)
# A popup menu for reducing.
self._reduce_menu = Menu(self._canvas, tearoff=0)
# Reset the demo, and set the feedback frame to empty.
self.reset()
self._lastoper1["text"] = ""
#########################################
## Initialization Helpers
#########################################
def _init_fonts(self, root):
# See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
self._sysfont = Font(font=Button()["font"])
root.option_add("*Font", self._sysfont)
# TWhat's our font size (default=same as sysfont)
self._size = IntVar(root)
self._size.set(self._sysfont.cget("size"))
self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get())
self._font = Font(family="helvetica", size=self._size.get())
def _init_grammar(self, parent):
# Grammar view.
self._prodframe = listframe = Frame(parent)
self._prodframe.pack(fill="both", side="left", padx=2)
self._prodlist_label = Label(
self._prodframe, font=self._boldfont, text="Available Reductions"
)
self._prodlist_label.pack()
self._prodlist = Listbox(
self._prodframe,
selectmode="single",
relief="groove",
background="white",
foreground="#909090",
font=self._font,
selectforeground="#004040",
selectbackground="#c0f0c0",
)
self._prodlist.pack(side="right", fill="both", expand=1)
self._productions = list(self._parser.grammar().productions())
for production in self._productions:
self._prodlist.insert("end", (" %s" % production))
self._prodlist.config(height=min(len(self._productions), 25))
# Add a scrollbar if there are more than 25 productions.
if 1: # len(self._productions) > 25:
listscroll = Scrollbar(self._prodframe, orient="vertical")
self._prodlist.config(yscrollcommand=listscroll.set)
listscroll.config(command=self._prodlist.yview)
listscroll.pack(side="left", fill="y")
# If they select a production, apply it.
self._prodlist.bind("<<ListboxSelect>>", self._prodlist_select)
# When they hover over a production, highlight it.
self._hover = -1
self._prodlist.bind("<Motion>", self._highlight_hover)
self._prodlist.bind("<Leave>", self._clear_hover)
def _init_bindings(self):
# Quit
self._top.bind("<Control-q>", self.destroy)
self._top.bind("<Control-x>", self.destroy)
self._top.bind("<Alt-q>", self.destroy)
self._top.bind("<Alt-x>", self.destroy)
# Ops (step, shift, reduce, undo)
self._top.bind("<space>", self.step)
self._top.bind("<s>", self.shift)
self._top.bind("<Alt-s>", self.shift)
self._top.bind("<Control-s>", self.shift)
self._top.bind("<r>", self.reduce)
self._top.bind("<Alt-r>", self.reduce)
self._top.bind("<Control-r>", self.reduce)
self._top.bind("<Delete>", self.reset)
self._top.bind("<u>", self.undo)
self._top.bind("<Alt-u>", self.undo)
self._top.bind("<Control-u>", self.undo)
self._top.bind("<Control-z>", self.undo)
self._top.bind("<BackSpace>", self.undo)
# Misc
self._top.bind("<Control-p>", self.postscript)
self._top.bind("<Control-h>", self.help)
self._top.bind("<F1>", self.help)
self._top.bind("<Control-g>", self.edit_grammar)
self._top.bind("<Control-t>", self.edit_sentence)
# Animation speed control
self._top.bind("-", lambda e, a=self._animate: a.set(20))
self._top.bind("=", lambda e, a=self._animate: a.set(10))
self._top.bind("+", lambda e, a=self._animate: a.set(4))
def _init_buttons(self, parent):
# Set up the frames.
self._buttonframe = buttonframe = Frame(parent)
buttonframe.pack(fill="none", side="bottom")
Button(
buttonframe,
text="Step",
background="#90c0d0",
foreground="black",
command=self.step,
).pack(side="left")
Button(
buttonframe,
text="Shift",
underline=0,
background="#90f090",
foreground="black",
command=self.shift,
).pack(side="left")
Button(
buttonframe,
text="Reduce",
underline=0,
background="#90f090",
foreground="black",
command=self.reduce,
).pack(side="left")
Button(
buttonframe,
text="Undo",
underline=0,
background="#f0a0a0",
foreground="black",
command=self.undo,
).pack(side="left")
def _init_menubar(self, parent):
menubar = Menu(parent)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(
label="Reset Parser", underline=0, command=self.reset, accelerator="Del"
)
filemenu.add_command(
label="Print to Postscript",
underline=0,
command=self.postscript,
accelerator="Ctrl-p",
)
filemenu.add_command(
label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
)
menubar.add_cascade(label="File", underline=0, menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(
label="Edit Grammar",
underline=5,
command=self.edit_grammar,
accelerator="Ctrl-g",
)
editmenu.add_command(
label="Edit Text",
underline=5,
command=self.edit_sentence,
accelerator="Ctrl-t",
)
menubar.add_cascade(label="Edit", underline=0, menu=editmenu)
rulemenu = Menu(menubar, tearoff=0)
rulemenu.add_command(
label="Step", underline=1, command=self.step, accelerator="Space"
)
rulemenu.add_separator()
rulemenu.add_command(
label="Shift", underline=0, command=self.shift, accelerator="Ctrl-s"
)
rulemenu.add_command(
label="Reduce", underline=0, command=self.reduce, accelerator="Ctrl-r"
)
rulemenu.add_separator()
rulemenu.add_command(
label="Undo", underline=0, command=self.undo, accelerator="Ctrl-u"
)
menubar.add_cascade(label="Apply", underline=0, menu=rulemenu)
viewmenu = Menu(menubar, tearoff=0)
viewmenu.add_checkbutton(
label="Show Grammar",
underline=0,
variable=self._show_grammar,
command=self._toggle_grammar,
)
viewmenu.add_separator()
viewmenu.add_radiobutton(
label="Tiny",
variable=self._size,
underline=0,
value=10,
command=self.resize,
)
viewmenu.add_radiobutton(
label="Small",
variable=self._size,
underline=0,
value=12,
command=self.resize,
)
viewmenu.add_radiobutton(
label="Medium",
variable=self._size,
underline=0,
value=14,
command=self.resize,
)
viewmenu.add_radiobutton(
label="Large",
variable=self._size,
underline=0,
value=18,
command=self.resize,
)
viewmenu.add_radiobutton(
label="Huge",
variable=self._size,
underline=0,
value=24,
command=self.resize,
)
menubar.add_cascade(label="View", underline=0, menu=viewmenu)
animatemenu = Menu(menubar, tearoff=0)
animatemenu.add_radiobutton(
label="No Animation", underline=0, variable=self._animate, value=0
)
animatemenu.add_radiobutton(
label="Slow Animation",
underline=0,
variable=self._animate,
value=20,
accelerator="-",
)
animatemenu.add_radiobutton(
label="Normal Animation",
underline=0,
variable=self._animate,
value=10,
accelerator="=",
)
animatemenu.add_radiobutton(
label="Fast Animation",
underline=0,
variable=self._animate,
value=4,
accelerator="+",
)
menubar.add_cascade(label="Animate", underline=1, menu=animatemenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="About", underline=0, command=self.about)
helpmenu.add_command(
label="Instructions", underline=0, command=self.help, accelerator="F1"
)
menubar.add_cascade(label="Help", underline=0, menu=helpmenu)
parent.config(menu=menubar)
def _init_feedback(self, parent):
self._feedbackframe = feedbackframe = Frame(parent)
feedbackframe.pack(fill="x", side="bottom", padx=3, pady=3)
self._lastoper_label = Label(
feedbackframe, text="Last Operation:", font=self._font
)
self._lastoper_label.pack(side="left")
lastoperframe = Frame(feedbackframe, relief="sunken", border=1)
lastoperframe.pack(fill="x", side="right", expand=1, padx=5)
self._lastoper1 = Label(
lastoperframe, foreground="#007070", background="#f0f0f0", font=self._font
)
self._lastoper2 = Label(
lastoperframe,
anchor="w",
width=30,
foreground="#004040",
background="#f0f0f0",
font=self._font,
)
self._lastoper1.pack(side="left")
self._lastoper2.pack(side="left", fill="x", expand=1)
def _init_canvas(self, parent):
self._cframe = CanvasFrame(
parent,
background="white",
width=525,
closeenough=10,
border=2,
relief="sunken",
)
self._cframe.pack(expand=1, fill="both", side="top", pady=2)
canvas = self._canvas = self._cframe.canvas()
self._stackwidgets = []
self._rtextwidgets = []
self._titlebar = canvas.create_rectangle(
0, 0, 0, 0, fill="#c0f0f0", outline="black"
)
self._exprline = canvas.create_line(0, 0, 0, 0, dash=".")
self._stacktop = canvas.create_line(0, 0, 0, 0, fill="#408080")
size = self._size.get() + 4
self._stacklabel = TextWidget(
canvas, "Stack", color="#004040", font=self._boldfont
)
self._rtextlabel = TextWidget(
canvas, "Remaining Text", color="#004040", font=self._boldfont
)
self._cframe.add_widget(self._stacklabel)
self._cframe.add_widget(self._rtextlabel)
#########################################
## Main draw procedure
#########################################
def _redraw(self):
scrollregion = self._canvas["scrollregion"].split()
(cx1, cy1, cx2, cy2) = (int(c) for c in scrollregion)
# Delete the old stack & rtext widgets.
for stackwidget in self._stackwidgets:
self._cframe.destroy_widget(stackwidget)
self._stackwidgets = []
for rtextwidget in self._rtextwidgets:
self._cframe.destroy_widget(rtextwidget)
self._rtextwidgets = []
# Position the titlebar & exprline
(x1, y1, x2, y2) = self._stacklabel.bbox()
y = y2 - y1 + 10
self._canvas.coords(self._titlebar, -5000, 0, 5000, y - 4)
self._canvas.coords(self._exprline, 0, y * 2 - 10, 5000, y * 2 - 10)
# Position the titlebar labels..
(x1, y1, x2, y2) = self._stacklabel.bbox()
self._stacklabel.move(5 - x1, 3 - y1)
(x1, y1, x2, y2) = self._rtextlabel.bbox()
self._rtextlabel.move(cx2 - x2 - 5, 3 - y1)
# Draw the stack.
stackx = 5
for tok in self._parser.stack():
if isinstance(tok, Tree):
attribs = {
"tree_color": "#4080a0",
"tree_width": 2,
"node_font": self._boldfont,
"node_color": "#006060",
"leaf_color": "#006060",
"leaf_font": self._font,
}
widget = tree_to_treesegment(self._canvas, tok, **attribs)
widget.label()["color"] = "#000000"
else:
widget = TextWidget(self._canvas, tok, color="#000000", font=self._font)
widget.bind_click(self._popup_reduce)
self._stackwidgets.append(widget)
self._cframe.add_widget(widget, stackx, y)
stackx = widget.bbox()[2] + 10
# Draw the remaining text.
rtextwidth = 0
for tok in self._parser.remaining_text():
widget = TextWidget(self._canvas, tok, color="#000000", font=self._font)
self._rtextwidgets.append(widget)
self._cframe.add_widget(widget, rtextwidth, y)
rtextwidth = widget.bbox()[2] + 4
# Allow enough room to shift the next token (for animations)
if len(self._rtextwidgets) > 0:
stackx += self._rtextwidgets[0].width()
# Move the remaining text to the correct location (keep it
# right-justified, when possible); and move the remaining text
# label, if necessary.
stackx = max(stackx, self._stacklabel.width() + 25)
rlabelwidth = self._rtextlabel.width() + 10
if stackx >= cx2 - max(rtextwidth, rlabelwidth):
cx2 = stackx + max(rtextwidth, rlabelwidth)
for rtextwidget in self._rtextwidgets:
rtextwidget.move(4 + cx2 - rtextwidth, 0)
self._rtextlabel.move(cx2 - self._rtextlabel.bbox()[2] - 5, 0)
midx = (stackx + cx2 - max(rtextwidth, rlabelwidth)) / 2
self._canvas.coords(self._stacktop, midx, 0, midx, 5000)
(x1, y1, x2, y2) = self._stacklabel.bbox()
# Set up binding to allow them to shift a token by dragging it.
if len(self._rtextwidgets) > 0:
def drag_shift(widget, midx=midx, self=self):
if widget.bbox()[0] < midx:
self.shift()
else:
self._redraw()
self._rtextwidgets[0].bind_drag(drag_shift)
self._rtextwidgets[0].bind_click(self.shift)
# Draw the stack top.
self._highlight_productions()
def _draw_stack_top(self, widget):
# hack..
midx = widget.bbox()[2] + 50
self._canvas.coords(self._stacktop, midx, 0, midx, 5000)
def _highlight_productions(self):
# Highlight the productions that can be reduced.
self._prodlist.selection_clear(0, "end")
for prod in self._parser.reducible_productions():
index = self._productions.index(prod)
self._prodlist.selection_set(index)
#########################################
## Button Callbacks
#########################################
def destroy(self, *e):
if self._top is None:
return
self._top.destroy()
self._top = None
def reset(self, *e):
self._parser.initialize(self._sent)
self._lastoper1["text"] = "Reset App"
self._lastoper2["text"] = ""
self._redraw()
def step(self, *e):
if self.reduce():
return True
elif self.shift():
return True
else:
if list(self._parser.parses()):
self._lastoper1["text"] = "Finished:"
self._lastoper2["text"] = "Success"
else:
self._lastoper1["text"] = "Finished:"
self._lastoper2["text"] = "Failure"
def shift(self, *e):
if self._animating_lock:
return
if self._parser.shift():
tok = self._parser.stack()[-1]
self._lastoper1["text"] = "Shift:"
self._lastoper2["text"] = "%r" % tok
if self._animate.get():
self._animate_shift()
else:
self._redraw()
return True
return False
def reduce(self, *e):
if self._animating_lock:
return
production = self._parser.reduce()
if production:
self._lastoper1["text"] = "Reduce:"
self._lastoper2["text"] = "%s" % production
if self._animate.get():
self._animate_reduce()
else:
self._redraw()
return production
def undo(self, *e):
if self._animating_lock:
return
if self._parser.undo():
self._redraw()
def postscript(self, *e):
self._cframe.print_to_file()
def mainloop(self, *args, **kwargs):
"""
Enter the Tkinter mainloop. This function must be called if
this demo is created from a non-interactive program (e.g.
from a secript); otherwise, the demo will close as soon as
the script completes.
"""
if in_idle():
return
self._top.mainloop(*args, **kwargs)
#########################################
## Menubar callbacks
#########################################
def resize(self, size=None):
if size is not None:
self._size.set(size)
size = self._size.get()
self._font.configure(size=-(abs(size)))
self._boldfont.configure(size=-(abs(size)))
self._sysfont.configure(size=-(abs(size)))
# self._stacklabel['font'] = ('helvetica', -size-4, 'bold')
# self._rtextlabel['font'] = ('helvetica', -size-4, 'bold')
# self._lastoper_label['font'] = ('helvetica', -size)
# self._lastoper1['font'] = ('helvetica', -size)
# self._lastoper2['font'] = ('helvetica', -size)
# self._prodlist['font'] = ('helvetica', -size)
# self._prodlist_label['font'] = ('helvetica', -size-2, 'bold')
self._redraw()
def help(self, *e):
# The default font's not very legible; try using 'fixed' instead.
try:
ShowText(
self._top,
"Help: Shift-Reduce Parser Application",
(__doc__ or "").strip(),
width=75,
font="fixed",
)
except:
ShowText(
self._top,
"Help: Shift-Reduce Parser Application",
(__doc__ or "").strip(),
width=75,
)
def about(self, *e):
ABOUT = "NLTK Shift-Reduce Parser Application\n" + "Written by Edward Loper"
TITLE = "About: Shift-Reduce Parser Application"
try:
from tkinter.messagebox import Message
Message(message=ABOUT, title=TITLE).show()
except:
ShowText(self._top, TITLE, ABOUT)
def edit_grammar(self, *e):
CFGEditor(self._top, self._parser.grammar(), self.set_grammar)
def set_grammar(self, grammar):
self._parser.set_grammar(grammar)
self._productions = list(grammar.productions())
self._prodlist.delete(0, "end")
for production in self._productions:
self._prodlist.insert("end", (" %s" % production))
def edit_sentence(self, *e):
sentence = " ".join(self._sent)
title = "Edit Text"
instr = "Enter a new sentence to parse."
EntryDialog(self._top, sentence, instr, self.set_sentence, title)
def set_sentence(self, sent):
self._sent = sent.split() # [XX] use tagged?
self.reset()
#########################################
## Reduce Production Selection
#########################################
def _toggle_grammar(self, *e):
if self._show_grammar.get():
self._prodframe.pack(
fill="both", side="left", padx=2, after=self._feedbackframe
)
self._lastoper1["text"] = "Show Grammar"
else:
self._prodframe.pack_forget()
self._lastoper1["text"] = "Hide Grammar"
self._lastoper2["text"] = ""
def _prodlist_select(self, event):
selection = self._prodlist.curselection()
if len(selection) != 1:
return
index = int(selection[0])
production = self._parser.reduce(self._productions[index])
if production:
self._lastoper1["text"] = "Reduce:"
self._lastoper2["text"] = "%s" % production
if self._animate.get():
self._animate_reduce()
else:
self._redraw()
else:
# Reset the production selections.
self._prodlist.selection_clear(0, "end")
for prod in self._parser.reducible_productions():
index = self._productions.index(prod)
self._prodlist.selection_set(index)
def _popup_reduce(self, widget):
# Remove old commands.
productions = self._parser.reducible_productions()
if len(productions) == 0:
return
self._reduce_menu.delete(0, "end")
for production in productions:
self._reduce_menu.add_command(label=str(production), command=self.reduce)
self._reduce_menu.post(
self._canvas.winfo_pointerx(), self._canvas.winfo_pointery()
)
#########################################
## Animations
#########################################
def _animate_shift(self):
# What widget are we shifting?
widget = self._rtextwidgets[0]
# Where are we shifting from & to?
right = widget.bbox()[0]
if len(self._stackwidgets) == 0:
left = 5
else:
left = self._stackwidgets[-1].bbox()[2] + 10
# Start animating.
dt = self._animate.get()
dx = (left - right) * 1.0 / dt
self._animate_shift_frame(dt, widget, dx)
    def _animate_shift_frame(self, frame, widget, dx):
        """Draw one frame of the shift animation; reschedules itself every
        10 ms until ``frame`` reaches zero, then moves the widget from the
        remaining-text list onto the stack."""
        if frame > 0:
            self._animating_lock = 1
            widget.move(dx, 0)
            self._top.after(10, self._animate_shift_frame, frame - 1, widget, dx)
        else:
            # but: stacktop??
            # Shift the widget to the stack.
            del self._rtextwidgets[0]
            self._stackwidgets.append(widget)
            self._animating_lock = 0
            # Display the available productions.
            self._draw_stack_top(widget)
            self._highlight_productions()
    def _animate_reduce(self):
        """Animate a reduce operation: move the stack widgets that were just
        reduced downward, then regroup them under a new tree-segment widget
        (see ``_animate_reduce_frame``)."""
        # What widgets are we shifting?
        numwidgets = len(self._parser.stack()[-1])  # number of children
        widgets = self._stackwidgets[-numwidgets:]
        # How far are we moving?
        if isinstance(widgets[0], TreeSegmentWidget):
            ydist = 15 + widgets[0].label().height()
        else:
            ydist = 15 + widgets[0].height()
        # Start animating.
        dt = self._animate.get()
        dy = ydist * 2.0 / dt
        self._animate_reduce_frame(dt / 2, widgets, dy)
    def _animate_reduce_frame(self, frame, widgets, dy):
        """Draw one frame of the reduce animation; when the countdown ends,
        remove the reduced widgets from the canvas and replace them with a
        single ``TreeSegmentWidget`` labeled by the new stack-top tree."""
        if frame > 0:
            self._animating_lock = 1
            for widget in widgets:
                widget.move(0, dy)
            self._top.after(10, self._animate_reduce_frame, frame - 1, widgets, dy)
        else:
            # Drop the old widgets and build the combined tree segment.
            del self._stackwidgets[-len(widgets) :]
            for widget in widgets:
                self._cframe.remove_widget(widget)
            tok = self._parser.stack()[-1]
            if not isinstance(tok, Tree):
                raise ValueError()
            label = TextWidget(
                self._canvas, str(tok.label()), color="#006060", font=self._boldfont
            )
            widget = TreeSegmentWidget(self._canvas, label, widgets, width=2)
            (x1, y1, x2, y2) = self._stacklabel.bbox()
            y = y2 - y1 + 10
            if not self._stackwidgets:
                x = 5
            else:
                x = self._stackwidgets[-1].bbox()[2] + 10
            self._cframe.add_widget(widget, x, y)
            self._stackwidgets.append(widget)
            # Display the available productions.
            self._draw_stack_top(widget)
            self._highlight_productions()
            # # Delete the old widgets..
            # del self._stackwidgets[-len(widgets):]
            # for widget in widgets:
            #     self._cframe.destroy_widget(widget)
            #
            # # Make a new one.
            # tok = self._parser.stack()[-1]
            # if isinstance(tok, Tree):
            #     attribs = {'tree_color': '#4080a0', 'tree_width': 2,
            #                'node_font': bold, 'node_color': '#006060',
            #                'leaf_color': '#006060', 'leaf_font':self._font}
            #     widget = tree_to_treesegment(self._canvas, tok.type(),
            #                                  **attribs)
            #     widget.node()['color'] = '#000000'
            # else:
            #     widget = TextWidget(self._canvas, tok.type(),
            #                         color='#000000', font=self._font)
            # widget.bind_click(self._popup_reduce)
            # (x1, y1, x2, y2) = self._stacklabel.bbox()
            # y = y2-y1+10
            # if not self._stackwidgets: x = 5
            # else: x = self._stackwidgets[-1].bbox()[2] + 10
            # self._cframe.add_widget(widget, x, y)
            # self._stackwidgets.append(widget)
            # self._redraw()
            self._animating_lock = 0
    #########################################
    ## Hovering.
    #########################################
    def _highlight_hover(self, event):
        """Highlight (in green) the stack widgets that would be consumed by
        the production currently under the mouse in the production list."""
        # What production are we hovering over?
        index = self._prodlist.nearest(event.y)
        if self._hover == index:
            return  # already highlighted for this production
        # Clear any previous hover highlighting.
        self._clear_hover()
        # If the production corresponds to an available reduction,
        # highlight the stack.
        selection = [int(s) for s in self._prodlist.curselection()]
        if index in selection:
            rhslen = len(self._productions[index].rhs())
            for stackwidget in self._stackwidgets[-rhslen:]:
                if isinstance(stackwidget, TreeSegmentWidget):
                    stackwidget.label()["color"] = "#00a000"
                else:
                    stackwidget["color"] = "#00a000"
        # Remember what production we're hovering over.
        self._hover = index
    def _clear_hover(self, *event):
        """Remove any hover highlighting, restoring all stack widgets to
        their default (black) color."""
        # Clear any previous hover highlighting.
        if self._hover == -1:
            return  # nothing is highlighted
        self._hover = -1
        for stackwidget in self._stackwidgets:
            if isinstance(stackwidget, TreeSegmentWidget):
                stackwidget.label()["color"] = "black"
            else:
                stackwidget["color"] = "black"
class Nonterminal:
    """
    A non-terminal symbol for a context free grammar.  ``Nonterminal``
    wraps a node value so that ``Production`` objects can tell node
    values apart from leaf values.

    The wrapped value is known as the nonterminal's "symbol".  Symbols
    are usually strings naming phrasal categories (such as ``"NP"`` or
    ``"VP"``), though more complex symbol types are sometimes used
    (e.g., for lexicalized grammars).  Because symbols are node values,
    they must be immutable and hashable.  Two ``Nonterminals`` compare
    equal exactly when their symbols are equal.

    :see: ``CFG``, ``Production``
    :type _symbol: any
    :ivar _symbol: The node value wrapped by this ``Nonterminal``;
        must be immutable and hashable.
    """

    def __init__(self, symbol):
        """
        Construct a new non-terminal from the given symbol.

        :type symbol: any
        :param symbol: The node value wrapped by this ``Nonterminal``;
            must be immutable and hashable.
        """
        self._symbol = symbol

    def symbol(self):
        """
        Return the node value wrapped by this ``Nonterminal``.

        :rtype: (any)
        """
        return self._symbol

    def __eq__(self, other):
        """
        True iff ``other`` is a ``Nonterminal`` of the same class whose
        symbol equals this one's.

        :rtype: bool
        """
        if type(self) != type(other):
            return False
        return self._symbol == other._symbol

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        if not isinstance(other, Nonterminal):
            raise_unorderable_types("<", self, other)
        return self._symbol < other._symbol

    def __hash__(self):
        # Hash on the symbol so equal nonterminals hash alike.
        return hash(self._symbol)

    def __repr__(self):
        """
        Return a string representation for this ``Nonterminal``.

        :rtype: str
        """
        symbol = self._symbol
        return symbol if isinstance(symbol, str) else repr(symbol)

    def __str__(self):
        """
        Return a string representation for this ``Nonterminal``.

        :rtype: str
        """
        return self.__repr__()

    def __div__(self, rhs):
        """
        Return a new nonterminal whose symbol is ``A/B``, where ``A``
        is this nonterminal's symbol and ``B`` is ``rhs``'s symbol.

        :param rhs: The nonterminal used to form the right hand side
            of the new nonterminal.
        :type rhs: Nonterminal
        :rtype: Nonterminal
        """
        return Nonterminal("%s/%s" % (self._symbol, rhs._symbol))

    def __truediv__(self, rhs):
        """
        Identical to ``__div__``; supports the slash ``/`` operator
        under true division.

        :param rhs: The nonterminal used to form the right hand side
            of the new nonterminal.
        :type rhs: Nonterminal
        :rtype: Nonterminal
        """
        return self.__div__(rhs)
class Production:
    """
    A grammar production.  Each production maps a single symbol on the
    "left-hand side" to a sequence of symbols on the "right-hand
    side".  (In the case of context-free productions, the left-hand
    side must be a ``Nonterminal``, and the right-hand side is a
    sequence of terminals and ``Nonterminals``.)  "terminals" can be
    any immutable hashable object that is not a ``Nonterminal``;
    typically, terminals are strings representing words, such as
    ``"dog"`` or ``"under"``.

    :see: ``CFG``
    :see: ``DependencyGrammar``
    :see: ``Nonterminal``
    :type _lhs: Nonterminal
    :ivar _lhs: The left-hand side of the production.
    :type _rhs: tuple(Nonterminal, terminal)
    :ivar _rhs: The right-hand side of the production.
    """

    def __init__(self, lhs, rhs):
        """
        Construct a new ``Production``.

        :param lhs: The left-hand side of the new ``Production``.
        :type lhs: Nonterminal
        :param rhs: The right-hand side of the new ``Production``.
        :type rhs: sequence(Nonterminal and terminal)
        """
        # A bare string would be iterated character-by-character; reject it.
        if isinstance(rhs, str):
            raise TypeError(
                "production right hand side should be a list, " "not a string"
            )
        self._lhs = lhs
        self._rhs = tuple(rhs)

    def lhs(self):
        """
        Return the left-hand side of this ``Production``.

        :rtype: Nonterminal
        """
        return self._lhs

    def rhs(self):
        """
        Return the right-hand side of this ``Production``.

        :rtype: sequence(Nonterminal and terminal)
        """
        return self._rhs

    def __len__(self):
        """
        Return the length of the right-hand side.

        :rtype: int
        """
        return len(self._rhs)

    def is_nonlexical(self):
        """
        Return True if the right-hand side only contains ``Nonterminals``.

        :rtype: bool
        """
        return all(map(is_nonterminal, self._rhs))

    def is_lexical(self):
        """
        Return True if the right-hand side contains at least one terminal
        token.

        :rtype: bool
        """
        return not self.is_nonlexical()

    def __str__(self):
        """
        Return a verbose string representation of the ``Production``.

        :rtype: str
        """
        rhs_text = " ".join(repr(element) for element in self._rhs)
        return f"{self._lhs!r} -> {rhs_text}"

    def __repr__(self):
        """
        Return a concise string representation of the ``Production``.

        :rtype: str
        """
        return str(self)

    def __eq__(self, other):
        """
        Return True if this ``Production`` is equal to ``other``.

        :rtype: bool
        """
        if type(self) != type(other):
            return False
        return self._lhs == other._lhs and self._rhs == other._rhs

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        if not isinstance(other, Production):
            raise_unorderable_types("<", self, other)
        return (self._lhs, self._rhs) < (other._lhs, other._rhs)

    def __hash__(self):
        """
        Return a hash value for the ``Production``.

        :rtype: int
        """
        return hash((self._lhs, self._rhs))
class CFG:
    """
    A context-free grammar.  A grammar consists of a start state and
    a set of productions.  The set of terminals and nonterminals is
    implicitly specified by the productions.

    If you need efficient key-based access to productions, you
    can use a subclass to implement it.
    """

    def __init__(self, start, productions, calculate_leftcorners=True):
        """
        Create a new context-free grammar, from the given start state
        and set of ``Production`` instances.

        :param start: The start symbol
        :type start: Nonterminal
        :param productions: The list of productions that defines the grammar
        :type productions: list(Production)
        :param calculate_leftcorners: False if we don't want to calculate the
            leftcorner relation. In that case, some optimized chart parsers won't work.
        :type calculate_leftcorners: bool
        """
        if not is_nonterminal(start):
            raise TypeError(
                "start should be a Nonterminal object,"
                " not a %s" % type(start).__name__
            )
        self._start = start
        self._productions = productions
        self._categories = {prod.lhs() for prod in productions}
        self._calculate_indexes()
        self._calculate_grammar_forms()
        if calculate_leftcorners:
            self._calculate_leftcorners()

    def _calculate_indexes(self):
        """Build lookup indexes over the productions: by lhs, by first
        rhs item, empty-rhs productions, and lexical tokens."""
        self._lhs_index = {}
        self._rhs_index = {}
        self._empty_index = {}
        self._lexical_index = {}
        for prod in self._productions:
            # Left hand side.
            lhs = prod._lhs
            if lhs not in self._lhs_index:
                self._lhs_index[lhs] = []
            self._lhs_index[lhs].append(prod)
            if prod._rhs:
                # First item in right hand side.
                rhs0 = prod._rhs[0]
                if rhs0 not in self._rhs_index:
                    self._rhs_index[rhs0] = []
                self._rhs_index[rhs0].append(prod)
            else:
                # The right hand side is empty.
                self._empty_index[prod.lhs()] = prod
            # Lexical tokens in the right hand side.
            for token in prod._rhs:
                if is_terminal(token):
                    self._lexical_index.setdefault(token, set()).add(prod)

    def _calculate_leftcorners(self):
        # Calculate leftcorner relations, for use in optimized parsing.
        self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories}
        self._immediate_leftcorner_words = {cat: set() for cat in self._categories}
        for prod in self.productions():
            if len(prod) > 0:
                cat, left = prod.lhs(), prod.rhs()[0]
                if is_nonterminal(left):
                    self._immediate_leftcorner_categories[cat].add(left)
                else:
                    self._immediate_leftcorner_words[cat].add(left)
        lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True)
        self._leftcorners = lc
        self._leftcorner_parents = invert_graph(lc)
        nr_leftcorner_categories = sum(
            map(len, self._immediate_leftcorner_categories.values())
        )
        nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values()))
        if nr_leftcorner_words > nr_leftcorner_categories > 10000:
            # If the grammar is big, the leftcorner-word dictionary will be too large.
            # In that case it is better to calculate the relation on demand.
            self._leftcorner_words = None
            return
        self._leftcorner_words = {}
        for cat in self._leftcorners:
            lefts = self._leftcorners[cat]
            lc = self._leftcorner_words[cat] = set()
            for left in lefts:
                lc.update(self._immediate_leftcorner_words.get(left, set()))

    @classmethod
    def fromstring(cls, input, encoding=None):
        """
        Return the grammar instance corresponding to the input string(s).

        :param input: a grammar, either in the form of a string or as a list of strings.
        """
        # Note: must be a classmethod; it is invoked as ``CFG.fromstring(...)``.
        start, productions = read_grammar(
            input, standard_nonterm_parser, encoding=encoding
        )
        return cls(start, productions)

    def start(self):
        """
        Return the start symbol of the grammar

        :rtype: Nonterminal
        """
        return self._start

    # tricky to balance readability and efficiency here!
    # can't use set operations as they don't preserve ordering
    def productions(self, lhs=None, rhs=None, empty=False):
        """
        Return the grammar productions, filtered by the left-hand side
        or the first item in the right-hand side.

        :param lhs: Only return productions with the given left-hand side.
        :param rhs: Only return productions with the given first item
            in the right-hand side.
        :param empty: Only return productions with an empty right-hand side.
        :return: A list of productions matching the given constraints.
        :rtype: list(Production)
        """
        if rhs and empty:
            raise ValueError(
                "You cannot select empty and non-empty " "productions at the same time."
            )
        # no constraints so return everything
        if not lhs and not rhs:
            if not empty:
                return self._productions
            else:
                return self._empty_index.values()
        # only lhs specified so look up its index
        elif lhs and not rhs:
            if not empty:
                return self._lhs_index.get(lhs, [])
            elif lhs in self._empty_index:
                return [self._empty_index[lhs]]
            else:
                return []
        # only rhs specified so look up its index
        elif rhs and not lhs:
            return self._rhs_index.get(rhs, [])
        # intersect
        else:
            return [
                prod
                for prod in self._lhs_index.get(lhs, [])
                if prod in self._rhs_index.get(rhs, [])
            ]

    def leftcorners(self, cat):
        """
        Return the set of all nonterminals that the given nonterminal
        can start with, including itself.

        This is the reflexive, transitive closure of the immediate
        leftcorner relation:  (A > B)  iff  (A -> B beta)

        :param cat: the parent of the leftcorners
        :type cat: Nonterminal
        :return: the set of all leftcorners
        :rtype: set(Nonterminal)
        """
        return self._leftcorners.get(cat, {cat})

    def is_leftcorner(self, cat, left):
        """
        True if left is a leftcorner of cat, where left can be a
        terminal or a nonterminal.

        :param cat: the parent of the leftcorner
        :type cat: Nonterminal
        :param left: the suggested leftcorner
        :type left: Terminal or Nonterminal
        :rtype: bool
        """
        if is_nonterminal(left):
            return left in self.leftcorners(cat)
        elif self._leftcorner_words:
            return left in self._leftcorner_words.get(cat, set())
        else:
            # Leftcorner-word table was skipped (big grammar); compute on demand.
            return any(
                left in self._immediate_leftcorner_words.get(parent, set())
                for parent in self.leftcorners(cat)
            )

    def leftcorner_parents(self, cat):
        """
        Return the set of all nonterminals for which the given category
        is a left corner. This is the inverse of the leftcorner relation.

        :param cat: the suggested leftcorner
        :type cat: Nonterminal
        :return: the set of all parents to the leftcorner
        :rtype: set(Nonterminal)
        """
        return self._leftcorner_parents.get(cat, {cat})

    def check_coverage(self, tokens):
        """
        Check whether the grammar rules cover the given list of tokens.
        If not, then raise an exception.

        :type tokens: list(str)
        """
        missing = [tok for tok in tokens if not self._lexical_index.get(tok)]
        if missing:
            missing = ", ".join(f"{w!r}" for w in missing)
            raise ValueError(
                "Grammar does not cover some of the " "input words: %r." % missing
            )

    def _calculate_grammar_forms(self):
        """
        Pre-calculate of which form(s) the grammar is.
        """
        # NOTE: min()/max() raise ValueError on an empty production list;
        # callers are expected to supply a non-empty grammar.
        prods = self._productions
        self._is_lexical = all(p.is_lexical() for p in prods)
        self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1)
        self._min_len = min(len(p) for p in prods)
        self._max_len = max(len(p) for p in prods)
        self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1)

    def is_lexical(self):
        """
        Return True if all productions are lexicalised.
        """
        return self._is_lexical

    def is_nonlexical(self):
        """
        Return True if all lexical rules are "preterminals", that is,
        unary rules which can be separated in a preprocessing step.

        This means that all productions are of the forms
        A -> B1 ... Bn (n>=0), or A -> "s".

        Note: is_lexical() and is_nonlexical() are not opposites.
        There are grammars which are neither, and grammars which are both.
        """
        return self._is_nonlexical

    def min_len(self):
        """
        Return the right-hand side length of the shortest grammar production.
        """
        return self._min_len

    def max_len(self):
        """
        Return the right-hand side length of the longest grammar production.
        """
        return self._max_len

    def is_nonempty(self):
        """
        Return True if there are no empty productions.
        """
        return self._min_len > 0

    def is_binarised(self):
        """
        Return True if all productions are at most binary.
        Note that there can still be empty and unary productions.
        """
        return self._max_len <= 2

    def is_flexible_chomsky_normal_form(self):
        """
        Return True if all productions are of the forms
        A -> B C, A -> B, or A -> "s".
        """
        return self.is_nonempty() and self.is_nonlexical() and self.is_binarised()

    def is_chomsky_normal_form(self):
        """
        Return True if the grammar is of Chomsky Normal Form, i.e. all productions
        are of the form A -> B C, or A -> "s".
        """
        return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical

    def chomsky_normal_form(self, new_token_padding="@$@", flexible=False):
        """
        Returns a new Grammar that is in chomsky normal

        :param: new_token_padding
            Customise new rule formation during binarisation
        """
        if self.is_chomsky_normal_form():
            return self
        if self.productions(empty=True):
            raise ValueError(
                "Grammar has Empty rules. " "Cannot deal with them at the moment"
            )
        # check for mixed rules
        for rule in self.productions():
            if rule.is_lexical() and len(rule.rhs()) > 1:
                raise ValueError(
                    f"Cannot handle mixed rule {rule.lhs()} => {rule.rhs()}"
                )
        step1 = CFG.eliminate_start(self)
        step2 = CFG.binarize(step1, new_token_padding)
        if flexible:
            return step2
        step3 = CFG.remove_unitary_rules(step2)
        # Deduplicate productions introduced by the unitary-rule removal.
        step4 = CFG(step3.start(), list(set(step3.productions())))
        return step4

    @classmethod
    def remove_unitary_rules(cls, grammar):
        """
        Remove nonlexical unitary rules and convert them to
        lexical
        """
        result = []
        unitary = []
        for rule in grammar.productions():
            if len(rule) == 1 and rule.is_nonlexical():
                unitary.append(rule)
            else:
                result.append(rule)
        while unitary:
            rule = unitary.pop(0)
            for item in grammar.productions(lhs=rule.rhs()[0]):
                new_rule = Production(rule.lhs(), item.rhs())
                if len(new_rule) != 1 or new_rule.is_lexical():
                    result.append(new_rule)
                else:
                    unitary.append(new_rule)
        n_grammar = CFG(grammar.start(), result)
        return n_grammar

    @classmethod
    def binarize(cls, grammar, padding="@$@"):
        """
        Convert all non-binary rules into binary by introducing
        new tokens.
        Example::

            Original:
                A => B C D
            After Conversion:
                A => B A@$@B
                A@$@B => C D
        """
        result = []
        for rule in grammar.productions():
            if len(rule.rhs()) > 2:
                # this rule needs to be broken down
                left_side = rule.lhs()
                for k in range(0, len(rule.rhs()) - 2):
                    tsym = rule.rhs()[k]
                    new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol())
                    new_production = Production(left_side, (tsym, new_sym))
                    left_side = new_sym
                    result.append(new_production)
                last_prd = Production(left_side, rule.rhs()[-2:])
                result.append(last_prd)
            else:
                result.append(rule)
        n_grammar = CFG(grammar.start(), result)
        return n_grammar

    @classmethod
    def eliminate_start(cls, grammar):
        """
        Eliminate start rule in case it appears on RHS
        Example: S -> S0 S1 and S0 -> S1 S
        Then another rule S0_Sigma -> S is added
        """
        start = grammar.start()
        result = []
        need_to_add = None
        for rule in grammar.productions():
            if start in rule.rhs():
                need_to_add = True
            result.append(rule)
        if need_to_add:
            start = Nonterminal("S0_SIGMA")
            result.append(Production(start, [grammar.start()]))
            n_grammar = CFG(start, result)
            return n_grammar
        return grammar

    def __repr__(self):
        return "<Grammar with %d productions>" % len(self._productions)

    def __str__(self):
        result = "Grammar with %d productions" % len(self._productions)
        result += " (start state = %r)" % self._start
        for production in self._productions:
            result += "\n    %s" % production
        return result
The provided code snippet includes necessary dependencies for implementing the `app` function. Write a Python function `def app()` to solve the following problem:
Create a shift reduce parser app, using a simple grammar and text.
Here is the function:
def app():
    """
    Create a shift reduce parser app, using a simple grammar and
    text.
    """
    from nltk.grammar import CFG, Nonterminal, Production

    # Build the nonterminal symbols used by the demo grammar.
    S, VP, NP, PP, P, N, Name, V, Det = (
        Nonterminal(sym) for sym in "S VP NP PP P N Name V Det".split()
    )
    # Syntactic productions: (lhs, rhs-sequence) pairs.
    syntactic = [
        (S, [NP, VP]),
        (NP, [Det, N]),
        (NP, [NP, PP]),
        (VP, [VP, PP]),
        (VP, [V, NP, PP]),
        (VP, [V, NP]),
        (PP, [P, NP]),
    ]
    # Lexical productions: (category, word) pairs.
    lexical = [
        (NP, "I"),
        (Det, "the"),
        (Det, "a"),
        (N, "man"),
        (V, "saw"),
        (P, "in"),
        (P, "with"),
        (N, "park"),
        (N, "dog"),
        (N, "statue"),
        (Det, "my"),
    ]
    productions = [Production(lhs, rhs) for lhs, rhs in syntactic]
    productions += [Production(cat, [word]) for cat, word in lexical]
    grammar = CFG(S, productions)
    # Tokenize the demo sentence on whitespace.
    sent = "my dog saw a man in the park with a statue".split()
    ShiftReduceApp(grammar, sent).mainloop()
170,883 | import os.path
import pickle
from tkinter import (
Button,
Canvas,
Checkbutton,
Frame,
IntVar,
Label,
Menu,
Scrollbar,
Tk,
Toplevel,
)
from tkinter.filedialog import askopenfilename, asksaveasfilename
from tkinter.font import Font
from tkinter.messagebox import showerror, showinfo
from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment
from nltk.draw.util import (
CanvasFrame,
ColorizedList,
EntryDialog,
MutableOptionMenu,
ShowText,
SymbolWidget,
)
from nltk.grammar import CFG, Nonterminal
from nltk.parse.chart import (
BottomUpPredictCombineRule,
BottomUpPredictRule,
Chart,
LeafEdge,
LeafInitRule,
SingleEdgeFundamentalRule,
SteppingChartParser,
TopDownInitRule,
TopDownPredictRule,
TreeEdge,
)
from nltk.tree import Tree
from nltk.util import in_idle
class ChartParserApp:
    """Graphical tool for stepping through a chart parser's execution
    one rule application at a time."""

    def __init__(self, grammar, tokens, title="Chart Parser Application"):
        """
        Construct a new chart-parser window for ``grammar`` applied to
        ``tokens``.  On any construction failure the window is torn
        down and the exception re-raised.
        """
        # Initialize the parser
        self._init_parser(grammar, tokens)
        self._root = None
        try:
            # Create the root window.
            self._root = Tk()
            self._root.title(title)
            self._root.bind("<Control-q>", self.destroy)
            # Set up some frames.
            frame3 = Frame(self._root)
            frame2 = Frame(self._root)
            frame1 = Frame(self._root)
            frame3.pack(side="bottom", fill="none")
            frame2.pack(side="bottom", fill="x")
            frame1.pack(side="bottom", fill="both", expand=1)
            self._init_fonts(self._root)
            self._init_animation()
            self._init_chartview(frame1)
            self._init_rulelabel(frame2)
            self._init_buttons(frame3)
            self._init_menubar()
            self._matrix = None
            self._results = None
            # Set up keyboard bindings.
            self._init_bindings()
        except:
            print("Error creating Tree View")
            self.destroy()
            raise
    def destroy(self, *args):
        """Close the application window.  Safe to call repeatedly."""
        if self._root is None:
            return  # already destroyed
        self._root.destroy()
        self._root = None
    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        if in_idle():
            return
        self._root.mainloop(*args, **kwargs)

    # ////////////////////////////////////////////////////////////
    # Initialization Helpers
    # ////////////////////////////////////////////////////////////
    def _init_parser(self, grammar, tokens):
        """Record the grammar and tokens, then build a fresh parser."""
        self._grammar = grammar
        self._tokens = tokens
        self._reset_parser()
    def _reset_parser(self):
        """Create a new stepping chart parser for the current grammar and
        tokens, seeding the chart with its leaf edges."""
        self._cp = SteppingChartParser(self._grammar)
        self._cp.initialize(self._tokens)
        self._chart = self._cp.chart()
        # Insert LeafEdges before the parsing starts.
        for _new_edge in LeafInitRule().apply(self._chart, self._grammar):
            pass
        # The step iterator -- use this to generate new edges
        self._cpstep = self._cp.step()
        # The currently selected edge
        self._selection = None
    def _init_fonts(self, root):
        """Set up the fonts (and the font-size variable) used by the app."""
        # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
        self._sysfont = Font(font=Button()["font"])
        root.option_add("*Font", self._sysfont)
        # What's our font size (default=same as sysfont)
        self._size = IntVar(root)
        self._size.set(self._sysfont.cget("size"))
        self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get())
        self._font = Font(family="helvetica", size=self._size.get())
    def _init_animation(self):
        """Initialize the stepping and animation-speed control variables."""
        # Are we stepping? (default=yes)
        self._step = IntVar(self._root)
        self._step.set(1)
        # What's our animation speed (default=fast)
        self._animate = IntVar(self._root)
        self._animate.set(3)  # Default speed = fast
        # Are we currently animating?
        self._animating = 0
    def _init_chartview(self, parent):
        """Create the chart view widget and hook up edge-click selection."""
        self._cv = ChartView(self._chart, parent, draw_tree=1, draw_sentence=1)
        self._cv.add_callback("select", self._click_cv_edge)
    def _init_rulelabel(self, parent):
        """Create the labels showing the most recent chart rule, plus the
        "Step" checkbox."""
        ruletxt = "Last edge generated by:"
        self._rulelabel1 = Label(parent, text=ruletxt, font=self._boldfont)
        self._rulelabel2 = Label(
            parent, width=40, relief="groove", anchor="w", font=self._boldfont
        )
        self._rulelabel1.pack(side="left")
        self._rulelabel2.pack(side="left")
        step = Checkbutton(parent, variable=self._step, text="Step")
        step.pack(side="right")
    def _init_buttons(self, parent):
        """Create the control buttons: parsing strategies in ``frame1``,
        individual chart rules in ``frame2``."""
        frame1 = Frame(parent)
        frame2 = Frame(parent)
        frame1.pack(side="bottom", fill="x")
        frame2.pack(side="top", fill="none")
        Button(
            frame1,
            text="Reset\nParser",
            background="#90c0d0",
            foreground="black",
            command=self.reset,
        ).pack(side="right")
        # Button(frame1, text='Pause',
        #        background='#90c0d0', foreground='black',
        #        command=self.pause).pack(side='left')
        Button(
            frame1,
            text="Top Down\nStrategy",
            background="#90c0d0",
            foreground="black",
            command=self.top_down_strategy,
        ).pack(side="left")
        Button(
            frame1,
            text="Bottom Up\nStrategy",
            background="#90c0d0",
            foreground="black",
            command=self.bottom_up_strategy,
        ).pack(side="left")
        Button(
            frame1,
            text="Bottom Up\nLeft-Corner Strategy",
            background="#90c0d0",
            foreground="black",
            command=self.bottom_up_leftcorner_strategy,
        ).pack(side="left")
        Button(
            frame2,
            text="Top Down Init\nRule",
            background="#90f090",
            foreground="black",
            command=self.top_down_init,
        ).pack(side="left")
        Button(
            frame2,
            text="Top Down Predict\nRule",
            background="#90f090",
            foreground="black",
            command=self.top_down_predict,
        ).pack(side="left")
        Frame(frame2, width=20).pack(side="left")
        Button(
            frame2,
            text="Bottom Up Predict\nRule",
            background="#90f090",
            foreground="black",
            command=self.bottom_up,
        ).pack(side="left")
        Frame(frame2, width=20).pack(side="left")
        Button(
            frame2,
            text="Bottom Up Left-Corner\nPredict Rule",
            background="#90f090",
            foreground="black",
            command=self.bottom_up_leftcorner,
        ).pack(side="left")
        Frame(frame2, width=20).pack(side="left")
        Button(
            frame2,
            text="Fundamental\nRule",
            background="#90f090",
            foreground="black",
            command=self.fundamental,
        ).pack(side="left")
    def _init_bindings(self):
        """Attach keyboard shortcuts to the root window."""
        self._root.bind("<Up>", self._cv.scroll_up)
        self._root.bind("<Down>", self._cv.scroll_down)
        self._root.bind("<Prior>", self._cv.page_up)
        self._root.bind("<Next>", self._cv.page_down)
        self._root.bind("<Control-q>", self.destroy)
        self._root.bind("<Control-x>", self.destroy)
        self._root.bind("<F1>", self.help)
        self._root.bind("<Control-s>", self.save_chart)
        self._root.bind("<Control-o>", self.load_chart)
        self._root.bind("<Control-r>", self.reset)
        self._root.bind("t", self.top_down_strategy)
        self._root.bind("b", self.bottom_up_strategy)
        self._root.bind("c", self.bottom_up_leftcorner_strategy)
        self._root.bind("<space>", self._stop_animation)
        self._root.bind("<Control-g>", self.edit_grammar)
        self._root.bind("<Control-t>", self.edit_sentence)
        # Animation speed control
        self._root.bind("-", lambda e, a=self._animate: a.set(1))
        self._root.bind("=", lambda e, a=self._animate: a.set(2))
        self._root.bind("+", lambda e, a=self._animate: a.set(3))
        # Step control
        self._root.bind("s", lambda e, s=self._step: s.set(not s.get()))
    def _init_menubar(self):
        """Build the menu bar: File, Edit, View, Apply (rules), Animate,
        Zoom, and Help menus."""
        menubar = Menu(self._root)
        # File menu: chart save/load/reset and grammar save/load.
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(
            label="Save Chart",
            underline=0,
            command=self.save_chart,
            accelerator="Ctrl-s",
        )
        filemenu.add_command(
            label="Load Chart",
            underline=0,
            command=self.load_chart,
            accelerator="Ctrl-o",
        )
        filemenu.add_command(
            label="Reset Chart", underline=0, command=self.reset, accelerator="Ctrl-r"
        )
        filemenu.add_separator()
        filemenu.add_command(label="Save Grammar", command=self.save_grammar)
        filemenu.add_command(label="Load Grammar", command=self.load_grammar)
        filemenu.add_separator()
        filemenu.add_command(
            label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
        )
        menubar.add_cascade(label="File", underline=0, menu=filemenu)
        # Edit menu: grammar and sentence editors.
        editmenu = Menu(menubar, tearoff=0)
        editmenu.add_command(
            label="Edit Grammar",
            underline=5,
            command=self.edit_grammar,
            accelerator="Ctrl-g",
        )
        editmenu.add_command(
            label="Edit Text",
            underline=5,
            command=self.edit_sentence,
            accelerator="Ctrl-t",
        )
        menubar.add_cascade(label="Edit", underline=0, menu=editmenu)
        # View menu: auxiliary chart-matrix and results windows.
        viewmenu = Menu(menubar, tearoff=0)
        viewmenu.add_command(
            label="Chart Matrix", underline=6, command=self.view_matrix
        )
        viewmenu.add_command(label="Results", underline=0, command=self.view_results)
        menubar.add_cascade(label="View", underline=0, menu=viewmenu)
        # Apply menu: parsing strategies and individual chart rules.
        rulemenu = Menu(menubar, tearoff=0)
        rulemenu.add_command(
            label="Top Down Strategy",
            underline=0,
            command=self.top_down_strategy,
            accelerator="t",
        )
        rulemenu.add_command(
            label="Bottom Up Strategy",
            underline=0,
            command=self.bottom_up_strategy,
            accelerator="b",
        )
        rulemenu.add_command(
            label="Bottom Up Left-Corner Strategy",
            underline=0,
            command=self.bottom_up_leftcorner_strategy,
            accelerator="c",
        )
        rulemenu.add_separator()
        rulemenu.add_command(label="Bottom Up Rule", command=self.bottom_up)
        rulemenu.add_command(
            label="Bottom Up Left-Corner Rule", command=self.bottom_up_leftcorner
        )
        rulemenu.add_command(label="Top Down Init Rule", command=self.top_down_init)
        rulemenu.add_command(
            label="Top Down Predict Rule", command=self.top_down_predict
        )
        rulemenu.add_command(label="Fundamental Rule", command=self.fundamental)
        menubar.add_cascade(label="Apply", underline=0, menu=rulemenu)
        # Animate menu: stepping toggle and speed radio buttons.
        animatemenu = Menu(menubar, tearoff=0)
        animatemenu.add_checkbutton(
            label="Step", underline=0, variable=self._step, accelerator="s"
        )
        animatemenu.add_separator()
        animatemenu.add_radiobutton(
            label="No Animation", underline=0, variable=self._animate, value=0
        )
        animatemenu.add_radiobutton(
            label="Slow Animation",
            underline=0,
            variable=self._animate,
            value=1,
            accelerator="-",
        )
        animatemenu.add_radiobutton(
            label="Normal Animation",
            underline=0,
            variable=self._animate,
            value=2,
            accelerator="=",
        )
        animatemenu.add_radiobutton(
            label="Fast Animation",
            underline=0,
            variable=self._animate,
            value=3,
            accelerator="+",
        )
        menubar.add_cascade(label="Animate", underline=1, menu=animatemenu)
        # Zoom menu: font-size radio buttons (values are point sizes).
        zoommenu = Menu(menubar, tearoff=0)
        zoommenu.add_radiobutton(
            label="Tiny",
            variable=self._size,
            underline=0,
            value=10,
            command=self.resize,
        )
        zoommenu.add_radiobutton(
            label="Small",
            variable=self._size,
            underline=0,
            value=12,
            command=self.resize,
        )
        zoommenu.add_radiobutton(
            label="Medium",
            variable=self._size,
            underline=0,
            value=14,
            command=self.resize,
        )
        zoommenu.add_radiobutton(
            label="Large",
            variable=self._size,
            underline=0,
            value=18,
            command=self.resize,
        )
        zoommenu.add_radiobutton(
            label="Huge",
            variable=self._size,
            underline=0,
            value=24,
            command=self.resize,
        )
        menubar.add_cascade(label="Zoom", underline=0, menu=zoommenu)
        # Help menu.
        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About", underline=0, command=self.about)
        helpmenu.add_command(
            label="Instructions", underline=0, command=self.help, accelerator="F1"
        )
        menubar.add_cascade(label="Help", underline=0, menu=helpmenu)
        self._root.config(menu=menubar)

    # ////////////////////////////////////////////////////////////
    # Selection Handling
    # ////////////////////////////////////////////////////////////
    def _click_cv_edge(self, edge):
        """Chart-view click handler: select ``edge``, or cycle through its
        parse trees if it is already the current selection."""
        if edge != self._selection:
            # Clicking on a new edge selects it.
            self._select_edge(edge)
        else:
            # Repeated clicks on one edge cycle its trees.
            self._cv.cycle_tree()
    # [XX] this can get confused if animation is running
    # faster than the callbacks...
    def _select_matrix_edge(self, edge):
        """Matrix-view click handler: select ``edge`` and scroll the chart
        view so that it is visible."""
        self._select_edge(edge)
        self._cv.view_edge(edge)
    def _select_edge(self, edge):
        """Make ``edge`` the current selection and highlight it (red) in
        both the chart view and, if open, the matrix view."""
        self._selection = edge
        # Update the chart view.
        self._cv.markonly_edge(edge, "#f00")
        self._cv.draw_tree(edge)
        # Update the matrix view.
        if self._matrix:
            self._matrix.markonly_edge(edge)
        if self._matrix:
            self._matrix.view_edge(edge)
    def _deselect_edge(self):
        """Clear the current selection and remove all edge highlighting."""
        self._selection = None
        # Update the chart view.
        self._cv.unmark_edge()
        self._cv.erase_tree()
        # Update the matrix view
        if self._matrix:
            self._matrix.unmark_edge()
    def _show_new_edge(self, edge):
        """Display a freshly-derived ``edge``: show the rule that produced
        it, highlight it (cyan) in every open view, and record it in the
        results view."""
        self._display_rule(self._cp.current_chartrule())
        # Update the chart view.
        self._cv.update()
        self._cv.draw_tree(edge)
        self._cv.markonly_edge(edge, "#0df")
        self._cv.view_edge(edge)
        # Update the matrix view.
        if self._matrix:
            self._matrix.update()
        if self._matrix:
            self._matrix.markonly_edge(edge)
        if self._matrix:
            self._matrix.view_edge(edge)
        # Update the results view.
        if self._results:
            self._results.update(edge)
# ////////////////////////////////////////////////////////////
# Help/usage
# ////////////////////////////////////////////////////////////
def help(self, *e):
self._animating = 0
# The default font's not very legible; try using 'fixed' instead.
try:
ShowText(
self._root,
"Help: Chart Parser Application",
(__doc__ or "").strip(),
width=75,
font="fixed",
)
except:
ShowText(
self._root,
"Help: Chart Parser Application",
(__doc__ or "").strip(),
width=75,
)
def about(self, *e):
ABOUT = "NLTK Chart Parser Application\n" + "Written by Edward Loper"
showinfo("About: Chart Parser Application", ABOUT)
# ////////////////////////////////////////////////////////////
# File Menu
# ////////////////////////////////////////////////////////////
CHART_FILE_TYPES = [("Pickle file", ".pickle"), ("All files", "*")]
GRAMMAR_FILE_TYPES = [
("Plaintext grammar file", ".cfg"),
("Pickle file", ".pickle"),
("All files", "*"),
]
def load_chart(self, *args):
"Load a chart from a pickle file"
filename = askopenfilename(
filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle"
)
if not filename:
return
try:
with open(filename, "rb") as infile:
chart = pickle.load(infile)
self._chart = chart
self._cv.update(chart)
if self._matrix:
self._matrix.set_chart(chart)
if self._matrix:
self._matrix.deselect_cell()
if self._results:
self._results.set_chart(chart)
self._cp.set_chart(chart)
except Exception as e:
raise
showerror("Error Loading Chart", "Unable to open file: %r" % filename)
def save_chart(self, *args):
"Save a chart to a pickle file"
filename = asksaveasfilename(
filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle"
)
if not filename:
return
try:
with open(filename, "wb") as outfile:
pickle.dump(self._chart, outfile)
except Exception as e:
raise
showerror("Error Saving Chart", "Unable to open file: %r" % filename)
def load_grammar(self, *args):
"Load a grammar from a pickle file"
filename = askopenfilename(
filetypes=self.GRAMMAR_FILE_TYPES, defaultextension=".cfg"
)
if not filename:
return
try:
if filename.endswith(".pickle"):
with open(filename, "rb") as infile:
grammar = pickle.load(infile)
else:
with open(filename) as infile:
grammar = CFG.fromstring(infile.read())
self.set_grammar(grammar)
except Exception as e:
showerror("Error Loading Grammar", "Unable to open file: %r" % filename)
def save_grammar(self, *args):
filename = asksaveasfilename(
filetypes=self.GRAMMAR_FILE_TYPES, defaultextension=".cfg"
)
if not filename:
return
try:
if filename.endswith(".pickle"):
with open(filename, "wb") as outfile:
pickle.dump((self._chart, self._tokens), outfile)
else:
with open(filename, "w") as outfile:
prods = self._grammar.productions()
start = [p for p in prods if p.lhs() == self._grammar.start()]
rest = [p for p in prods if p.lhs() != self._grammar.start()]
for prod in start:
outfile.write("%s\n" % prod)
for prod in rest:
outfile.write("%s\n" % prod)
except Exception as e:
showerror("Error Saving Grammar", "Unable to open file: %r" % filename)
    def reset(self, *args):
        """Stop any running animation, re-create the parser's chart, and
        refresh every open view with the fresh chart."""
        self._animating = 0
        self._reset_parser()
        self._cv.update(self._chart)
        if self._matrix:
            self._matrix.set_chart(self._chart)
        if self._matrix:
            self._matrix.deselect_cell()
        if self._results:
            self._results.set_chart(self._chart)
# ////////////////////////////////////////////////////////////
# Edit
# ////////////////////////////////////////////////////////////
    def edit_grammar(self, *e):
        """Open the CFG editor; set_grammar is invoked with the result."""
        CFGEditor(self._root, self._grammar, self.set_grammar)
    def set_grammar(self, grammar):
        """Install ``grammar`` in the application, the parser, and the
        results view (if open)."""
        self._grammar = grammar
        self._cp.set_grammar(grammar)
        if self._results:
            self._results.set_grammar(grammar)
    def edit_sentence(self, *e):
        """Prompt the user for a new sentence; set_sentence is invoked
        with the entered text."""
        sentence = " ".join(self._tokens)
        title = "Edit Text"
        instr = "Enter a new sentence to parse."
        EntryDialog(self._root, sentence, instr, self.set_sentence, title)
    def set_sentence(self, sentence):
        """Tokenize ``sentence`` on whitespace and restart the parser."""
        self._tokens = list(sentence.split())
        self.reset()
# ////////////////////////////////////////////////////////////
# View Menu
# ////////////////////////////////////////////////////////////
    def view_matrix(self, *e):
        """Open (or re-open) the chart-matrix view window."""
        if self._matrix is not None:
            self._matrix.destroy()
        self._matrix = ChartMatrixView(self._root, self._chart)
        self._matrix.add_callback("select", self._select_matrix_edge)
    def view_results(self, *e):
        """Open (or re-open) the parse-results view window."""
        if self._results is not None:
            self._results.destroy()
        self._results = ChartResultsView(self._root, self._chart, self._grammar)
# ////////////////////////////////////////////////////////////
# Zoom Menu
# ////////////////////////////////////////////////////////////
    def resize(self):
        """Zoom-menu callback: stop animation and apply the chosen size."""
        self._animating = 0
        self.set_font_size(self._size.get())
def set_font_size(self, size):
self._cv.set_font_size(size)
self._font.configure(size=-abs(size))
self._boldfont.configure(size=-abs(size))
self._sysfont.configure(size=-abs(size))
    def get_font_size(self):
        """Return the current (positive) display font size."""
        return abs(self._size.get())
# ////////////////////////////////////////////////////////////
# Parsing
# ////////////////////////////////////////////////////////////
    def apply_strategy(self, strategy, edge_strategy=None):
        """Run a parsing strategy, honoring the Step/Animate settings.

        :param strategy: list of chart rules to run over the whole chart.
        :param edge_strategy: factory producing a rule restricted to a
            single edge; used in step mode when an edge is selected
            (may be None).
        """
        # If we're animating, then stop.
        if self._animating:
            self._animating = 0
            return
        # Clear the rule display & mark.
        self._display_rule(None)
        # self._cv.unmark_edge()
        if self._step.get():
            selection = self._selection
            if (selection is not None) and (edge_strategy is not None):
                # Apply the given strategy to the selected edge.
                self._cp.set_strategy([edge_strategy(selection)])
                newedge = self._apply_strategy()
                # If it failed, then clear the selection.
                if newedge is None:
                    self._cv.unmark_edge()
                    self._selection = None
            else:
                self._cp.set_strategy(strategy)
                self._apply_strategy()
        else:
            self._cp.set_strategy(strategy)
            if self._animate.get():
                self._animating = 1
                self._animate_strategy()
            else:
                # No animation: exhaust the stepper in one go, then
                # refresh every open view once at the end.
                for edge in self._cpstep:
                    if edge is None:
                        break
                self._cv.update()
                if self._matrix:
                    self._matrix.update()
                if self._results:
                    self._results.update()
    def _stop_animation(self, *e):
        """Stop any in-progress animation (checked by _animate_strategy)."""
        self._animating = 0
def _animate_strategy(self, speed=1):
if self._animating == 0:
return
if self._apply_strategy() is not None:
if self._animate.get() == 0 or self._step.get() == 1:
return
if self._animate.get() == 1:
self._root.after(3000, self._animate_strategy)
elif self._animate.get() == 2:
self._root.after(1000, self._animate_strategy)
else:
self._root.after(20, self._animate_strategy)
    def _apply_strategy(self):
        """Advance the parser by one step.  Returns the new edge, or None
        if the current strategy produced nothing."""
        new_edge = next(self._cpstep)
        if new_edge is not None:
            self._show_new_edge(new_edge)
        return new_edge
def _display_rule(self, rule):
if rule is None:
self._rulelabel2["text"] = ""
else:
name = str(rule)
self._rulelabel2["text"] = name
size = self._cv.get_font_size()
# ////////////////////////////////////////////////////////////
# Parsing Strategies
# ////////////////////////////////////////////////////////////
# Basic rules:
_TD_INIT = [TopDownInitRule()]
_TD_PREDICT = [TopDownPredictRule()]
_BU_RULE = [BottomUpPredictRule()]
_BU_LC_RULE = [BottomUpPredictCombineRule()]
_FUNDAMENTAL = [SingleEdgeFundamentalRule()]
# Complete strategies:
_TD_STRATEGY = _TD_INIT + _TD_PREDICT + _FUNDAMENTAL
_BU_STRATEGY = _BU_RULE + _FUNDAMENTAL
_BU_LC_STRATEGY = _BU_LC_RULE + _FUNDAMENTAL
# Button callback functions:
    def top_down_init(self, *e):
        """Apply the top-down initialization rule."""
        self.apply_strategy(self._TD_INIT, None)
    def top_down_predict(self, *e):
        """Apply the top-down predict rule."""
        self.apply_strategy(self._TD_PREDICT, TopDownPredictEdgeRule)
    def bottom_up(self, *e):
        """Apply the bottom-up predict rule."""
        self.apply_strategy(self._BU_RULE, BottomUpEdgeRule)
    def bottom_up_leftcorner(self, *e):
        """Apply the bottom-up left-corner predict/combine rule."""
        self.apply_strategy(self._BU_LC_RULE, BottomUpLeftCornerEdgeRule)
    def fundamental(self, *e):
        """Apply the fundamental rule."""
        self.apply_strategy(self._FUNDAMENTAL, FundamentalEdgeRule)
    def bottom_up_strategy(self, *e):
        """Run the complete bottom-up parsing strategy."""
        self.apply_strategy(self._BU_STRATEGY, BottomUpEdgeRule)
    def bottom_up_leftcorner_strategy(self, *e):
        """Run the complete bottom-up left-corner parsing strategy."""
        self.apply_strategy(self._BU_LC_STRATEGY, BottomUpLeftCornerEdgeRule)
    def top_down_strategy(self, *e):
        """Run the complete top-down parsing strategy."""
        self.apply_strategy(self._TD_STRATEGY, TopDownPredictEdgeRule)
class CFG:
    """
    A context-free grammar.  A grammar consists of a start state and
    a set of productions.  The set of terminals and nonterminals is
    implicitly specified by the productions.

    If you need efficient key-based access to productions, you
    can use a subclass to implement it.
    """

    def __init__(self, start, productions, calculate_leftcorners=True):
        """
        Create a new context-free grammar, from the given start state
        and set of ``Production`` instances.

        :param start: The start symbol
        :type start: Nonterminal
        :param productions: The list of productions that defines the grammar
        :type productions: list(Production)
        :param calculate_leftcorners: False if we don't want to calculate the
            leftcorner relation. In that case, some optimized chart parsers won't work.
        :type calculate_leftcorners: bool
        """
        if not is_nonterminal(start):
            raise TypeError(
                "start should be a Nonterminal object,"
                " not a %s" % type(start).__name__
            )

        self._start = start
        self._productions = productions
        self._categories = {prod.lhs() for prod in productions}
        self._calculate_indexes()
        self._calculate_grammar_forms()
        if calculate_leftcorners:
            self._calculate_leftcorners()

    def _calculate_indexes(self):
        # Build lookup indexes over the productions: by LHS, by first RHS
        # item, by empty-RHS LHS, and by lexical (terminal) token.
        self._lhs_index = {}
        self._rhs_index = {}
        self._empty_index = {}
        self._lexical_index = {}
        for prod in self._productions:
            # Left hand side.
            lhs = prod._lhs
            if lhs not in self._lhs_index:
                self._lhs_index[lhs] = []
            self._lhs_index[lhs].append(prod)
            if prod._rhs:
                # First item in right hand side.
                rhs0 = prod._rhs[0]
                if rhs0 not in self._rhs_index:
                    self._rhs_index[rhs0] = []
                self._rhs_index[rhs0].append(prod)
            else:
                # The right hand side is empty.
                self._empty_index[prod.lhs()] = prod
            # Lexical tokens in the right hand side.
            for token in prod._rhs:
                if is_terminal(token):
                    self._lexical_index.setdefault(token, set()).add(prod)

    def _calculate_leftcorners(self):
        # Calculate leftcorner relations, for use in optimized parsing.
        self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories}
        self._immediate_leftcorner_words = {cat: set() for cat in self._categories}
        for prod in self.productions():
            if len(prod) > 0:
                cat, left = prod.lhs(), prod.rhs()[0]
                if is_nonterminal(left):
                    self._immediate_leftcorner_categories[cat].add(left)
                else:
                    self._immediate_leftcorner_words[cat].add(left)

        lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True)
        self._leftcorners = lc
        self._leftcorner_parents = invert_graph(lc)

        nr_leftcorner_categories = sum(
            map(len, self._immediate_leftcorner_categories.values())
        )
        nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values()))
        if nr_leftcorner_words > nr_leftcorner_categories > 10000:
            # If the grammar is big, the leftcorner-word dictionary will be too large.
            # In that case it is better to calculate the relation on demand.
            self._leftcorner_words = None
            return

        self._leftcorner_words = {}
        for cat in self._leftcorners:
            lefts = self._leftcorners[cat]
            lc = self._leftcorner_words[cat] = set()
            for left in lefts:
                lc.update(self._immediate_leftcorner_words.get(left, set()))

    # NOTE(review): takes `cls` but no @classmethod decorator is visible in
    # this extract -- confirm the decorator was not lost upstream.
    def fromstring(cls, input, encoding=None):
        """
        Return the grammar instance corresponding to the input string(s).

        :param input: a grammar, either in the form of a string or as a list of strings.
        """
        start, productions = read_grammar(
            input, standard_nonterm_parser, encoding=encoding
        )
        return cls(start, productions)

    def start(self):
        """
        Return the start symbol of the grammar

        :rtype: Nonterminal
        """
        return self._start

    # tricky to balance readability and efficiency here!
    # can't use set operations as they don't preserve ordering
    def productions(self, lhs=None, rhs=None, empty=False):
        """
        Return the grammar productions, filtered by the left-hand side
        or the first item in the right-hand side.

        :param lhs: Only return productions with the given left-hand side.
        :param rhs: Only return productions with the given first item
            in the right-hand side.
        :param empty: Only return productions with an empty right-hand side.
        :return: A list of productions matching the given constraints.
        :rtype: list(Production)
        """
        if rhs and empty:
            raise ValueError(
                "You cannot select empty and non-empty " "productions at the same time."
            )

        # no constraints so return everything
        if not lhs and not rhs:
            if not empty:
                return self._productions
            else:
                return self._empty_index.values()

        # only lhs specified so look up its index
        elif lhs and not rhs:
            if not empty:
                return self._lhs_index.get(lhs, [])
            elif lhs in self._empty_index:
                return [self._empty_index[lhs]]
            else:
                return []

        # only rhs specified so look up its index
        elif rhs and not lhs:
            return self._rhs_index.get(rhs, [])

        # intersect
        else:
            return [
                prod
                for prod in self._lhs_index.get(lhs, [])
                if prod in self._rhs_index.get(rhs, [])
            ]

    def leftcorners(self, cat):
        """
        Return the set of all nonterminals that the given nonterminal
        can start with, including itself.

        This is the reflexive, transitive closure of the immediate
        leftcorner relation:  (A > B)  iff  (A -> B beta)

        :param cat: the parent of the leftcorners
        :type cat: Nonterminal
        :return: the set of all leftcorners
        :rtype: set(Nonterminal)
        """
        return self._leftcorners.get(cat, {cat})

    def is_leftcorner(self, cat, left):
        """
        True if left is a leftcorner of cat, where left can be a
        terminal or a nonterminal.

        :param cat: the parent of the leftcorner
        :type cat: Nonterminal
        :param left: the suggested leftcorner
        :type left: Terminal or Nonterminal
        :rtype: bool
        """
        if is_nonterminal(left):
            return left in self.leftcorners(cat)
        elif self._leftcorner_words:
            return left in self._leftcorner_words.get(cat, set())
        else:
            # Precomputed word table was skipped (large grammar); compute
            # the word relation on demand via the category leftcorners.
            return any(
                left in self._immediate_leftcorner_words.get(parent, set())
                for parent in self.leftcorners(cat)
            )

    def leftcorner_parents(self, cat):
        """
        Return the set of all nonterminals for which the given category
        is a left corner. This is the inverse of the leftcorner relation.

        :param cat: the suggested leftcorner
        :type cat: Nonterminal
        :return: the set of all parents to the leftcorner
        :rtype: set(Nonterminal)
        """
        return self._leftcorner_parents.get(cat, {cat})

    def check_coverage(self, tokens):
        """
        Check whether the grammar rules cover the given list of tokens.
        If not, then raise an exception.

        :type tokens: list(str)
        """
        missing = [tok for tok in tokens if not self._lexical_index.get(tok)]
        if missing:
            missing = ", ".join(f"{w!r}" for w in missing)
            raise ValueError(
                "Grammar does not cover some of the " "input words: %r." % missing
            )

    def _calculate_grammar_forms(self):
        """
        Pre-calculate of which form(s) the grammar is.
        """
        prods = self._productions
        self._is_lexical = all(p.is_lexical() for p in prods)
        self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1)
        self._min_len = min(len(p) for p in prods)
        self._max_len = max(len(p) for p in prods)
        self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1)

    def is_lexical(self):
        """
        Return True if all productions are lexicalised.
        """
        return self._is_lexical

    def is_nonlexical(self):
        """
        Return True if all lexical rules are "preterminals", that is,
        unary rules which can be separated in a preprocessing step.

        This means that all productions are of the forms
        A -> B1 ... Bn (n>=0), or A -> "s".

        Note: is_lexical() and is_nonlexical() are not opposites.
        There are grammars which are neither, and grammars which are both.
        """
        return self._is_nonlexical

    def min_len(self):
        """
        Return the right-hand side length of the shortest grammar production.
        """
        return self._min_len

    def max_len(self):
        """
        Return the right-hand side length of the longest grammar production.
        """
        return self._max_len

    def is_nonempty(self):
        """
        Return True if there are no empty productions.
        """
        return self._min_len > 0

    def is_binarised(self):
        """
        Return True if all productions are at most binary.
        Note that there can still be empty and unary productions.
        """
        return self._max_len <= 2

    def is_flexible_chomsky_normal_form(self):
        """
        Return True if all productions are of the forms
        A -> B C, A -> B, or A -> "s".
        """
        return self.is_nonempty() and self.is_nonlexical() and self.is_binarised()

    def is_chomsky_normal_form(self):
        """
        Return True if the grammar is of Chomsky Normal Form, i.e. all productions
        are of the form A -> B C, or A -> "s".
        """
        return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical

    def chomsky_normal_form(self, new_token_padding="@$@", flexible=False):
        """
        Return a new grammar converted to Chomsky Normal Form.

        :param new_token_padding: separator used when naming the new
            nonterminals introduced during binarisation.
        :param flexible: if True, stop after binarisation (unary
            nonlexical rules are kept).
        """
        if self.is_chomsky_normal_form():
            return self
        if self.productions(empty=True):
            raise ValueError(
                "Grammar has Empty rules. " "Cannot deal with them at the moment"
            )

        # check for mixed rules
        for rule in self.productions():
            if rule.is_lexical() and len(rule.rhs()) > 1:
                raise ValueError(
                    f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}"
                )

        step1 = CFG.eliminate_start(self)
        step2 = CFG.binarize(step1, new_token_padding)
        if flexible:
            return step2
        step3 = CFG.remove_unitary_rules(step2)
        # Deduplicate productions introduced by the unitary-rule removal.
        step4 = CFG(step3.start(), list(set(step3.productions())))
        return step4

    # NOTE(review): takes `cls` but no @classmethod decorator is visible in
    # this extract -- confirm the decorator was not lost upstream.
    def remove_unitary_rules(cls, grammar):
        """
        Remove nonlexical unitary rules and convert them to
        lexical
        """
        result = []
        unitary = []
        for rule in grammar.productions():
            if len(rule) == 1 and rule.is_nonlexical():
                unitary.append(rule)
            else:
                result.append(rule)

        while unitary:
            rule = unitary.pop(0)
            for item in grammar.productions(lhs=rule.rhs()[0]):
                new_rule = Production(rule.lhs(), item.rhs())
                if len(new_rule) != 1 or new_rule.is_lexical():
                    result.append(new_rule)
                else:
                    # Still a nonlexical unary rule: keep expanding it.
                    unitary.append(new_rule)

        n_grammar = CFG(grammar.start(), result)
        return n_grammar

    # NOTE(review): takes `cls` but no @classmethod decorator is visible in
    # this extract -- confirm the decorator was not lost upstream.
    def binarize(cls, grammar, padding="@$@"):
        """
        Convert all non-binary rules into binary by introducing
        new tokens.
        Example::

            Original:
                A => B C D
            After Conversion:
                A => B A@$@B
                A@$@B => C D
        """
        result = []

        for rule in grammar.productions():
            if len(rule.rhs()) > 2:
                # this rule needs to be broken down
                left_side = rule.lhs()
                for k in range(0, len(rule.rhs()) - 2):
                    tsym = rule.rhs()[k]
                    new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol())
                    new_production = Production(left_side, (tsym, new_sym))
                    left_side = new_sym
                    result.append(new_production)
                last_prd = Production(left_side, rule.rhs()[-2:])
                result.append(last_prd)
            else:
                result.append(rule)

        n_grammar = CFG(grammar.start(), result)
        return n_grammar

    # NOTE(review): takes `cls` but no @classmethod decorator is visible in
    # this extract -- confirm the decorator was not lost upstream.
    def eliminate_start(cls, grammar):
        """
        Eliminate start rule in case it appears on RHS
        Example: S -> S0 S1 and S0 -> S1 S
        Then another rule S0_Sigma -> S is added
        """
        start = grammar.start()
        result = []
        need_to_add = None
        for rule in grammar.productions():
            if start in rule.rhs():
                need_to_add = True
            result.append(rule)
        if need_to_add:
            start = Nonterminal("S0_SIGMA")
            result.append(Production(start, [grammar.start()]))
            n_grammar = CFG(start, result)
            return n_grammar
        # Start symbol never appears on a RHS: grammar is returned unchanged.
        return grammar

    def __repr__(self):
        return "<Grammar with %d productions>" % len(self._productions)

    def __str__(self):
        result = "Grammar with %d productions" % len(self._productions)
        result += " (start state = %r)" % self._start
        for production in self._productions:
            result += "\n    %s" % production
        return result
def app():
    """Demo: build a toy grammar, print it, and launch the chart parser GUI."""
    grammar = CFG.fromstring(
        """
    # Grammatical productions.
    S -> NP VP
    VP -> VP PP | V NP | V
    NP -> Det N | NP PP
    PP -> P NP
    # Lexical productions.
    NP -> 'John' | 'I'
    Det -> 'the' | 'my' | 'a'
    N -> 'dog' | 'cookie' | 'table' | 'cake' | 'fork'
    V -> 'ate' | 'saw'
    P -> 'on' | 'under' | 'with'
    """
    )

    # A longer alternative: "John ate the cake on the table with a fork"
    sent = "John ate the cake on the table"
    tokens = list(sent.split())

    print("grammar= (")
    for rule in grammar.productions():
        # Bug fix: this previously printed a tuple literal
        # ("(' ', \"...\")") -- a 2to3 conversion artifact.
        print("    %r," % rule)
    print(")")
    print("tokens = %r" % tokens)
    print('Calling "ChartParserApp(grammar, tokens)"...')
    ChartParserApp(grammar, tokens).mainloop()
170,884 | import re
from warnings import warn
from nltk.corpus import bcp47
def langname(tag, typ="full"):
    """
    Convert a composite BCP-47 tag to a language name.

    Returns None (after issuing a warning) when the language code
    cannot be recognized.

    >>> from nltk.langnames import langname
    >>> langname('ca-Latn-ES-valencia')
    'Catalan: Latin: Spain: Valencian'
    >>> langname('ca-Latn-ES-valencia', typ="short")
    'Catalan'
    """
    tags = tag.split("-")
    # The first subtag is the primary language code.
    code = tags[0].lower()
    # `codepattern`, `iso639retired` and `iso639short` are module-level
    # tables defined elsewhere in this module.
    if codepattern.fullmatch(code):
        if code in iso639retired:  # retired codes
            return iso639retired[code]
        elif code in iso639short:  # 3-letter codes
            code2 = iso639short[code]  # convert to 2-letter code
            warn(f"Shortening {code!r} to {code2!r}", stacklevel=2)
            tag = "-".join([code2] + tags[1:])
        name = bcp47.name(tag)  # parse according to BCP-47
        if typ == "full":
            return name  # include all subtags
        elif name:
            return name.split(":")[0]  # only the language subtag
    else:
        warn(f"Could not find code in {code!r}", stacklevel=2)
def q2tag(qcode):
    """
    Convert Wikidata Q-code to BCP-47 tag.

    Raises KeyError if the Q-code is unknown.
    (`wiki_bcp47` is a module-level mapping defined elsewhere in this module.)

    >>> q2tag('Q4289225')
    'nds-u-sd-demv'
    """
    return wiki_bcp47[qcode]
The provided code snippet includes necessary dependencies for implementing the `q2name` function. Write a Python function `def q2name(qcode, typ="full")` to solve the following problem:
Convert Wikidata Q-code to BCP-47 (full or short) language name >>> q2name('Q4289225') 'Low German: Mecklenburg-Vorpommern' >>> q2name('Q4289225', "short") 'Low German'
Here is the function:
def q2name(qcode, typ="full"):
    """
    Convert Wikidata Q-code to BCP-47 (full or short) language name

    >>> q2name('Q4289225')
    'Low German: Mecklenburg-Vorpommern'
    >>> q2name('Q4289225', "short")
    'Low German'
    """
    tag = q2tag(qcode)
    return langname(tag, typ)
170,885 | import re
from warnings import warn
from nltk.corpus import bcp47
def langcode(name, typ=2):
    """
    Convert language name to iso639-3 language code. Returns the short 2-letter
    code by default, if one is available, and the 3-letter code otherwise:

    Returns None (after issuing a warning) when the name is unknown.

    >>> from nltk.langnames import langcode
    >>> langcode('Modern Greek (1453-)')
    'el'

    Specify 'typ=3' to get the 3-letter code:

    >>> langcode('Modern Greek (1453-)', typ=3)
    'ell'
    """
    # `iso639long` and `iso639code_retired` are module-level tables
    # defined elsewhere in this module.
    if name in bcp47.langcode:
        code = bcp47.langcode[name]
        if typ == 3 and code in iso639long:
            code = iso639long[code]  # convert to 3-letter code
        return code
    elif name in iso639code_retired:
        return iso639code_retired[name]
    else:
        warn(f"Could not find language in {name!r}", stacklevel=2)
def tag2q(tag):
    """
    Convert BCP-47 tag to Wikidata Q-code.

    Raises KeyError if the tag is unknown.

    >>> tag2q('nds-u-sd-demv')
    'Q4289225'
    """
    return bcp47.wiki_q[tag]
The provided code snippet includes necessary dependencies for implementing the `lang2q` function. Write a Python function `def lang2q(name)` to solve the following problem:
Convert simple language name to Wikidata Q-code >>> lang2q('Low German') 'Q25433'
Here is the function:
def lang2q(name):
    """
    Convert simple language name to Wikidata Q-code

    >>> lang2q('Low German')
    'Q25433'
    """
    code = langcode(name)
    return tag2q(code)
170,886 | import re
from warnings import warn
from nltk.corpus import bcp47
The provided code snippet includes necessary dependencies for implementing the `inverse_dict` function. Write a Python function `def inverse_dict(dic)` to solve the following problem:
Return inverse mapping, but only if it is bijective
Here is the function:
def inverse_dict(dic):
    """Return the inverse mapping {value: key}, but only if it is bijective.

    If two keys share a value the dictionary cannot be inverted; a warning
    is issued and None is returned (same behavior as before).

    (Previously this built ``set(dic.values())`` just to check sizes and
    then rebuilt the dict; a single dict comprehension suffices.)
    """
    inverse = {val: key for (key, val) in dic.items()}
    if len(inverse) == len(dic):
        return inverse
    warn("This dictionary has no bijective inverse mapping.")
170,887 |
The provided code snippet includes necessary dependencies for implementing the `suffix_replace` function. Write a Python function `def suffix_replace(original, old, new)` to solve the following problem:
Replaces the old suffix of the original string by a new suffix
Here is the function:
def suffix_replace(original, old, new):
    """
    Replace the suffix ``old`` of ``original`` with ``new``.

    ``original`` is assumed to end with ``old``; the suffix is removed
    by position, not verified.
    """
    if not old:
        # Bug fix: original[:-len("")] == original[:0] would discard the
        # whole string instead of removing an empty suffix.
        return original + new
    return original[: -len(old)] + new
170,888 |
The provided code snippet includes necessary dependencies for implementing the `prefix_replace` function. Write a Python function `def prefix_replace(original, old, new)` to solve the following problem:
Replaces the old prefix of the original string with a new prefix :param original: string :param old: string :param new: string :return: string
Here is the function:
def prefix_replace(original, old, new):
    """
    Replaces the old prefix of the original string with a new prefix.

    (The previous docstring said "new suffix", which was wrong.)

    :param original: string
    :param old: string (assumed to be the prefix of ``original``;
        removed by position, not verified)
    :param new: string
    :return: string
    """
    return new + original[len(old) :]
170,889 | import re
from nltk.corpus import stopwords
from nltk.stem import porter
from nltk.stem.api import StemmerI
from nltk.stem.util import prefix_replace, suffix_replace
class SnowballStemmer(StemmerI):

    """
    Snowball Stemmer

    The following languages are supported:
    Arabic, Danish, Dutch, English, Finnish, French, German,
    Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian,
    Spanish and Swedish.

    The algorithm for English is documented here:

        Porter, M. \"An algorithm for suffix stripping.\"
        Program 14.3 (1980): 130-137.

    The algorithms have been developed by Martin Porter.
    These stemmers are called Snowball, because Porter created
    a programming language with this name for creating
    new stemming algorithms. There is more information available
    at http://snowball.tartarus.org/

    The stemmer is invoked as shown below:

    >>> from nltk.stem import SnowballStemmer # See which languages are supported
    >>> print(" ".join(SnowballStemmer.languages)) # doctest: +NORMALIZE_WHITESPACE
    arabic danish dutch english finnish french german hungarian
    italian norwegian porter portuguese romanian russian
    spanish swedish
    >>> stemmer = SnowballStemmer("german") # Choose a language
    >>> stemmer.stem("Autobahnen") # Stem a word
    'autobahn'

    Invoking the stemmers that way is useful if you do not know the
    language to be stemmed at runtime. Alternatively, if you already know
    the language, then you can invoke the language specific stemmer directly:

    >>> from nltk.stem.snowball import GermanStemmer
    >>> stemmer = GermanStemmer()
    >>> stemmer.stem("Autobahnen")
    'autobahn'

    :param language: The language whose subclass is instantiated.
    :type language: str or unicode
    :param ignore_stopwords: If set to True, stopwords are
                             not stemmed and returned unchanged.
                             Set to False by default.
    :type ignore_stopwords: bool
    :raise ValueError: If there is no stemmer for the specified
                           language, a ValueError is raised.
    """

    # Names of the supported language-specific stemmer classes
    # (``<Language>Stemmer`` must exist in this module's globals).
    languages = (
        "arabic",
        "danish",
        "dutch",
        "english",
        "finnish",
        "french",
        "german",
        "hungarian",
        "italian",
        "norwegian",
        "porter",
        "portuguese",
        "romanian",
        "russian",
        "spanish",
        "swedish",
    )

    def __init__(self, language, ignore_stopwords=False):
        if language not in self.languages:
            raise ValueError(f"The language '{language}' is not supported.")
        # Dispatch on the language name, e.g. "german" -> GermanStemmer.
        stemmerclass = globals()[language.capitalize() + "Stemmer"]
        self.stemmer = stemmerclass(ignore_stopwords)
        # Rebinding self.stem here shadows the `stem` method below with
        # the delegate stemmer's bound method.
        self.stem = self.stemmer.stem
        self.stopwords = self.stemmer.stopwords

    def stem(self, token):
        # NOTE(review): in practice this method is shadowed by the
        # `self.stem = self.stemmer.stem` rebinding in __init__, so it is
        # effectively unreachable on instances -- confirm before relying
        # on it (also note it forwards `self` as the delegate's argument).
        return self.stemmer.stem(self, token)
udhr: UdhrCorpusReader = LazyCorpusLoader("udhr", UdhrCorpusReader)
The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem:
This function provides a demonstration of the Snowball stemmers. After invoking this function and specifying a language, it stems an excerpt of the Universal Declaration of Human Rights (which is a part of the NLTK corpus collection) and then prints out the original and the stemmed text.
Here is the function:
def demo():
    """
    This function provides a demonstration of the Snowball stemmers.

    After invoking this function and specifying a language,
    it stems an excerpt of the Universal Declaration of Human Rights
    (which is a part of the NLTK corpus collection) and then prints
    out the original and the stemmed text.
    """

    from nltk.corpus import udhr

    # Maps each supported language to its UDHR corpus fileid.
    udhr_corpus = {
        "arabic": "Arabic_Alarabia-Arabic",
        "danish": "Danish_Dansk-Latin1",
        "dutch": "Dutch_Nederlands-Latin1",
        "english": "English-Latin1",
        "finnish": "Finnish_Suomi-Latin1",
        "french": "French_Francais-Latin1",
        "german": "German_Deutsch-Latin1",
        "hungarian": "Hungarian_Magyar-UTF8",
        "italian": "Italian_Italiano-Latin1",
        "norwegian": "Norwegian-Latin1",
        "porter": "English-Latin1",
        "portuguese": "Portuguese_Portugues-Latin1",
        "romanian": "Romanian_Romana-Latin2",
        "russian": "Russian-UTF8",
        "spanish": "Spanish-Latin1",
        "swedish": "Swedish_Svenska-Latin1",
    }

    print("\n")
    print("******************************")
    print("Demo for the Snowball stemmers")
    print("******************************")

    # Interactive loop: keep prompting until the user enters 'exit'.
    while True:

        language = input(
            "Please enter the name of the language "
            + "to be demonstrated\n"
            + "/".join(SnowballStemmer.languages)
            + "\n"
            + "(enter 'exit' in order to leave): "
        )

        if language == "exit":
            break

        if language not in SnowballStemmer.languages:
            print(
                "\nOops, there is no stemmer for this language. "
                + "Please try again.\n"
            )
            continue

        stemmer = SnowballStemmer(language)
        excerpt = udhr.words(udhr_corpus[language])[:300]

        stemmed = " ".join(stemmer.stem(word) for word in excerpt)
        # Re-wrap the text to at most ~70 characters per line for display.
        stemmed = re.sub(r"(.{,70})\s", r"\1\n", stemmed + " ").rstrip()
        excerpt = " ".join(excerpt)
        excerpt = re.sub(r"(.{,70})\s", r"\1\n", excerpt + " ").rstrip()

        print("\n")
        print("-" * 70)
        print("ORIGINAL".center(70))
        print(excerpt)
        print("\n\n")
        print("STEMMED RESULTS".center(70))
        print(stemmed)
        print("-" * 70)
        print("\n")
170,890 | import re
from nltk.stem.api import StemmerI
class PorterStemmer(StemmerI):
    """
    A word stemmer based on the Porter stemming algorithm.

        Porter, M. "An algorithm for suffix stripping."
        Program 14.3 (1980): 130-137.

    See https://www.tartarus.org/~martin/PorterStemmer/ for the homepage
    of the algorithm.

    Martin Porter has endorsed several modifications to the Porter
    algorithm since writing his original paper, and those extensions are
    included in the implementations on his website. Additionally, others
    have proposed further improvements to the algorithm, including NLTK
    contributors. There are thus three modes that can be selected by
    passing the appropriate constant to the class constructor's `mode`
    attribute:

    - PorterStemmer.ORIGINAL_ALGORITHM

        An implementation that is faithful to the original paper.

        Note that Martin Porter has deprecated this version of the
        algorithm. Martin distributes implementations of the Porter
        Stemmer in many languages, hosted at:

        https://www.tartarus.org/~martin/PorterStemmer/

        and all of these implementations include his extensions. He
        strongly recommends against using the original, published
        version of the algorithm; only use this mode if you clearly
        understand why you are choosing to do so.

    - PorterStemmer.MARTIN_EXTENSIONS

        An implementation that only uses the modifications to the
        algorithm that are included in the implementations on Martin
        Porter's website. He has declared Porter frozen, so the
        behaviour of those implementations should never change.

    - PorterStemmer.NLTK_EXTENSIONS (default)

        An implementation that includes further improvements devised by
        NLTK contributors or taken from other modified implementations
        found on the web.

    For the best stemming, you should use the default NLTK_EXTENSIONS
    version. However, if you need to get the same results as either the
    original algorithm or one of Martin Porter's hosted versions for
    compatibility with an existing implementation or dataset, you can use
    one of the other modes instead.
    """

    # Modes the Stemmer can be instantiated in
    NLTK_EXTENSIONS = "NLTK_EXTENSIONS"
    MARTIN_EXTENSIONS = "MARTIN_EXTENSIONS"
    ORIGINAL_ALGORITHM = "ORIGINAL_ALGORITHM"

    def __init__(self, mode=NLTK_EXTENSIONS):
        if mode not in (
            self.NLTK_EXTENSIONS,
            self.MARTIN_EXTENSIONS,
            self.ORIGINAL_ALGORITHM,
        ):
            raise ValueError(
                "Mode must be one of PorterStemmer.NLTK_EXTENSIONS, "
                "PorterStemmer.MARTIN_EXTENSIONS, or "
                "PorterStemmer.ORIGINAL_ALGORITHM"
            )

        self.mode = mode

        if self.mode == self.NLTK_EXTENSIONS:
            # This is a table of irregular forms. It is quite short,
            # but still reflects the errors actually drawn to Martin
            # Porter's attention over a 20 year period!
            irregular_forms = {
                "sky": ["sky", "skies"],
                "die": ["dying"],
                "lie": ["lying"],
                "tie": ["tying"],
                "news": ["news"],
                "inning": ["innings", "inning"],
                "outing": ["outings", "outing"],
                "canning": ["cannings", "canning"],
                "howe": ["howe"],
                "proceed": ["proceed"],
                "exceed": ["exceed"],
                "succeed": ["succeed"],
            }

            # Invert the table: each surface form maps to its stem.
            # All keys are lowercase, so lookups must use the
            # lowercased word (see stem()).
            self.pool = {}
            for key in irregular_forms:
                for val in irregular_forms[key]:
                    self.pool[val] = key

        self.vowels = frozenset(["a", "e", "i", "o", "u"])

    def _is_consonant(self, word, i):
        """Returns True if word[i] is a consonant, False otherwise

        A consonant is defined in the paper as follows:

            A consonant in a word is a letter other than A, E, I, O or
            U, and other than Y preceded by a consonant. (The fact that
            the term `consonant' is defined to some extent in terms of
            itself does not make it ambiguous.) So in TOY the consonants
            are T and Y, and in SYZYGY they are S, Z and G. If a letter
            is not a consonant it is a vowel.
        """
        if word[i] in self.vowels:
            return False
        if word[i] == "y":
            if i == 0:
                return True
            else:
                return not self._is_consonant(word, i - 1)
        return True

    def _measure(self, stem):
        r"""Returns the 'measure' of stem, per definition in the paper

        From the paper:

            A consonant will be denoted by c, a vowel by v. A list
            ccc... of length greater than 0 will be denoted by C, and a
            list vvv... of length greater than 0 will be denoted by V.
            Any word, or part of a word, therefore has one of the four
            forms:

                CVCV ... C
                CVCV ... V
                VCVC ... C
                VCVC ... V

            These may all be represented by the single form

                [C]VCVC ... [V]

            where the square brackets denote arbitrary presence of their
            contents. Using (VC){m} to denote VC repeated m times, this
            may again be written as

                [C](VC){m}[V].

            m will be called the \measure\ of any word or word part when
            represented in this form. The case m = 0 covers the null
            word. Here are some examples:

                m=0    TR,  EE,  TREE,  Y,  BY.
                m=1    TROUBLE,  OATS,  TREES,  IVY.
                m=2    TROUBLES,  PRIVATE,  OATEN,  ORRERY.
        """
        cv_sequence = ""

        # Construct a string of 'c's and 'v's representing whether each
        # character in `stem` is a consonant or a vowel.
        # e.g. 'falafel' becomes 'cvcvcvc',
        #      'architecture' becomes 'vcccvcvccvcv'
        for i in range(len(stem)):
            if self._is_consonant(stem, i):
                cv_sequence += "c"
            else:
                cv_sequence += "v"

        # Count the number of 'vc' occurrences, which is equivalent to
        # the number of 'VC' occurrences in Porter's reduced form in the
        # docstring above, which is in turn equivalent to `m`
        return cv_sequence.count("vc")

    def _has_positive_measure(self, stem):
        return self._measure(stem) > 0

    def _contains_vowel(self, stem):
        """Returns True if stem contains a vowel, else False"""
        for i in range(len(stem)):
            if not self._is_consonant(stem, i):
                return True
        return False

    def _ends_double_consonant(self, word):
        """Implements condition *d from the paper

        Returns True if word ends with a double consonant
        """
        return (
            len(word) >= 2
            and word[-1] == word[-2]
            and self._is_consonant(word, len(word) - 1)
        )

    def _ends_cvc(self, word):
        """Implements condition *o from the paper

        From the paper:

            *o  - the stem ends cvc, where the second c is not W, X or Y
                  (e.g. -WIL, -HOP).
        """
        return (
            len(word) >= 3
            and self._is_consonant(word, len(word) - 3)
            and not self._is_consonant(word, len(word) - 2)
            and self._is_consonant(word, len(word) - 1)
            and word[-1] not in ("w", "x", "y")
        ) or (
            # NLTK extension: a two-letter vowel-consonant word (e.g.
            # "as") also counts, so short stems get the trailing 'e'.
            self.mode == self.NLTK_EXTENSIONS
            and len(word) == 2
            and not self._is_consonant(word, 0)
            and self._is_consonant(word, 1)
        )

    def _replace_suffix(self, word, suffix, replacement):
        """Replaces `suffix` of `word` with `replacement"""
        assert word.endswith(suffix), "Given word doesn't end with given suffix"
        if suffix == "":
            return word + replacement
        else:
            return word[: -len(suffix)] + replacement

    def _apply_rule_list(self, word, rules):
        """Applies the first applicable suffix-removal rule to the word

        Takes a word and a list of suffix-removal rules represented as
        3-tuples, with the first element being the suffix to remove,
        the second element being the string to replace it with, and the
        final element being the condition for the rule to be applicable,
        or None if the rule is unconditional.
        """
        for rule in rules:
            suffix, replacement, condition = rule
            if suffix == "*d" and self._ends_double_consonant(word):
                stem = word[:-2]
                if condition is None or condition(stem):
                    return stem + replacement
                else:
                    # Don't try any further rules
                    return word
            if word.endswith(suffix):
                stem = self._replace_suffix(word, suffix, "")
                if condition is None or condition(stem):
                    return stem + replacement
                else:
                    # Don't try any further rules
                    return word

        return word

    def _step1a(self, word):
        """Implements Step 1a from "An algorithm for suffix stripping"

        From the paper:

            SSES -> SS                         caresses  ->  caress
            IES  -> I                          ponies    ->  poni
                                               ties      ->  ti
            SS   -> SS                         caress    ->  caress
            S    ->                            cats      ->  cat
        """
        # this NLTK-only rule extends the original algorithm, so
        # that 'flies'->'fli' but 'dies'->'die' etc
        if self.mode == self.NLTK_EXTENSIONS:
            if word.endswith("ies") and len(word) == 4:
                return self._replace_suffix(word, "ies", "ie")

        return self._apply_rule_list(
            word,
            [
                ("sses", "ss", None),  # SSES -> SS
                ("ies", "i", None),  # IES -> I
                ("ss", "ss", None),  # SS -> SS
                ("s", "", None),  # S ->
            ],
        )

    def _step1b(self, word):
        """Implements Step 1b from "An algorithm for suffix stripping"

        From the paper:

            (m>0) EED -> EE                    feed      ->  feed
                                               agreed    ->  agree
            (*v*) ED  ->                       plastered ->  plaster
                                               bled      ->  bled
            (*v*) ING ->                       motoring  ->  motor
                                               sing      ->  sing

        If the second or third of the rules in Step 1b is successful,
        the following is done:

            AT -> ATE                       conflat(ed)  ->  conflate
            BL -> BLE                       troubl(ed)   ->  trouble
            IZ -> IZE                       siz(ed)      ->  size
            (*d and not (*L or *S or *Z))
               -> single letter
                                            hopp(ing)    ->  hop
                                            tann(ed)     ->  tan
                                            fall(ing)    ->  fall
                                            hiss(ing)    ->  hiss
                                            fizz(ed)     ->  fizz
            (m=1 and *o) -> E               fail(ing)    ->  fail
                                            fil(ing)     ->  file

        The rule to map to a single letter causes the removal of one of
        the double letter pair. The -E is put back on -AT, -BL and -IZ,
        so that the suffixes -ATE, -BLE and -IZE can be recognised
        later. This E may be removed in step 4.
        """
        # this NLTK-only block extends the original algorithm, so that
        # 'spied'->'spi' but 'died'->'die' etc
        if self.mode == self.NLTK_EXTENSIONS:
            if word.endswith("ied"):
                if len(word) == 4:
                    return self._replace_suffix(word, "ied", "ie")
                else:
                    return self._replace_suffix(word, "ied", "i")

        # (m>0) EED -> EE
        if word.endswith("eed"):
            stem = self._replace_suffix(word, "eed", "")
            if self._measure(stem) > 0:
                return stem + "ee"
            else:
                return word

        rule_2_or_3_succeeded = False

        for suffix in ["ed", "ing"]:
            if word.endswith(suffix):
                intermediate_stem = self._replace_suffix(word, suffix, "")
                if self._contains_vowel(intermediate_stem):
                    rule_2_or_3_succeeded = True
                    break

        if not rule_2_or_3_succeeded:
            return word

        return self._apply_rule_list(
            intermediate_stem,
            [
                ("at", "ate", None),  # AT -> ATE
                ("bl", "ble", None),  # BL -> BLE
                ("iz", "ize", None),  # IZ -> IZE
                # (*d and not (*L or *S or *Z))
                # -> single letter
                (
                    "*d",
                    intermediate_stem[-1],
                    lambda stem: intermediate_stem[-1] not in ("l", "s", "z"),
                ),
                # (m=1 and *o) -> E
                (
                    "",
                    "e",
                    lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)),
                ),
            ],
        )

    def _step1c(self, word):
        """Implements Step 1c from "An algorithm for suffix stripping"

        From the paper:

        Step 1c

            (*v*) Y -> I                    happy        ->  happi
                                            sky          ->  sky
        """

        def nltk_condition(stem):
            """
            This has been modified from the original Porter algorithm so
            that y->i is only done when y is preceded by a consonant,
            but not if the stem is only a single consonant, i.e.

               (*c and not c) Y -> I

            So 'happy' -> 'happi', but
               'enjoy' -> 'enjoy'  etc

            This is a much better rule. Formerly 'enjoy'->'enjoi' and
            'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but
            with this modification that no longer really matters.

            Also, the removal of the contains_vowel(z) condition means
            that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and
            conflate with 'spied', 'tried', 'flies' ...
            """
            return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1)

        def original_condition(stem):
            return self._contains_vowel(stem)

        return self._apply_rule_list(
            word,
            [
                (
                    "y",
                    "i",
                    nltk_condition
                    if self.mode == self.NLTK_EXTENSIONS
                    else original_condition,
                )
            ],
        )

    def _step2(self, word):
        """Implements Step 2 from "An algorithm for suffix stripping"

        From the paper:

        Step 2

            (m>0) ATIONAL ->  ATE       relational     ->  relate
            (m>0) TIONAL  ->  TION      conditional    ->  condition
                                        rational       ->  rational
            (m>0) ENCI    ->  ENCE      valenci        ->  valence
            (m>0) ANCI    ->  ANCE      hesitanci      ->  hesitance
            (m>0) IZER    ->  IZE       digitizer      ->  digitize
            (m>0) ABLI    ->  ABLE      conformabli    ->  conformable
            (m>0) ALLI    ->  AL        radicalli      ->  radical
            (m>0) ENTLI   ->  ENT       differentli    ->  different
            (m>0) ELI     ->  E         vileli         ->  vile
            (m>0) OUSLI   ->  OUS       analogousli    ->  analogous
            (m>0) IZATION ->  IZE       vietnamization ->  vietnamize
            (m>0) ATION   ->  ATE       predication    ->  predicate
            (m>0) ATOR    ->  ATE       operator       ->  operate
            (m>0) ALISM   ->  AL        feudalism      ->  feudal
            (m>0) IVENESS ->  IVE       decisiveness   ->  decisive
            (m>0) FULNESS ->  FUL       hopefulness    ->  hopeful
            (m>0) OUSNESS ->  OUS       callousness    ->  callous
            (m>0) ALITI   ->  AL        formaliti      ->  formal
            (m>0) IVITI   ->  IVE       sensitiviti    ->  sensitive
            (m>0) BILITI  ->  BLE       sensibiliti    ->  sensible
        """

        if self.mode == self.NLTK_EXTENSIONS:
            # Instead of applying the ALLI -> AL rule after '(a)bli' per
            # the published algorithm, instead we apply it first, and,
            # if it succeeds, run the result through step2 again.
            if word.endswith("alli") and self._has_positive_measure(
                self._replace_suffix(word, "alli", "")
            ):
                return self._step2(self._replace_suffix(word, "alli", "al"))

        bli_rule = ("bli", "ble", self._has_positive_measure)
        abli_rule = ("abli", "able", self._has_positive_measure)

        rules = [
            ("ational", "ate", self._has_positive_measure),
            ("tional", "tion", self._has_positive_measure),
            ("enci", "ence", self._has_positive_measure),
            ("anci", "ance", self._has_positive_measure),
            ("izer", "ize", self._has_positive_measure),
            abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule,
            ("alli", "al", self._has_positive_measure),
            ("entli", "ent", self._has_positive_measure),
            ("eli", "e", self._has_positive_measure),
            ("ousli", "ous", self._has_positive_measure),
            ("ization", "ize", self._has_positive_measure),
            ("ation", "ate", self._has_positive_measure),
            ("ator", "ate", self._has_positive_measure),
            ("alism", "al", self._has_positive_measure),
            ("iveness", "ive", self._has_positive_measure),
            ("fulness", "ful", self._has_positive_measure),
            ("ousness", "ous", self._has_positive_measure),
            ("aliti", "al", self._has_positive_measure),
            ("iviti", "ive", self._has_positive_measure),
            ("biliti", "ble", self._has_positive_measure),
        ]

        if self.mode == self.NLTK_EXTENSIONS:
            rules.append(("fulli", "ful", self._has_positive_measure))

            # The 'l' of the 'logi' -> 'log' rule is put with the stem,
            # so that short stems like 'geo' 'theo' etc work like
            # 'archaeo' 'philo' etc.
            rules.append(
                ("logi", "log", lambda stem: self._has_positive_measure(word[:-3]))
            )

        if self.mode == self.MARTIN_EXTENSIONS:
            rules.append(("logi", "log", self._has_positive_measure))

        return self._apply_rule_list(word, rules)

    def _step3(self, word):
        """Implements Step 3 from "An algorithm for suffix stripping"

        From the paper:

        Step 3

            (m>0) ICATE ->  IC              triplicate     ->  triplic
            (m>0) ATIVE ->                  formative      ->  form
            (m>0) ALIZE ->  AL              formalize      ->  formal
            (m>0) ICITI ->  IC              electriciti    ->  electric
            (m>0) ICAL  ->  IC              electrical     ->  electric
            (m>0) FUL   ->                  hopeful        ->  hope
            (m>0) NESS  ->                  goodness       ->  good
        """
        return self._apply_rule_list(
            word,
            [
                ("icate", "ic", self._has_positive_measure),
                ("ative", "", self._has_positive_measure),
                ("alize", "al", self._has_positive_measure),
                ("iciti", "ic", self._has_positive_measure),
                ("ical", "ic", self._has_positive_measure),
                ("ful", "", self._has_positive_measure),
                ("ness", "", self._has_positive_measure),
            ],
        )

    def _step4(self, word):
        """Implements Step 4 from "An algorithm for suffix stripping"

        Step 4

            (m>1) AL    ->                  revival        ->  reviv
            (m>1) ANCE  ->                  allowance      ->  allow
            (m>1) ENCE  ->                  inference      ->  infer
            (m>1) ER    ->                  airliner       ->  airlin
            (m>1) IC    ->                  gyroscopic     ->  gyroscop
            (m>1) ABLE  ->                  adjustable     ->  adjust
            (m>1) IBLE  ->                  defensible     ->  defens
            (m>1) ANT   ->                  irritant       ->  irrit
            (m>1) EMENT ->                  replacement    ->  replac
            (m>1) MENT  ->                  adjustment     ->  adjust
            (m>1) ENT   ->                  dependent      ->  depend
            (m>1 and (*S or *T)) ION ->     adoption       ->  adopt
            (m>1) OU    ->                  homologou      ->  homolog
            (m>1) ISM   ->                  communism      ->  commun
            (m>1) ATE   ->                  activate       ->  activ
            (m>1) ITI   ->                  angulariti     ->  angular
            (m>1) OUS   ->                  homologous     ->  homolog
            (m>1) IVE   ->                  effective      ->  effect
            (m>1) IZE   ->                  bowdlerize     ->  bowdler

        The suffixes are now removed. All that remains is a little
        tidying up.
        """
        measure_gt_1 = lambda stem: self._measure(stem) > 1

        return self._apply_rule_list(
            word,
            [
                ("al", "", measure_gt_1),
                ("ance", "", measure_gt_1),
                ("ence", "", measure_gt_1),
                ("er", "", measure_gt_1),
                ("ic", "", measure_gt_1),
                ("able", "", measure_gt_1),
                ("ible", "", measure_gt_1),
                ("ant", "", measure_gt_1),
                ("ement", "", measure_gt_1),
                ("ment", "", measure_gt_1),
                ("ent", "", measure_gt_1),
                # (m>1 and (*S or *T)) ION ->
                (
                    "ion",
                    "",
                    lambda stem: self._measure(stem) > 1 and stem[-1] in ("s", "t"),
                ),
                ("ou", "", measure_gt_1),
                ("ism", "", measure_gt_1),
                ("ate", "", measure_gt_1),
                ("iti", "", measure_gt_1),
                ("ous", "", measure_gt_1),
                ("ive", "", measure_gt_1),
                ("ize", "", measure_gt_1),
            ],
        )

    def _step5a(self, word):
        """Implements Step 5a from "An algorithm for suffix stripping"

        From the paper:

        Step 5a

            (m>1) E     ->                  probate        ->  probat
                                            rate           ->  rate
            (m=1 and not *o) E ->           cease          ->  ceas
        """
        # Note that Martin's test vocabulary and reference
        # implementations are inconsistent in how they handle the case
        # where two rules both refer to a suffix that matches the word
        # to be stemmed, but only the condition of the second one is
        # true.
        # Earlier in step2b we had the rules:
        #     (m>0) EED -> EE
        #     (*v*) ED  ->
        # but the examples in the paper included "feed"->"feed", even
        # though (*v*) is true for "fe" and therefore the second rule
        # alone would map "feed"->"fe".
        # However, in THIS case, we need to handle the consecutive rules
        # differently and try both conditions (obviously; the second
        # rule here would be redundant otherwise). Martin's paper makes
        # no explicit mention of the inconsistency; you have to infer it
        # from the examples.
        # For this reason, we can't use _apply_rule_list here.
        if word.endswith("e"):
            stem = self._replace_suffix(word, "e", "")
            if self._measure(stem) > 1:
                return stem
            if self._measure(stem) == 1 and not self._ends_cvc(stem):
                return stem
        return word

    def _step5b(self, word):
        """Implements Step 5a from "An algorithm for suffix stripping"

        From the paper:

        Step 5b

            (m > 1 and *d and *L) -> single letter
                                    controll       ->  control
                                    roll           ->  roll
        """
        return self._apply_rule_list(
            word, [("ll", "l", lambda stem: self._measure(word[:-1]) > 1)]
        )

    def stem(self, word, to_lowercase=True):
        """Strip affixes from ``word`` and return the stem.

        :param word: the word to be stemmed
        :param to_lowercase: if True (the default), lowercase ``word``
            before stemming; pass False to stem the string exactly as
            given.
        """
        stem = word.lower() if to_lowercase else word

        # Bug fix: look up the *normalized* form in the irregular-forms
        # pool.  The pool is keyed by lowercase words (see __init__), so
        # the previous test of the raw ``word`` missed capitalized
        # irregular forms (e.g. "Dying") whenever to_lowercase=True,
        # even though the return value already indexed with ``stem``.
        if self.mode == self.NLTK_EXTENSIONS and stem in self.pool:
            return self.pool[stem]

        if self.mode != self.ORIGINAL_ALGORITHM and len(stem) <= 2:
            # With this line, strings of length 1 or 2 don't go through
            # the stemming process, although no mention is made of this
            # in the published algorithm.
            return stem

        stem = self._step1a(stem)
        stem = self._step1b(stem)
        stem = self._step1c(stem)
        stem = self._step2(stem)
        stem = self._step3(stem)
        stem = self._step4(stem)
        stem = self._step5a(stem)
        stem = self._step5b(stem)

        return stem

    def __repr__(self):
        return "<PorterStemmer>"
# Lazy handle to the combined Penn Treebank sample corpus: the corpus data
# is only located and read on first access.  ``LazyCorpusLoader`` and
# ``BracketParseCorpusReader`` come from NLTK's corpus machinery and are
# not imported in this excerpt.
treebank: BracketParseCorpusReader = LazyCorpusLoader(
    "treebank/combined",
    BracketParseCorpusReader,
    r"wsj_.*\.mrg",  # only the merged (parsed + tagged) WSJ files
    tagset="wsj",
    encoding="ascii",
)
The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem:
A demonstration of the porter stemmer on a sample from the Penn Treebank corpus.
Here is the function:
def demo():
    """
    A demonstration of the porter stemmer on a sample from
    the Penn Treebank corpus.
    """
    from nltk import stem
    from nltk.corpus import treebank

    porter = stem.PorterStemmer()

    original_words = []
    stemmed_words = []
    for fileid in treebank.fileids()[:3]:
        for word, _tag in treebank.tagged_words(fileid):
            original_words.append(word)
            stemmed_words.append(porter.stem(word))

    def _wrap(tokens):
        # Join the tokens into one string and word-wrap it at ~70 columns.
        text = " ".join(tokens)
        return re.sub(r"(.{,70})\s", r"\1\n", text + " ").rstrip()

    # Print the original text followed by the stemmed version.
    print("-Original-".center(70).replace(" ", "*").replace("-", " "))
    print(_wrap(original_words))
    print("-Results-".center(70).replace(" ", "*").replace("-", " "))
    print(_wrap(stemmed_words))
    print("*" * 70)
def selection(a):
    """
    Selection Sort: scan the list to find its smallest element, then
    swap it with the first element.  The remainder of the list is one
    element smaller; apply the same method to this list, and so on.

    Sorts ``a`` in place.

    :param a: the mutable sequence to sort
    :return: the number of comparisons performed, always n*(n-1)/2
    :rtype: int
    """
    # Fixes: the original first line was corrupted by a pasted-in record
    # id ("170,891 | def selection(a):"), and the local ``min`` shadowed
    # the builtin of the same name.
    count = 0
    for i in range(len(a) - 1):
        min_index = i
        for j in range(i + 1, len(a)):
            if a[j] < a[min_index]:
                min_index = j
            count += 1  # every inner iteration is one comparison
        a[min_index], a[i] = a[i], a[min_index]
    return count
def bubble(a):
    """
    Bubble Sort: compare adjacent elements of the list left-to-right,
    and swap them if they are out of order.  After one pass through
    the list swapping adjacent items, the largest item will be in
    the rightmost position.  The remainder is one element smaller;
    apply the same method to this list, and so on.

    Sorts ``a`` in place and returns the number of swaps performed.
    """
    swaps = 0
    n = len(a)
    for completed in range(n - 1):
        # After ``completed`` passes, the last ``completed`` slots
        # already hold the largest items; stop the scan before them.
        for k in range(n - completed - 1):
            if a[k + 1] < a[k]:
                a[k], a[k + 1] = a[k + 1], a[k]
                swaps += 1
    return swaps
def merge(a):
    """
    Merge Sort: split the list in half, and sort each half, then
    combine the sorted halves.

    Sorts ``a`` in place and returns the total comparison count
    (recursive calls plus the final merge via ``_merge_lists``).
    """
    if len(a) <= 1:
        # A list of zero or one items is already sorted.
        return 0
    midpoint = len(a) // 2
    left = a[:midpoint]
    right = a[midpoint:]
    subtotal = merge(left) + merge(right)
    combined, merge_count = _merge_lists(left, right)
    a[:] = combined  # copy the result back into a.
    return subtotal + merge_count
def quick(a):
    # Quick Sort entry point: delegate to the recursive helper ``_quick``
    # (defined elsewhere in this module) over the whole list; presumably
    # returns its comparison count, mirroring the other sorts -- TODO
    # confirm against ``_quick``'s contract.
    return _quick(a, 0, len(a) - 1)
# NOTE(review): this is a typeshed *signature stub* (``...`` body) for
# ``random.shuffle`` that appears to have been pasted into the module; the
# names MutableSequence/Any/Optional/Callable are never imported, and the
# stub is shadowed by ``from random import shuffle`` inside demo() below.
# It should probably be deleted.
def shuffle(x: MutableSequence[Any], random: Optional[Callable[[], float]] = ...) -> None: ...
def demo():
    """Compare the comparison/swap counts of the four sorting routines
    on shuffled lists of increasing size, printing one line per size."""
    from random import shuffle

    for size in (10, 20, 50, 100, 200, 500, 1000):
        data = list(range(size))

        # Re-shuffle before each sort so every routine sees random input.
        shuffle(data)
        n_selection = selection(data)
        shuffle(data)
        n_bubble = bubble(data)
        shuffle(data)
        n_merge = merge(data)
        shuffle(data)
        n_quick = quick(data)

        print(
            "size=%5d: selection=%8d, bubble=%8d, merge=%6d, quick=%6d"
            % (size, n_selection, n_bubble, n_merge, n_quick)
        )
import random
def wordfinder(words, rows=20, cols=20, attempts=50, alph="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """
    Attempt to arrange words into a letter-grid with the specified
    number of rows and columns.  Try each word in several positions
    and directions, until it can be fitted into the grid, or the
    maximum number of allowable attempts is exceeded.  Returns a tuple
    consisting of the grid and the words that were successfully
    placed.

    :param words: the list of words to be put into the grid
    :type words: list
    :param rows: the number of rows in the grid
    :type rows: int
    :param cols: the number of columns in the grid
    :type cols: int
    :param attempts: the number of times to attempt placing a word
    :type attempts: int
    :param alph: the alphabet, to be used for filling blank cells
    :type alph: str
    :rtype: tuple
    """
    # place longer words first
    words = sorted(words, key=len, reverse=True)

    grid = []  # the letter grid
    used = []  # the words we used

    # initialize the grid
    for i in range(rows):
        grid.append([""] * cols)

    # try to place each word
    for word in words:
        word = word.strip().upper()  # normalize
        save = word  # keep a record of the word
        # ``revword`` is defined elsewhere in this module; presumably it
        # (randomly?) reverses the word so entries can read in either
        # direction -- TODO confirm.
        word = revword(word)
        for attempt in range(attempts):
            # NOTE(review): randint is inclusive of its upper bound, so r
            # can equal len(word) and x/y can equal rows/cols; out-of-range
            # anchors are rejected by the bounds check below.
            r = random.randint(0, len(word))
            dir = random.choice([1, 2, 3, 4])
            x = random.randint(0, rows)
            y = random.randint(0, cols)
            # Shift the anchor by r along the chosen direction before
            # bounds-checking it.
            if dir == 1:
                x += r
                y += r
            elif dir == 2:
                x += r
            elif dir == 3:
                x += r
                y -= r
            elif dir == 4:
                y += r
            if 0 <= x < rows and 0 <= y < cols:
                # ``check`` (defined elsewhere) appears to both test the
                # placement and write the word into ``grid`` on success,
                # since nothing else writes letters here -- TODO confirm.
                if check(word, dir, x, y, grid, rows, cols):
                    # used.append((save, dir, x, y, word))
                    used.append(save)
                    break

    # Fill up the remaining spaces
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] == "":
                grid[i][j] = random.choice(alph)

    return grid, used
# Lazy handle to the Unix "words" wordlist corpus; loaded on first access.
# ``LazyCorpusLoader``/``WordListCorpusReader`` come from NLTK's corpus
# machinery and are not imported in this excerpt.
words: WordListCorpusReader = LazyCorpusLoader(
    "words", WordListCorpusReader, r"(?!README|\.).*", encoding="ascii"
)
def word_finder():
    """Build a word-search puzzle from up to 200 random dictionary words
    (length 3-12) and print the grid followed by the placed words."""
    from nltk.corpus import words

    wordlist = words.words()
    random.shuffle(wordlist)
    wordlist = wordlist[:200]
    wordlist = [w for w in wordlist if 3 <= len(w) <= 12]

    grid, used = wordfinder(wordlist)

    print("Word Finder\n")
    # Each grid cell is followed by a space; each row ends with a newline.
    for row in grid:
        for letter in row:
            print(letter, end=" ")
        print()
    print()

    for number, word in enumerate(used):
        print("%d:" % (number + 1), word)
def babelize_shell():
    """Retired entry point: print a notice that the Babelfish service is gone."""
    notice = "Babelfish online translation service is no longer available."
    print(notice)
# Phrase banks for the Chomsky-style sentence generator: one leadin, one
# subject, one verb and one object are combined per sentence.  One phrase
# per line; a trailing backslash continues a phrase onto the next source
# line.  (Fixes the corrupted first line, which had a pasted-in record id
# "170,894 | " fused onto the ``leadins`` assignment, breaking the syntax.)
leadins = """To characterize a linguistic level L,
On the other hand,
This suggests that
It appears that
Furthermore,
We will bring evidence in favor of the following thesis:
To provide a constituent structure for T(Z,K),
From C1, it follows that
For any transformation which is sufficiently diversified in \
application to be of any interest,
Analogously,
Clearly,
Note that
Of course,
Suppose, for instance, that
Thus
With this clarification,
Conversely,
We have already seen that
By combining adjunctions and certain deformations,
I suggested that these results would follow from the assumption that
If the position of the trace in (99c) were only relatively \
inaccessible to movement,
However, this assumption is not correct, since
Comparing these examples with their parasitic gap counterparts in \
(96) and (97), we see that
In the discussion of resumptive pronouns following (81),
So far,
Nevertheless,
For one thing,
Summarizing, then, we assume that
A consequence of the approach just outlined is that
Presumably,
On our assumptions,
It may be, then, that
It must be emphasized, once again, that
Let us continue to suppose that
Notice, incidentally, that """

subjects = """ the notion of level of grammaticalness
a case of semigrammaticalness of a different sort
most of the methodological work in modern linguistics
a subset of English sentences interesting on quite independent grounds
the natural general principle that will subsume this case
an important property of these three types of EC
any associated supporting element
the appearance of parasitic gaps in domains relatively inaccessible \
to ordinary extraction
the speaker-hearer's linguistic intuition
the descriptive power of the base component
the earlier discussion of deviance
this analysis of a formative as a pair of sets of features
this selectionally introduced contextual feature
a descriptively adequate grammar
the fundamental error of regarding functional notions as categorial
relational information
the systematic use of complex symbols
the theory of syntactic features developed earlier"""

verbs = """can be defined in such a way as to impose
delimits
suffices to account for
cannot be arbitrary in
is not subject to
does not readily tolerate
raises serious doubts about
is not quite equivalent to
does not affect the structure of
may remedy and, at the same time, eliminate
is not to be considered in determining
is to be regarded as
is unspecified with respect to
is, apparently, determined by
is necessary to impose an interpretation on
appears to correlate rather closely with
is rather different from"""

objects = """ problems of phonemic and morphological analysis.
a corpus of utterance tokens upon which conformity has been defined \
by the paired utterance test.
the traditional practice of grammarians.
the levels of acceptability from fairly high (e.g. (99a)) to virtual \
gibberish (e.g. (98d)).
a stipulation to place the constructions into these various categories.
a descriptive fact.
a parasitic gap construction.
the extended c-command discussed in connection with (34).
the ultimate standard that determines the accuracy of any proposed grammar.
the system of base rules exclusive of the lexicon.
irrelevant intervening contexts in selectional rules.
nondistinctness in the sense of distinctive feature theory.
a general convention regarding the forms of the grammar.
an abstract underlying order.
an important distinction in language use.
the requirement that branching is not tolerated within the dominance \
scope of a complex symbol.
the strong generative capacity of the theory."""
import random
import textwrap
from itertools import chain, islice
# NOTE(review): the class below is a typeshed *signature stub* (all bodies
# are ``...``) apparently pasted into this module.  It shadows the real
# ``itertools.chain`` imported above, so ``chain.from_iterable`` would
# return None at runtime; ``Iterator``/``Generic``/``_T``/``_S``/
# ``Iterable`` are never imported.  It should probably be deleted.
class chain(Iterator[_T], Generic[_T]):
    def __init__(self, *iterables: Iterable[_T]) -> None: ...
    def __next__(self) -> _T: ...
    def __iter__(self) -> Iterator[_T]: ...
    def from_iterable(iterable: Iterable[Iterable[_S]]) -> Iterator[_S]: ...
# NOTE(review): typeshed overload stubs for ``islice`` missing their
# ``@overload`` decorators -- the second definition simply shadows the
# first.  They also shadow the real ``itertools.islice`` imported above and
# would return None at runtime; probably pasted by mistake.
def islice(iterable: Iterable[_T], stop: Optional[int]) -> Iterator[_T]: ...
def islice(iterable: Iterable[_T], start: Optional[int], stop: Optional[int], step: Optional[int] = ...) -> Iterator[_T]: ...
def generate_chomsky(times=5, line_length=72):
    """Print ``times`` pseudo-Chomsky sentences, word-wrapped to
    ``line_length`` columns, by combining one shuffled phrase from each
    of the leadin/subject/verb/object banks per sentence."""
    pools = []
    for bank in (leadins, subjects, verbs, objects):
        phrases = [line.strip() for line in bank.splitlines()]
        random.shuffle(phrases)
        pools.append(phrases)
    # zip yields one (leadin, subject, verb, object) tuple per sentence;
    # flatten the first ``times`` of them into a single token stream.
    tokens = chain.from_iterable(islice(zip(*pools), 0, times))
    print(textwrap.fill(" ".join(tokens), line_length))
import re
from nltk.metrics import accuracy as _accuracy
from nltk.tag.mapping import map_tag
from nltk.tag.util import str2tuple
from nltk.tree import Tree
def tree2conlltags(t):
    """
    Return a list of 3-tuples containing ``(word, tag, IOB-tag)``.
    Convert a tree to the CoNLL IOB tag format.

    :param t: The tree to be converted.
    :type t: Tree
    :rtype: list(tuple)
    """
    result = []
    for child in t:
        try:
            category = child.label()
        except AttributeError:
            # A bare (word, tag) leaf: outside any chunk.
            result.append((child[0], child[1], "O"))
            continue
        # A chunk subtree: first token gets B-, the rest I-.
        iob_prefix = "B-"
        for token in child:
            if isinstance(token, Tree):
                raise ValueError(
                    "Tree is too deeply nested to be printed in CoNLL format"
                )
            result.append((token[0], token[1], iob_prefix + category))
            iob_prefix = "I-"
    return result
The provided code snippet includes necessary dependencies for implementing the `accuracy` function. Write a Python function `def accuracy(chunker, gold)` to solve the following problem:
Score the accuracy of the chunker against the gold standard. Strip the chunk information from the gold standard and rechunk it using the chunker, then compute the accuracy score. :type chunker: ChunkParserI :param chunker: The chunker being evaluated. :type gold: tree :param gold: The chunk structures to score the chunker on. :rtype: float
Here is the function:
def accuracy(chunker, gold):
    """
    Score the accuracy of the chunker against the gold standard.
    Strip the chunk information from the gold standard and rechunk it using
    the chunker, then compute the accuracy score.

    :type chunker: ChunkParserI
    :param chunker: The chunker being evaluated.
    :type gold: tree
    :param gold: The chunk structures to score the chunker on.
    :rtype: float
    """
    gold_iob = []
    test_iob = []
    for gold_tree in gold:
        # Flatten to drop the chunk structure, then let the chunker redo it.
        rechunked = chunker.parse(gold_tree.flatten())
        gold_iob.extend(tree2conlltags(gold_tree))
        test_iob.extend(tree2conlltags(rechunked))
    return _accuracy(gold_iob, test_iob)
import re
from nltk.metrics import accuracy as _accuracy
from nltk.tag.mapping import map_tag
from nltk.tag.util import str2tuple
from nltk.tree import Tree
def _chunksets(t, count, chunk_label):
    # Collect the chunk subtrees of ``t`` whose label matches the
    # ``chunk_label`` regexp, each keyed by (sentence index, leaf offset)
    # so that identical chunks at different positions remain distinct.
    offset = 0
    found = []
    for child in t:
        if not isinstance(child, Tree):
            offset += 1
            continue
        if re.match(chunk_label, child.label()):
            found.append(((count, offset), child.freeze()))
        offset += len(child.leaves())
    return set(found)
import re
from nltk.metrics import accuracy as _accuracy
from nltk.tag.mapping import map_tag
from nltk.tag.util import str2tuple
from nltk.tree import Tree
The provided code snippet includes necessary dependencies for implementing the `conlltags2tree` function. Write a Python function `def conlltags2tree( sentence, chunk_types=("NP", "PP", "VP"), root_label="S", strict=False )` to solve the following problem:
Convert the CoNLL IOB format to a tree.
Here is the function:
def conlltags2tree(
    sentence, chunk_types=("NP", "PP", "VP"), root_label="S", strict=False
):
    """
    Convert the CoNLL IOB format to a tree.
    """
    result = Tree(root_label, [])
    for word, postag, chunktag in sentence:
        if chunktag is None:
            # Missing tag: an error under strict mode, otherwise treat as O.
            if strict:
                raise ValueError("Bad conll tag sequence")
            result.append((word, postag))
        elif chunktag == "O":
            result.append((word, postag))
        elif chunktag.startswith("B-"):
            # Start a new chunk subtree.
            result.append(Tree(chunktag[2:], [(word, postag)]))
        elif chunktag.startswith("I-"):
            label = chunktag[2:]
            if result and isinstance(result[-1], Tree) and result[-1].label() == label:
                # Continue the current chunk.
                result[-1].append((word, postag))
            elif strict:
                raise ValueError("Bad conll tag sequence")
            else:
                # Orphan I- tag: treat it as if it were B-.
                result.append(Tree(label, [(word, postag)]))
        else:
            raise ValueError(f"Bad conll tag {chunktag!r}")
    return result
import re
from nltk.metrics import accuracy as _accuracy
from nltk.tag.mapping import map_tag
from nltk.tag.util import str2tuple
from nltk.tree import Tree
# Matches one complete IEER <DOC> element.  The docno, doctype, date_time
# and headline fields are optional named groups; the <TEXT> body is required.
# DOTALL lets ".*?" span newlines inside the document.
_IEER_DOC_RE = re.compile(
    r"<DOC>\s*"
    r"(<DOCNO>\s*(?P<docno>.+?)\s*</DOCNO>\s*)?"
    r"(<DOCTYPE>\s*(?P<doctype>.+?)\s*</DOCTYPE>\s*)?"
    r"(<DATE_TIME>\s*(?P<date_time>.+?)\s*</DATE_TIME>\s*)?"
    r"<BODY>\s*"
    r"(<HEADLINE>\s*(?P<headline>.+?)\s*</HEADLINE>\s*)?"
    r"<TEXT>(?P<text>.*?)</TEXT>\s*"
    r"</BODY>\s*</DOC>\s*",
    re.DOTALL,
)
def _ieer_read_text(s, root_label):
    """Parse IEER-annotated text into a shallow chunk tree.

    ``<b_TYPE>`` / ``<e_TYPE>`` markers open and close a named-entity
    subtree; everything else becomes a leaf token.

    NOTE(review): relies on a module-level ``_IEER_TYPE_RE`` pattern that is
    defined elsewhere in this module (not visible in this excerpt).
    """
    stack = [Tree(root_label, [])]
    # s will be None if there is no headline in the text
    # return the empty list in place of a Tree
    if s is None:
        return []
    for piece_m in re.finditer(r"<[^>]+>|[^\s<]+", s):
        piece = piece_m.group()
        try:
            if piece.startswith("<b_"):
                m = _IEER_TYPE_RE.match(piece)
                if m is None:
                    print("XXXX", piece)
                # NOTE(review): if m is None, the print above does not stop
                # execution, and m.group(...) below raises AttributeError --
                # which is NOT in the except tuple, so it propagates raw.
                chunk = Tree(m.group("type"), [])
                stack[-1].append(chunk)
                stack.append(chunk)
            elif piece.startswith("<e_"):
                # Close the currently open entity chunk.
                stack.pop()
            # elif piece.startswith('<'):
            #     print "ERROR:", piece
            #     raise ValueError # Unexpected HTML
            else:
                stack[-1].append(piece)
        except (IndexError, ValueError) as e:
            # An unbalanced <e_...> pops the root (IndexError); re-raise
            # with the character offset for easier debugging.
            raise ValueError(
                f"Bad IEER string (error at character {piece_m.start():d})"
            ) from e
    if len(stack) != 1:
        raise ValueError("Bad IEER string")
    return stack[0]
The provided code snippet includes necessary dependencies for implementing the `ieerstr2tree` function. Write a Python function `def ieerstr2tree( s, chunk_types=[ "LOCATION", "ORGANIZATION", "PERSON", "DURATION", "DATE", "CARDINAL", "PERCENT", "MONEY", "MEASURE", ], root_label="S", )` to solve the following problem:
Return a chunk structure containing the chunked tagged text that is encoded in the given IEER style string. Convert a string of chunked tagged text in the IEER named entity format into a chunk structure. Chunks are of several types, LOCATION, ORGANIZATION, PERSON, DURATION, DATE, CARDINAL, PERCENT, MONEY, and MEASURE. :rtype: Tree
Here is the function:
def ieerstr2tree(
    s,
    chunk_types=[
        "LOCATION",
        "ORGANIZATION",
        "PERSON",
        "DURATION",
        "DATE",
        "CARDINAL",
        "PERCENT",
        "MONEY",
        "MEASURE",
    ],
    root_label="S",
):
    """
    Return a chunk structure containing the chunked tagged text that is
    encoded in the given IEER style string.

    Convert a string of chunked tagged text in the IEER named
    entity format into a chunk structure.  Chunks are of several
    types, LOCATION, ORGANIZATION, PERSON, DURATION, DATE, CARDINAL,
    PERCENT, MONEY, and MEASURE.

    :rtype: Tree
    """
    # Try to interpret the string as a full <DOC> element; if that fails,
    # fall back to treating everything as <TEXT> body content.
    doc = _IEER_DOC_RE.match(s)
    if doc is None:
        return _ieer_read_text(s, root_label)
    return {
        "text": _ieer_read_text(doc.group("text"), root_label),
        "docno": doc.group("docno"),
        "doctype": doc.group("doctype"),
        "date_time": doc.group("date_time"),
        # The headline is parsed too, so NEs occurring there are captured.
        "headline": _ieer_read_text(doc.group("headline"), root_label),
    }
170,899 | import re
from nltk.metrics import accuracy as _accuracy
from nltk.tag.mapping import map_tag
from nltk.tag.util import str2tuple
from nltk.tree import Tree
def tagstr2tree(
    s, chunk_label="NP", root_label="S", sep="/", source_tagset=None, target_tagset=None
):
    """
    Divide a string of bracketted tagged text into
    chunks and unchunked tokens, and produce a Tree.

    Chunks are marked by square brackets (``[...]``).  Words are
    delimited by whitespace, and each word should have the form
    ``text/tag``.  Words that do not contain a slash are
    assigned a ``tag`` of None.

    :param s: The string to be converted
    :type s: str
    :param chunk_label: The label to use for chunk nodes
    :type chunk_label: str
    :param root_label: The label to use for the root of the tree
    :type root_label: str
    :rtype: Tree
    """
    token_re = re.compile(r"\[|\]|[^\[\]\s]+")
    root = Tree(root_label, [])
    frames = [root]
    for m in token_re.finditer(s):
        piece = m.group()
        if piece == "[":
            # Chunks may not nest: '[' is only legal at the top level.
            if len(frames) != 1:
                raise ValueError(f"Unexpected [ at char {m.start():d}")
            chunk = Tree(chunk_label, [])
            frames[-1].append(chunk)
            frames.append(chunk)
        elif piece == "]":
            if len(frames) != 2:
                raise ValueError(f"Unexpected ] at char {m.start():d}")
            frames.pop()
        elif sep is None:
            # No separator: store the raw token, untagged.
            frames[-1].append(piece)
        else:
            word, tag = str2tuple(piece, sep)
            if source_tagset and target_tagset:
                tag = map_tag(source_tagset, target_tagset, tag)
            frames[-1].append((word, tag))
    if len(frames) != 1:
        raise ValueError(f"Expected ] at char {len(s):d}")
    return root
def conllstr2tree(s, chunk_types=("NP", "PP", "VP"), root_label="S"):
    """
    Return a chunk structure for a single sentence
    encoded in the given CONLL 2000 style string.
    This function converts a CoNLL IOB string into a tree.
    It uses the specified chunk types
    (defaults to NP, PP and VP), and creates a tree rooted at a node
    labeled S (by default).

    :param s: The CoNLL string to be converted.
    :type s: str
    :param chunk_types: The chunk types to be converted.
    :type chunk_types: tuple
    :param root_label: The node label to use for the root.
    :type root_label: str
    :rtype: Tree
    """
    # stack has at most two levels: [root] or [root, current-chunk].
    stack = [Tree(root_label, [])]
    for lineno, line in enumerate(s.split("\n")):
        if not line.strip():
            continue
        # Decode the line.
        # NOTE(review): _LINE_RE is a module-level pattern defined elsewhere
        # in this module; it is not visible in this excerpt.
        match = _LINE_RE.match(line)
        if match is None:
            raise ValueError(f"Error on line {lineno:d}")
        (word, tag, state, chunk_type) = match.groups()
        # If it's a chunk type we don't care about, treat it as O.
        if chunk_types is not None and chunk_type not in chunk_types:
            state = "O"
        # For "Begin"/"Outside", finish any completed chunks -
        # also do so for "Inside" which don't match the previous token.
        mismatch_I = state == "I" and chunk_type != stack[-1].label()
        # `state in "BO"` relies on state being a single character.
        if state in "BO" or mismatch_I:
            if len(stack) == 2:
                stack.pop()
        # For "Begin", start a new chunk.
        if state == "B" or mismatch_I:
            chunk = Tree(chunk_type, [])
            stack[-1].append(chunk)
            stack.append(chunk)
        # Add the new word token.
        stack[-1].append((word, tag))
    return stack[0]
def tree2conllstr(t):
    """
    Return a multiline string where each line contains a word, tag and IOB tag.
    Convert a tree to the CoNLL IOB string format

    :param t: The tree to be converted.
    :type t: Tree
    :rtype: str
    """
    # One "word tag iobtag" row per token, newline-separated.
    return "\n".join(" ".join(token) for token in tree2conlltags(t))
def demo():
    """Demonstrate the bracket-string and CoNLL-string tree conversions."""
    # Bracketed tagged text -> chunk tree.
    s = "[ Pierre/NNP Vinken/NNP ] ,/, [ 61/CD years/NNS ] old/JJ ,/, will/MD join/VB [ the/DT board/NN ] ./."
    import nltk
    t = nltk.chunk.tagstr2tree(s, chunk_label="NP")
    t.pprint()
    print()
    # CoNLL IOB text -> chunk tree (only NP and PP chunks kept).
    s = """
    These DT B-NP
    research NN I-NP
    protocols NNS I-NP
    offer VBP B-VP
    to TO B-PP
    the DT B-NP
    patient NN I-NP
    not RB O
    only RB O
    the DT B-NP
    very RB I-NP
    best JJS I-NP
    therapy NN I-NP
    which WDT B-NP
    we PRP B-NP
    have VBP B-VP
    established VBN I-VP
    today NN B-NP
    but CC B-NP
    also RB I-NP
    the DT B-NP
    hope NN I-NP
    of IN B-PP
    something NN B-NP
    still RB B-ADJP
    better JJR I-ADJP
    . . O
    """
    conll_tree = conllstr2tree(s, chunk_types=("NP", "PP"))
    conll_tree.pprint()
    # Demonstrate CoNLL output
    print("CoNLL output:")
    print(nltk.chunk.tree2conllstr(conll_tree))
    print()
170,900 | import os
import pickle
import re
from xml.etree import ElementTree as ET
from nltk.tag import ClassifierBasedTagger, pos_tag
from nltk.chunk.api import ChunkParserI
from nltk.chunk.util import ChunkScore
from nltk.data import find
from nltk.tokenize import word_tokenize
from nltk.tree import Tree
def shape(word):
    """
    Classify *word* into a coarse orthographic-shape category.

    :param word: the token to classify
    :type word: str
    :return: one of ``"number"``, ``"punct"``, ``"upcase"``,
        ``"downcase"``, ``"mixedcase"`` or ``"other"``
    :rtype: str
    """
    # Bug fix: the original pattern was "[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$",
    # where the '$' bound only to the second alternative.  re.match then
    # prefix-matched strings like "123abc" on the first alternative and
    # misclassified them as numbers.  Anchor the whole alternation instead.
    if re.match(r"(?:[0-9]+(?:\.[0-9]*)?|[0-9]*\.[0-9]+)$", word, re.UNICODE):
        return "number"
    elif re.match(r"\W+$", word, re.UNICODE):
        return "punct"
    elif re.match(r"\w+$", word, re.UNICODE):
        if word.istitle():
            return "upcase"
        elif word.islower():
            return "downcase"
        else:
            return "mixedcase"
    else:
        # Mixed word/punctuation tokens, e.g. "can't".
        return "other"
170,901 | import os
import pickle
import re
from xml.etree import ElementTree as ET
from nltk.tag import ClassifierBasedTagger, pos_tag
from nltk.chunk.api import ChunkParserI
from nltk.chunk.util import ChunkScore
from nltk.data import find
from nltk.tokenize import word_tokenize
from nltk.tree import Tree
def simplify_pos(s):
    """Collapse a detailed POS tag into a coarse category.

    Any verb tag (starting with ``V``) maps to ``"V"``; every other tag
    keeps only the portion before the first hyphen.
    """
    return "V" if s.startswith("V") else s.split("-")[0]
170,902 | import os
import pickle
import re
from xml.etree import ElementTree as ET
from nltk.tag import ClassifierBasedTagger, pos_tag
from nltk.chunk.api import ChunkParserI
from nltk.chunk.util import ChunkScore
from nltk.data import find
from nltk.tokenize import word_tokenize
from nltk.tree import Tree
class NEChunkParser(ChunkParserI):
    """
    A named-entity chunker.

    Expected input: list of pos-tagged words.
    """

    def __init__(self, train):
        """Train the underlying IOB tagger on *train* (chunk trees)."""
        self._train(train)

    def parse(self, tokens):
        """
        Chunk *tokens* into a named-entity tree.

        Each token should be a pos-tagged word.
        """
        tagged = self._tagger.tag(tokens)
        tree = self._tagged_to_parse(tagged)
        return tree

    def _train(self, corpus):
        # Convert each chunk tree to an IOB-tagged sequence before training.
        corpus = [self._parse_to_tagged(s) for s in corpus]
        self._tagger = NEChunkParserTagger(train=corpus)

    def _tagged_to_parse(self, tagged_tokens):
        """
        Convert a list of tagged tokens to a chunk-parse tree.
        """
        sent = Tree("S", [])
        for (tok, tag) in tagged_tokens:
            if tag == "O":
                sent.append(tok)
            elif tag.startswith("B-"):
                sent.append(Tree(tag[2:], [tok]))
            elif tag.startswith("I-"):
                if sent and isinstance(sent[-1], Tree) and sent[-1].label() == tag[2:]:
                    sent[-1].append(tok)
                else:
                    # Orphaned I- tag: start a new chunk anyway.
                    sent.append(Tree(tag[2:], [tok]))
        return sent

    @staticmethod
    def _parse_to_tagged(sent):
        """
        Convert a chunk-parse tree to a list of IOB-tagged tokens.

        Bug fix: declared ``@staticmethod`` because it is invoked both as
        ``self._parse_to_tagged(...)`` (in ``_train``) and as
        ``NEChunkParser._parse_to_tagged(...)``; without the decorator the
        instance-bound call passed the parser itself as *sent* and raised
        TypeError for the extra argument.
        """
        toks = []
        for child in sent:
            if isinstance(child, Tree):
                if len(child) == 0:
                    print("Warning -- empty chunk in sentence")
                    continue
                toks.append((child[0], f"B-{child.label()}"))
                for tok in child[1:]:
                    toks.append((tok, f"I-{child.label()}"))
            else:
                toks.append((child, "O"))
        return toks
def postag_tree(tree):
    """Return a copy of *tree* whose leaves are (word, pos) pairs.

    Runs the default POS tagger over the tree's leaves, then rebuilds
    the tree with each leaf replaced by its tagged counterpart.
    """
    # Part-of-speech tagging.
    leaves = tree.leaves()
    tags = (pos for (word, pos) in pos_tag(leaves))
    retagged = Tree("S", [])
    for child in tree:
        if not isinstance(child, Tree):
            retagged.append((child, next(tags)))
            continue
        chunk = Tree(child.label(), [])
        retagged.append(chunk)
        for leaf in child:
            chunk.append((leaf, next(tags)))
    return retagged
def load_ace_data(roots, fmt="binary", skip_bnews=True):
    """Yield parsed ACE trees from every ``.sgm`` file under *roots*.

    Broadcast-news directories (``.../bnews``) are skipped when
    *skip_bnews* is true.
    """
    for top in roots:
        # (The original shadowed `top` with the walk variable; renamed.)
        for dirpath, dirs, files in os.walk(top):
            if dirpath.endswith("bnews") and skip_bnews:
                continue
            for fname in files:
                if fname.endswith(".sgm"):
                    yield from load_ace_file(os.path.join(dirpath, fname), fmt)
def cmp_chunks(correct, guessed):
    """Print a side-by-side comparison of gold vs. guessed IOB tags.

    Runs of tokens that both sequences mark ``O`` are elided to a single
    ``...`` row to keep the output readable.
    """
    correct = NEChunkParser._parse_to_tagged(correct)
    guessed = NEChunkParser._parse_to_tagged(guessed)
    ellipsis = False
    # NOTE: both tuples bind `w`; the guessed token's word wins (the words
    # should be identical anyway since both derive from the same sentence).
    for (w, ct), (w, gt) in zip(correct, guessed):
        if ct == gt == "O":
            if not ellipsis:
                print(f"  {ct:15} {gt:15} {w}")
                # Bug fix: the original mixed automatic and explicit field
                # numbering ("{:15} {:15} {2}"), which raises
                # "ValueError: cannot switch from automatic field numbering
                # to manual field specification" at runtime.
                print("  {:15} {:15} {}".format("...", "...", "..."))
                ellipsis = True
        else:
            ellipsis = False
            print(f"  {ct:15} {gt:15} {w}")
class ChunkScore:
    """
    A utility class for scoring chunk parsers.  ``ChunkScore`` can
    evaluate a chunk parser's output, based on a number of statistics
    (precision, recall, f-measure, missed chunks, incorrect chunks).
    It can also combine the scores from the parsing of multiple texts;
    this makes it significantly easier to evaluate a chunk parser that
    operates one sentence at a time.

    Texts are evaluated with the ``score`` method.  The results of
    evaluation can be accessed via a number of accessor methods, such
    as ``precision`` and ``f_measure``.  A typical use of the
    ``ChunkScore`` class is::

        >>> chunkscore = ChunkScore()           # doctest: +SKIP
        >>> for correct in correct_sentences:   # doctest: +SKIP
        ...     guess = chunkparser.parse(correct.leaves())   # doctest: +SKIP
        ...     chunkscore.score(correct, guess)              # doctest: +SKIP
        >>> print('F Measure:', chunkscore.f_measure())       # doctest: +SKIP
        F Measure: 0.823

    :ivar kwargs: Keyword arguments:

        - max_tp_examples: The maximum number actual examples of true
          positives to record.  This affects the ``correct`` member
          function: ``correct`` will not return more than this number
          of true positive examples.  This does *not* affect any of
          the numerical metrics (precision, recall, or f-measure)

        - max_fp_examples: The maximum number actual examples of false
          positives to record.  This affects the ``incorrect`` member
          function and the ``guessed`` member function: ``incorrect``
          will not return more than this number of examples, and
          ``guessed`` will not return more than this number of true
          positive examples.  This does *not* affect any of
          the numerical metrics (precision, recall, or f-measure)

        - max_fn_examples: The maximum number actual examples of false
          negatives to record.  This affects the ``missed`` member
          function and the ``correct`` member function: ``missed``
          will not return more than this number of examples, and
          ``correct`` will not return more than this number of true
          negative examples.  This does *not* affect any of
          the numerical metrics (precision, recall, or f-measure)

        - chunk_label: A regular expression indicating which chunks
          should be compared.  Defaults to ``'.*'`` (i.e., all chunks).

    :type _tp: list(Token)
    :ivar _tp: List of true positives
    :type _fp: list(Token)
    :ivar _fp: List of false positives
    :type _fn: list(Token)
    :ivar _fn: List of false negatives

    :type _tp_num: int
    :ivar _tp_num: Number of true positives
    :type _fp_num: int
    :ivar _fp_num: Number of false positives
    :type _fn_num: int
    :ivar _fn_num: Number of false negatives.
    """

    def __init__(self, **kwargs):
        self._correct = set()
        self._guessed = set()
        self._tp = set()
        self._fp = set()
        self._fn = set()
        self._max_tp = kwargs.get("max_tp_examples", 100)
        self._max_fp = kwargs.get("max_fp_examples", 100)
        self._max_fn = kwargs.get("max_fn_examples", 100)
        self._chunk_label = kwargs.get("chunk_label", ".*")
        self._tp_num = 0
        self._fp_num = 0
        self._fn_num = 0
        self._count = 0
        self._tags_correct = 0.0
        self._tags_total = 0.0

        self._measuresNeedUpdate = False

    def _updateMeasures(self):
        # Lazily recompute tp/fp/fn from the accumulated chunk sets.
        if self._measuresNeedUpdate:
            self._tp = self._guessed & self._correct
            self._fn = self._correct - self._guessed
            self._fp = self._guessed - self._correct
            self._tp_num = len(self._tp)
            self._fp_num = len(self._fp)
            self._fn_num = len(self._fn)
            self._measuresNeedUpdate = False

    def score(self, correct, guessed):
        """
        Given a correctly chunked sentence, score another chunked
        version of the same sentence.

        :type correct: chunk structure
        :param correct: The known-correct ("gold standard") chunked
            sentence.
        :type guessed: chunk structure
        :param guessed: The chunked sentence to be scored.
        """
        self._correct |= _chunksets(correct, self._count, self._chunk_label)
        self._guessed |= _chunksets(guessed, self._count, self._chunk_label)
        self._count += 1
        self._measuresNeedUpdate = True
        # Keep track of per-tag accuracy (if possible)
        try:
            correct_tags = tree2conlltags(correct)
            guessed_tags = tree2conlltags(guessed)
        except ValueError:
            # This exception case is for nested chunk structures,
            # where tree2conlltags will fail with a ValueError: "Tree
            # is too deeply nested to be printed in CoNLL format."
            correct_tags = guessed_tags = ()
        self._tags_total += len(correct_tags)
        # Each element is a (word, pos, chunktag) triple; the whole triple
        # must match for the token to count as correct.
        self._tags_correct += sum(
            1 for (t, g) in zip(guessed_tags, correct_tags) if t == g
        )

    def accuracy(self):
        """
        Return the overall tag-based accuracy for all text that have
        been scored by this ``ChunkScore``, using the IOB (conll2000)
        tag encoding.

        :rtype: float
        """
        if self._tags_total == 0:
            return 1
        return self._tags_correct / self._tags_total

    def precision(self):
        """
        Return the overall precision for all texts that have been
        scored by this ``ChunkScore``.

        :rtype: float
        """
        self._updateMeasures()
        div = self._tp_num + self._fp_num
        if div == 0:
            return 0
        else:
            return self._tp_num / div

    def recall(self):
        """
        Return the overall recall for all texts that have been
        scored by this ``ChunkScore``.

        :rtype: float
        """
        self._updateMeasures()
        div = self._tp_num + self._fn_num
        if div == 0:
            return 0
        else:
            return self._tp_num / div

    def f_measure(self, alpha=0.5):
        """
        Return the overall F measure for all texts that have been
        scored by this ``ChunkScore``.

        :param alpha: the relative weighting of precision and recall.
            Larger alpha biases the score towards the precision value,
            while smaller alpha biases the score towards the recall
            value.  ``alpha`` should have a value in the range [0,1].
        :type alpha: float
        :rtype: float
        """
        self._updateMeasures()
        p = self.precision()
        r = self.recall()
        if p == 0 or r == 0:  # what if alpha is 0 or 1?
            return 0
        return 1 / (alpha / p + (1 - alpha) / r)

    def missed(self):
        """
        Return the chunks which were included in the
        correct chunk structures, but not in the guessed chunk
        structures, listed in input order.

        :rtype: list of chunks
        """
        self._updateMeasures()
        chunks = list(self._fn)
        return [c[1] for c in chunks]  # discard position information

    def incorrect(self):
        """
        Return the chunks which were included in the guessed chunk structures,
        but not in the correct chunk structures, listed in input order.

        :rtype: list of chunks
        """
        self._updateMeasures()
        chunks = list(self._fp)
        return [c[1] for c in chunks]  # discard position information

    def correct(self):
        """
        Return the chunks which were included in the correct
        chunk structures, listed in input order.

        :rtype: list of chunks
        """
        chunks = list(self._correct)
        return [c[1] for c in chunks]  # discard position information

    def guessed(self):
        """
        Return the chunks which were included in the guessed
        chunk structures, listed in input order.

        :rtype: list of chunks
        """
        chunks = list(self._guessed)
        return [c[1] for c in chunks]  # discard position information

    def __len__(self):
        self._updateMeasures()
        return self._tp_num + self._fn_num

    def __repr__(self):
        """
        Return a concise representation of this ``ChunkScoring``.

        :rtype: str
        """
        return "<ChunkScoring of " + repr(len(self)) + " chunks>"

    def __str__(self):
        """
        Return a verbose representation of this ``ChunkScoring``.
        This representation includes the precision, recall, and
        f-measure scores.  For other information about the score,
        use the accessor methods (e.g., ``missed()`` and ``incorrect()``).

        :rtype: str
        """
        # Bug fix: the original kept "%%" escapes after converting from
        # %-formatting to f-strings, so the report literally printed "%%".
        # In an f-string a percent sign needs no escaping.
        return (
            "ChunkParse score:\n"
            + (f"    IOB Accuracy: {self.accuracy() * 100:5.1f}%\n")
            + (f"    Precision:    {self.precision() * 100:5.1f}%\n")
            + (f"    Recall:       {self.recall() * 100:5.1f}%\n")
            + (f"    F-Measure:    {self.f_measure() * 100:5.1f}%")
        )
def find(resource_name, paths=None):
    """
    Find the given resource by searching through the directories and
    zip files in paths, where a None or empty string specifies an absolute path.
    Returns a corresponding path name.  If the given resource is not
    found, raise a ``LookupError``, whose message gives a pointer to
    the installation instructions for the NLTK downloader.

    Zip File Handling:

    - If ``resource_name`` contains a component with a ``.zip``
      extension, then it is assumed to be a zipfile; and the
      remaining path components are used to look inside the zipfile.

    - If any element of ``nltk.data.path`` has a ``.zip`` extension,
      then it is assumed to be a zipfile.

    - If a given resource name that does not contain any zipfile
      component is not found initially, then ``find()`` will make a
      second attempt to find that resource, by replacing each
      component *p* in the path with *p.zip/p*.  For example, this
      allows ``find()`` to map the resource name
      ``corpora/chat80/cities.pl`` to a zip file path pointer to
      ``corpora/chat80.zip/chat80/cities.pl``.

    - When using ``find()`` to locate a directory contained in a
      zipfile, the resource name must end with the forward slash
      character.  Otherwise, ``find()`` will not locate the
      directory.

    :type resource_name: str or unicode
    :param resource_name: The name of the resource to search for.
        Resource names are posix-style relative path names, such as
        ``corpora/brown``.  Directory names will be
        automatically converted to a platform-appropriate path separator.
    :rtype: str
    """
    # NOTE(review): normalize_resource_name, path, url2pathname,
    # ZipFilePathPointer, FileSystemPathPointer, GzipFileSystemPathPointer
    # and textwrap_indent are module-scope names from nltk.data that are
    # not visible in this excerpt.
    resource_name = normalize_resource_name(resource_name, True)

    # Resolve default paths at runtime in-case the user overrides
    # nltk.data.path
    if paths is None:
        paths = path

    # Check if the resource name includes a zipfile name
    # (the trailing "|" makes the match always succeed: when there is no
    # ".zip" component the empty alternative matches and both groups are
    # None).
    m = re.match(r"(.*\.zip)/?(.*)$|", resource_name)
    zipfile, zipentry = m.groups()

    # Check each item in our path
    for path_ in paths:
        # Is the path item a zipfile?
        if path_ and (os.path.isfile(path_) and path_.endswith(".zip")):
            try:
                return ZipFilePathPointer(path_, resource_name)
            except OSError:
                # resource not in zipfile
                continue

        # Is the path item a directory or is resource_name an absolute path?
        elif not path_ or os.path.isdir(path_):
            if zipfile is None:
                p = os.path.join(path_, url2pathname(resource_name))
                if os.path.exists(p):
                    if p.endswith(".gz"):
                        return GzipFileSystemPathPointer(p)
                    else:
                        return FileSystemPathPointer(p)
            else:
                p = os.path.join(path_, url2pathname(zipfile))
                if os.path.exists(p):
                    try:
                        return ZipFilePathPointer(p, zipentry)
                    except OSError:
                        # resource not in zipfile
                        continue

    # Fallback: if the path doesn't include a zip file, then try
    # again, assuming that one of the path components is inside a
    # zipfile of the same name.
    if zipfile is None:
        pieces = resource_name.split("/")
        for i in range(len(pieces)):
            modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:])
            try:
                return find(modified_name, paths)
            except LookupError:
                pass

    # Identify the package (i.e. the .zip file) to download.
    resource_zipname = resource_name.split("/")[1]
    if resource_zipname.endswith(".zip"):
        resource_zipname = resource_zipname.rpartition(".")[0]
    # Display a friendly error message if the resource wasn't found:
    msg = str(
        "Resource \33[93m{resource}\033[0m not found.\n"
        "Please use the NLTK Downloader to obtain the resource:\n\n"
        "\33[31m"  # To display red text in terminal.
        ">>> import nltk\n"
        ">>> nltk.download('{resource}')\n"
        "\033[0m"
    ).format(resource=resource_zipname)
    msg = textwrap_indent(msg)

    msg += "\n For more information see: https://www.nltk.org/data.html\n"

    msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format(
        resource_name=resource_name
    )

    msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths)
    sep = "*" * 70
    resource_not_found = f"\n{sep}\n{msg}\n{sep}\n"
    raise LookupError(resource_not_found)
def build_model(fmt="binary"):
    """Train an NE chunker on the ACE corpora, evaluate it, and pickle it.

    :param fmt: the annotation format passed through to ``load_ace_data``.
    :return: the trained ``NEChunkParser``.
    """
    print("Loading training data...")
    train_paths = [
        find("corpora/ace_data/ace.dev"),
        find("corpora/ace_data/ace.heldout"),
        find("corpora/ace_data/bbn.dev"),
        find("corpora/ace_data/muc.dev"),
    ]
    train_trees = load_ace_data(train_paths, fmt)
    # POS-tag every tree so the chunker trains on (word, pos) leaves.
    train_data = [postag_tree(t) for t in train_trees]
    print("Training...")
    cp = NEChunkParser(train_data)
    # Free the training data before loading the evaluation set.
    del train_data

    print("Loading eval data...")
    eval_paths = [find("corpora/ace_data/ace.eval")]
    eval_trees = load_ace_data(eval_paths, fmt)
    eval_data = [postag_tree(t) for t in eval_trees]

    print("Evaluating...")
    chunkscore = ChunkScore()
    for i, correct in enumerate(eval_data):
        guess = cp.parse(correct.leaves())
        chunkscore.score(correct, guess)
        # Show a detailed tag-by-tag comparison for the first few sentences.
        if i < 3:
            cmp_chunks(correct, guess)
    print(chunkscore)

    outfilename = f"/tmp/ne_chunker_{fmt}.pickle"
    print(f"Saving chunker to {outfilename}...")

    with open(outfilename, "wb") as outfile:
        pickle.dump(cp, outfile, -1)

    return cp
170,903 | import re
import regex
from nltk.chunk.api import ChunkParserI
from nltk.tree import Tree
class ChunkString:
    """
    A string-based encoding of a particular chunking of a text.
    Internally, the ``ChunkString`` class uses a single string to
    encode the chunking of the input text.  This string contains a
    sequence of angle-bracket delimited tags, with chunking indicated
    by braces.  An example of this encoding is::

        {<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>

    ``ChunkString`` are created from tagged texts (i.e., lists of
    ``tokens`` whose type is ``TaggedType``).  Initially, nothing is
    chunked.

    The chunking of a ``ChunkString`` can be modified with the ``xform()``
    method, which uses a regular expression to transform the string
    representation.  These transformations should only add and remove
    braces; they should *not* modify the sequence of angle-bracket
    delimited tags.

    :type _str: str
    :ivar _str: The internal string representation of the text's
        encoding.  This string representation contains a sequence of
        angle-bracket delimited tags, with chunking indicated by
        braces.  An example of this encoding is::

            {<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>

    :type _pieces: list(tagged tokens and chunks)
    :ivar _pieces: The tagged tokens and chunks encoded by this ``ChunkString``.
    :ivar _debug: The debug level.  See the constructor docs.

    :cvar IN_CHUNK_PATTERN: A zero-width regexp pattern string that
        will only match positions that are in chunks.
    :cvar IN_STRIP_PATTERN: A zero-width regexp pattern string that
        will only match positions that are in strips.
    """

    # Characters permitted inside a tag (anything except braces and angles).
    CHUNK_TAG_CHAR = r"[^\{\}<>]"
    CHUNK_TAG = r"(<%s+?>)" % CHUNK_TAG_CHAR

    # Zero-width lookaheads: a position is "in a chunk" iff a '}' follows
    # before any '{', and "in a strip" iff a '{' (or end) follows before '}'.
    IN_CHUNK_PATTERN = r"(?=[^\{]*\})"
    IN_STRIP_PATTERN = r"(?=[^\}]*(\{|$))"

    # These are used by _verify
    _CHUNK = r"(\{%s+?\})+?" % CHUNK_TAG
    _STRIP = r"(%s+?)+?" % CHUNK_TAG
    _VALID = re.compile(r"^(\{?%s\}?)*?$" % CHUNK_TAG)
    _BRACKETS = re.compile(r"[^\{\}]+")
    _BALANCED_BRACKETS = re.compile(r"(\{\})*$")

    def __init__(self, chunk_struct, debug_level=1):
        """
        Construct a new ``ChunkString`` that encodes the chunking of
        the text ``tagged_tokens``.

        :type chunk_struct: Tree
        :param chunk_struct: The chunk structure to be further chunked.
        :type debug_level: int
        :param debug_level: The level of debugging which should be
            applied to transformations on the ``ChunkString``.  The
            valid levels are:

            - 0: no checks
            - 1: full check on to_chunkstruct
            - 2: full check on to_chunkstruct and cursory check after
              each transformation.
            - 3: full check on to_chunkstruct and full check after
              each transformation.

            We recommend you use at least level 1.  You should
            probably use level 3 if you use any non-standard
            subclasses of ``RegexpChunkRule``.
        """
        self._root_label = chunk_struct.label()
        self._pieces = chunk_struct[:]
        tags = [self._tag(tok) for tok in self._pieces]
        self._str = "<" + "><".join(tags) + ">"
        self._debug = debug_level

    def _tag(self, tok):
        # Return the tag of a (word, tag) token, or the label of a subtree.
        if isinstance(tok, tuple):
            return tok[1]
        elif isinstance(tok, Tree):
            return tok.label()
        else:
            raise ValueError("chunk structures must contain tagged " "tokens or trees")

    def _verify(self, s, verify_tags):
        """
        Check to make sure that ``s`` still corresponds to some chunked
        version of ``_pieces``.

        :type verify_tags: bool
        :param verify_tags: Whether the individual tags should be
            checked.  If this is false, ``_verify`` will check to make
            sure that ``_str`` encodes a chunked version of *some*
            list of tokens.  If this is true, then ``_verify`` will
            check to make sure that the tags in ``_str`` match those in
            ``_pieces``.

        :raise ValueError: if the internal string representation of
            this ``ChunkString`` is invalid or not consistent with _pieces.
        """
        # Check overall form
        if not ChunkString._VALID.match(s):
            raise ValueError(
                "Transformation generated invalid " "chunkstring:\n  %s" % s
            )

        # Check that parens are balanced.  If the string is long, we
        # have to do this in pieces, to avoid a maximum recursion
        # depth limit for regular expressions.
        brackets = ChunkString._BRACKETS.sub("", s)
        for i in range(1 + len(brackets) // 5000):
            substr = brackets[i * 5000 : i * 5000 + 5000]
            if not ChunkString._BALANCED_BRACKETS.match(substr):
                raise ValueError(
                    "Transformation generated invalid " "chunkstring:\n  %s" % s
                )

        if verify_tags <= 0:
            return

        tags1 = (re.split(r"[\{\}<>]+", s))[1:-1]
        tags2 = [self._tag(piece) for piece in self._pieces]
        if tags1 != tags2:
            raise ValueError(
                "Transformation generated invalid " "chunkstring: tag changed"
            )

    def to_chunkstruct(self, chunk_label="CHUNK"):
        """
        Return the chunk structure encoded by this ``ChunkString``.

        :rtype: Tree
        :raise ValueError: If a transformation has generated an
            invalid chunkstring.
        """
        if self._debug > 0:
            self._verify(self._str, 1)

        # Use this alternating list to create the chunkstruct.
        # Splitting on braces yields strip, chunk, strip, chunk, ... pieces.
        pieces = []
        index = 0
        piece_in_chunk = 0
        for piece in re.split("[{}]", self._str):

            # Find the list of tokens contained in this piece.
            length = piece.count("<")
            subsequence = self._pieces[index : index + length]

            # Add this list of tokens to our pieces.
            if piece_in_chunk:
                pieces.append(Tree(chunk_label, subsequence))
            else:
                pieces += subsequence

            # Update index, piece_in_chunk
            index += length
            piece_in_chunk = not piece_in_chunk

        return Tree(self._root_label, pieces)

    def xform(self, regexp, repl):
        """
        Apply the given transformation to the string encoding of this
        ``ChunkString``.  In particular, find all occurrences that match
        ``regexp``, and replace them using ``repl`` (as done by
        ``re.sub``).

        This transformation should only add and remove braces; it
        should *not* modify the sequence of angle-bracket delimited
        tags.  Furthermore, this transformation may not result in
        improper bracketing.  Note, in particular, that bracketing may
        not be nested.

        :type regexp: str or regexp
        :param regexp: A regular expression matching the substring
            that should be replaced.  This will typically include a
            named group, which can be used by ``repl``.
        :type repl: str
        :param repl: An expression specifying what should replace the
            matched substring.  Typically, this will include a named
            replacement group, specified by ``regexp``.
        :rtype: None
        :raise ValueError: If this transformation generated an
            invalid chunkstring.
        """
        # Do the actual substitution
        s = re.sub(regexp, repl, self._str)

        # The substitution might have generated "empty chunks"
        # (substrings of the form "{}").  Remove them, so they don't
        # interfere with other transformations.
        s = re.sub(r"\{\}", "", s)

        # Make sure that the transformation was legal.
        if self._debug > 1:
            self._verify(s, self._debug - 2)

        # Commit the transformation.
        self._str = s

    def __repr__(self):
        """
        Return a string representation of this ``ChunkString``.
        It has the form::

            <ChunkString: '{<DT><JJ><NN>}<VBN><IN>{<DT><NN>}'>

        :rtype: str
        """
        return "<ChunkString: %s>" % repr(self._str)

    def __str__(self):
        """
        Return a formatted representation of this ``ChunkString``.
        This representation will include extra spaces to ensure that
        tags will line up with the representation of other
        ``ChunkStrings`` for the same text, regardless of the chunking.

        :rtype: str
        """
        # Add spaces to make everything line up.
        # (The local name shadows the builtin ``str``; kept as-is.)
        str = re.sub(r">(?!\})", r"> ", self._str)
        str = re.sub(r"([^\{])<", r"\1 <", str)
        if str[0] == "<":
            str = " " + str
        return str
# Validates a tag pattern after angle brackets have been wrapped in parens:
# the whole string must be a sequence of tag-characters / repetition braces,
# or parenthesized angle-bracketed tag expressions.
CHUNK_TAG_PATTERN = re.compile(
    r"^(({}|<{}>)*)$".format(r"([^\{\}<>]|\{\d+,?\}|\{\d*,\d+\})+", r"[^\{\}<>]+")
)
The provided code snippet includes necessary dependencies for implementing the `tag_pattern2re_pattern` function. Write a Python function `def tag_pattern2re_pattern(tag_pattern)` to solve the following problem:
Convert a tag pattern to a regular expression pattern. A "tag pattern" is a modified version of a regular expression, designed for matching sequences of tags. The differences between regular expression patterns and tag patterns are: - In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so ``'<NN>+'`` matches one or more repetitions of ``'<NN>'``, not ``'<NN'`` followed by one or more repetitions of ``'>'``. - Whitespace in tag patterns is ignored. So ``'<DT> | <NN>'`` is equivalent to ``'<DT>|<NN>'`` - In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so ``'<NN.*>'`` matches any single tag starting with ``'NN'``. In particular, ``tag_pattern2re_pattern`` performs the following transformations on the given pattern: - Replace '.' with '[^<>{}]' - Remove any whitespace - Add extra parens around '<' and '>', to make '<' and '>' act like parentheses. E.g., so that in '<NN>+', the '+' has scope over the entire '<NN>'; and so that in '<NN|IN>', the '|' has scope over 'NN' and 'IN', but not '<' or '>'. - Check to make sure the resulting pattern is valid. :type tag_pattern: str :param tag_pattern: The tag pattern to convert to a regular expression pattern. :raise ValueError: If ``tag_pattern`` is not a valid tag pattern. In particular, ``tag_pattern`` should not include braces; and it should not contain nested or mismatched angle-brackets. :rtype: str :return: A regular expression pattern corresponding to ``tag_pattern``.
Here is the function:
def tag_pattern2re_pattern(tag_pattern):
    """
    Convert a tag pattern into an ordinary regular expression pattern.

    A "tag pattern" is a modified regular expression designed for
    matching sequences of tags:

    - ``'<'`` and ``'>'`` act as parentheses, so ``'<NN>+'`` matches one
      or more repetitions of ``'<NN>'``, not ``'<NN'`` followed by one or
      more repetitions of ``'>'``;
    - whitespace is ignored, so ``'<DT> | <NN>'`` equals ``'<DT>|<NN>'``;
    - ``'.'`` is equivalent to ``'[^{}<>]'``, so ``'<NN.*>'`` matches any
      single tag starting with ``'NN'``.

    The conversion removes whitespace, wraps ``'<'``/``'>'`` in extra
    parentheses so quantifiers and alternation scope over whole tags,
    validates the result, and replaces every unescaped ``'.'`` with
    ``ChunkString.CHUNK_TAG_CHAR``.

    :type tag_pattern: str
    :param tag_pattern: The tag pattern to convert to a regular
        expression pattern.
    :raise ValueError: If ``tag_pattern`` is not a valid tag pattern.
        In particular, ``tag_pattern`` should not include braces; and it
        should not contain nested or mismatched angle-brackets.
    :rtype: str
    :return: A regular expression pattern corresponding to
        ``tag_pattern``.
    """
    # Strip whitespace, then give '<' and '>' grouping behavior by
    # surrounding them with parentheses.
    converted = re.sub(r"\s", "", tag_pattern)
    converted = re.sub(r"<", "(<(", converted)
    converted = re.sub(r">", ")>)", converted)

    # Validate before introducing any more brace/angle characters.
    if not CHUNK_TAG_PATTERN.match(converted):
        raise ValueError("Bad tag pattern: %r" % converted)

    # Replace unescaped "." with CHUNK_TAG_CHAR.  This must happen after
    # validation, since the substitution introduces {}[]<> characters
    # that would confuse CHUNK_TAG_PATTERN.  The engine here lacks
    # lookbehind assertions, so "unescaped" is detected on the *reversed*
    # string with a lookahead: a '.' is escaped iff it is followed, in
    # reverse order, by an odd number of backslashes.
    flipped = converted[::-1]
    flipped = re.sub(
        r"\.(?!\\(\\\\)*($|[^\\]))",
        ChunkString.CHUNK_TAG_CHAR[::-1],
        flipped,
    )
    return flipped[::-1]
170,904 | import re
import regex
from nltk.chunk.api import ChunkParserI
from nltk.tree import Tree
class RegexpParser(ChunkParserI):
    r"""
    A grammar based chunk parser.  ``chunk.RegexpParser`` uses a set of
    regular expression patterns to specify the behavior of the parser.
    The chunking of the text is encoded using a ``ChunkString``, and
    each rule acts by modifying the chunking in the ``ChunkString``.
    The rules are all implemented using regular expression matching
    and substitution.

    A grammar contains one or more clauses in the following form::

        NP:
          {<DT|JJ>}          # chunk determiners and adjectives
          }<[\.VI].*>+{      # strip any tag beginning with V, I, or .
          <.*>}{<DT>         # split a chunk at a determiner
          <DT|JJ>{}<NN.*>    # merge chunk ending with det/adj
                             # with one starting with a noun

    The patterns of a clause are executed in order.  An earlier
    pattern may introduce a chunk boundary that prevents a later
    pattern from executing.  Sometimes an individual pattern will
    match on multiple, overlapping extents of the input.  As with
    regular expression substitution more generally, the chunker will
    identify the first match possible, then continue looking for matches
    after this one has ended.

    The clauses of a grammar are also executed in order.  A cascaded
    chunk parser is one having more than one clause.  The maximum depth
    of a parse tree created by this chunk parser is the same as the
    number of clauses in the grammar.

    When tracing is turned on, the comment portion of a line is displayed
    each time the corresponding pattern is applied.

    :type _start: str
    :ivar _start: The start symbol of the grammar (the root node of
        resulting trees)
    :type _stages: int
    :ivar _stages: The list of parsing stages corresponding to the grammar
    """

    def __init__(self, grammar, root_label="S", loop=1, trace=0):
        """
        Create a new chunk parser, from the given start state
        and set of chunk patterns.

        :param grammar: The grammar, or a list of RegexpChunkParser objects
        :type grammar: str or list(RegexpChunkParser)
        :param root_label: The top node of the tree being created
        :type root_label: str or Nonterminal
        :param loop: The number of times to run through the patterns
        :type loop: int
        :type trace: int
        :param trace: The level of tracing that should be used when
            parsing a text.  ``0`` will generate no tracing output;
            ``1`` will generate normal tracing output; and ``2`` or
            higher will generate verbose tracing output.
        """
        self._trace = trace
        self._stages = []
        self._grammar = grammar
        self._loop = loop

        if isinstance(grammar, str):
            self._read_grammar(grammar, root_label, trace)
        else:
            # Make sure the grammar looks like it has the right type:
            type_err = (
                "Expected string or list of RegexpChunkParsers " "for the grammar."
            )
            try:
                grammar = list(grammar)
            except BaseException as e:
                raise TypeError(type_err) from e
            for elt in grammar:
                if not isinstance(elt, RegexpChunkParser):
                    raise TypeError(type_err)
            self._stages = grammar

    def _read_grammar(self, grammar, root_label, trace):
        """
        Helper function for __init__: read the grammar if it is a
        string.
        """
        rules = []
        lhs = None
        pattern = regex.compile("(?P<nonterminal>(\\.|[^:])*)(:(?P<rule>.*))")
        for line in grammar.split("\n"):
            line = line.strip()

            # New stage begins if there's an unescaped ':'
            m = pattern.match(line)
            if m:
                # Record the stage that we just completed.
                self._add_stage(rules, lhs, root_label, trace)
                # Start a new stage.
                lhs = m.group("nonterminal").strip()
                rules = []
                line = m.group("rule").strip()

            # Skip blank & comment-only lines
            if line == "" or line.startswith("#"):
                continue

            # Add the rule
            rules.append(RegexpChunkRule.fromstring(line))

        # Record the final stage
        self._add_stage(rules, lhs, root_label, trace)

    def _add_stage(self, rules, lhs, root_label, trace):
        """
        Helper function for __init__: add a new stage to the parser.
        """
        if rules != []:
            if not lhs:
                raise ValueError("Expected stage marker (eg NP:)")
            parser = RegexpChunkParser(
                rules, chunk_label=lhs, root_label=root_label, trace=trace
            )
            self._stages.append(parser)

    def parse(self, chunk_struct, trace=None):
        """
        Apply the chunk parser to this input.

        :type chunk_struct: Tree
        :param chunk_struct: the chunk structure to be (further) chunked
            (this tree is modified, and is also returned)
        :type trace: int
        :param trace: The level of tracing that should be used when
            parsing a text.  ``0`` will generate no tracing output;
            ``1`` will generate normal tracing output; and ``2`` or
            higher will generate verbose tracing output.  This value
            overrides the trace level value that was given to the
            constructor.
        :return: the chunked output.
        :rtype: Tree
        """
        if trace is None:
            trace = self._trace
        # Run every stage, ``loop`` times over; each stage's output feeds
        # the next stage.
        for _ in range(self._loop):
            for parser in self._stages:
                chunk_struct = parser.parse(chunk_struct, trace=trace)
        return chunk_struct

    def __repr__(self):
        """
        :return: a concise string representation of this ``chunk.RegexpParser``.
        :rtype: str
        """
        return "<chunk.RegexpParser with %d stages>" % len(self._stages)

    def __str__(self):
        """
        :return: a verbose string representation of this
            ``RegexpParser``.
        :rtype: str
        """
        s = "chunk.RegexpParser with %d stages:\n" % len(self._stages)
        for parser in self._stages:
            s += "%s\n" % parser
        return s[:-1]
def demo_eval(chunkparser, text):
    """
    Demonstration code for evaluating a chunk parser with a ``ChunkScore``.

    ``text`` is assumed to hold one sentence per line, each in the form
    expected by ``tree.chunk``.  Every sentence is parsed with the given
    chunk parser and scored against the gold chunking; the final score
    (precision, recall, and f-measure) is printed, along with the sets of
    missed and incorrect chunks (at most 10 of each are reported).

    :param chunkparser: The chunkparser to be tested
    :type chunkparser: ChunkParserI
    :param text: The chunked tagged text that should be used for
        evaluation.
    :type text: str
    """
    from nltk import chunk
    from nltk.tree import Tree

    # Parse each sentence and accumulate its score.
    score = chunk.ChunkScore()
    for sentence in text.split("\n"):
        print(sentence)
        sentence = sentence.strip()
        if not sentence:
            continue
        gold = chunk.tagstr2tree(sentence)
        test = chunkparser.parse(Tree("S", gold.leaves()), trace=1)
        score.score(gold, test)
    print()

    # Overall precision / recall / f-measure banner.
    print("/" + ("=" * 75) + "\\")
    print("Scoring", chunkparser)
    print("-" * 77)
    print("Precision: %5.1f%%" % (score.precision() * 100), " " * 4, end=" ")
    print("Recall: %5.1f%%" % (score.recall() * 100), " " * 6, end=" ")
    print("F-Measure: %5.1f%%" % (score.f_measure() * 100))

    # Sample of gold chunks the parser failed to find.
    missed = score.missed()
    if missed:
        print("Missed:")
        for chk in missed[:10]:
            print("  ", " ".join(map(str, chk)))
        if len(missed) > 10:
            print("  ...")

    # Sample of chunks the parser found that are not in the gold standard.
    incorrect = score.incorrect()
    if incorrect:
        print("Incorrect:")
        for chk in incorrect[:10]:
            print("  ", " ".join(map(str, chk)))
        if len(incorrect) > 10:
            print("  ...")

    print("\\" + ("=" * 75) + "/")
    print()
# CoNLL-2000 chunking corpus (train/test files with NP/VP/PP chunk types),
# wrapped in a LazyCorpusLoader; used by demo() below via chunked_sents()
# for accuracy evaluation.
conll2000: ConllChunkCorpusReader = LazyCorpusLoader(
    "conll2000",
    ConllChunkCorpusReader,
    ["train.txt", "test.txt"],
    ("NP", "VP", "PP"),
    tagset="wsj",
    encoding="ascii",
)
The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem:
A demonstration for the ``RegexpChunkParser`` class. A single text is parsed with four different chunk parsers, using a variety of rules and strategies.
Here is the function:
def demo():
    """
    A demonstration for the ``RegexpChunkParser`` class.  A single text is
    parsed with four different chunk parsers, using a variety of rules
    and strategies.
    """
    from nltk import Tree, chunk

    # Gold-standard evaluation text: one bracket-chunked, tagged sentence
    # per line, in the form expected by demo_eval() / tagstr2tree().
    text = """\
    [ the/DT little/JJ cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] ./.
    [ John/NNP ] saw/VBD [the/DT cats/NNS] [the/DT dog/NN] chased/VBD ./.
    [ John/NNP ] thinks/VBZ [ Mary/NN ] saw/VBD [ the/DT cat/NN ] sit/VB on/IN [ the/DT mat/NN ]./.
    """

    print("*" * 75)
    print("Evaluation text:")
    print(text)
    print("*" * 75)
    print()

    # Strategy 1: build NP chunks directly from tag sequences.
    grammar = r"""
    NP:                   # NP stage
      {<DT>?<JJ>*<NN>}    # chunk determiners, adjectives and nouns
      {<NNP>+}            # chunk proper nouns
    """
    cp = chunk.RegexpParser(grammar)
    demo_eval(cp, text)

    # Strategy 2: chunk everything first, then strip and merge.
    grammar = r"""
    NP:
      {<.*>}              # start by chunking each tag
      }<[\.VI].*>+{       # unchunk any verbs, prepositions or periods
      <DT|JJ>{}<NN.*>     # merge det/adj with nouns
    """
    cp = chunk.RegexpParser(grammar)
    demo_eval(cp, text)

    # Strategy 3: a two-stage cascade (NPs, then VPs).
    grammar = r"""
    NP: {<DT>?<JJ>*<NN>}    # chunk determiners, adjectives and nouns
    VP: {<TO>?<VB.*>}       # VP = verb words
    """
    cp = chunk.RegexpParser(grammar)
    demo_eval(cp, text)

    # Strategy 4: a three-stage cascade (NPs, PPs, then VPs).
    grammar = r"""
    NP: {<.*>*}             # start by chunking everything
        }<[\.VI].*>+{       # strip any verbs, prepositions or periods
        <.*>}{<DT>          # separate on determiners
    PP: {<IN><NP>}          # PP = preposition + noun phrase
    VP: {<VB.*><NP|PP>*}    # VP = verb words + NPs and PPs
    """
    cp = chunk.RegexpParser(grammar)
    demo_eval(cp, text)

    # Evaluation

    from nltk.corpus import conll2000

    print()
    print("Demonstration of empty grammar:")

    cp = chunk.RegexpParser("")
    print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt", chunk_types=("NP",))))

    print()
    print("Demonstration of accuracy evaluation using CoNLL tags:")

    grammar = r"""
    NP:
      {<.*>}              # start by chunking each tag
      }<[\.VI].*>+{       # unchunk any verbs, prepositions or periods
      <DT|JJ>{}<NN.*>     # merge det/adj with nouns
    """
    cp = chunk.RegexpParser(grammar)
    print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt")[:5]))

    print()
    print("Demonstration of tagged token input")

    grammar = r"""
    NP: {<.*>*}             # start by chunking everything
        }<[\.VI].*>+{       # strip any verbs, prepositions or periods
        <.*>}{<DT>          # separate on determiners
    PP: {<IN><NP>}          # PP = preposition + noun phrase
    VP: {<VB.*><NP|PP>*}    # VP = verb words + NPs and PPs
    """
    cp = chunk.RegexpParser(grammar)
    print(
        cp.parse(
            [
                ("the", "DT"),
                ("little", "JJ"),
                ("cat", "NN"),
                ("sat", "VBD"),
                ("on", "IN"),
                ("the", "DT"),
                ("mat", "NN"),
                (".", "."),
            ]
        )
    )
170,905 | import itertools as _itertools
from nltk.metrics import (
BigramAssocMeasures,
ContingencyMeasures,
QuadgramAssocMeasures,
TrigramAssocMeasures,
)
from nltk.metrics.spearman import ranks_from_scores, spearman_correlation
from nltk.probability import FreqDist
from nltk.util import ngrams
class BigramCollocationFinder(AbstractCollocationFinder):
    """A tool for the finding and ranking of bigram collocations or other
    association measures. It is often useful to use from_words() rather than
    constructing an instance directly.
    """

    # Default window size: 2 means only contiguous bigrams are counted.
    default_ws = 2

    def __init__(self, word_fd, bigram_fd, window_size=2):
        """Construct a BigramCollocationFinder, given FreqDists for
        appearances of words and (possibly non-contiguous) bigrams.
        """
        AbstractCollocationFinder.__init__(self, word_fd, bigram_fd)
        self.window_size = window_size

    @classmethod
    def from_words(cls, words, window_size=2):
        """Construct a BigramCollocationFinder for all bigrams in the given
        sequence. When window_size > 2, count non-contiguous bigrams, in the
        style of Church and Hanks's (1990) association ratio.
        """
        wfd = FreqDist()
        bfd = FreqDist()

        if window_size < 2:
            raise ValueError("Specify window_size at least 2")

        # Pad on the right so every word opens a full-width window; None
        # padding entries are skipped below.
        for window in ngrams(words, window_size, pad_right=True):
            w1 = window[0]
            if w1 is None:
                continue
            wfd[w1] += 1
            for w2 in window[1:]:
                if w2 is not None:
                    bfd[(w1, w2)] += 1
        return cls(wfd, bfd, window_size=window_size)

    def score_ngram(self, score_fn, w1, w2):
        """Returns the score for a given bigram using the given scoring
        function.  Following Church and Hanks (1990), counts are scaled by
        a factor of 1/(window_size - 1).

        Returns None when the bigram was never observed.
        """
        n_all = self.N
        n_ii = self.ngram_fd[(w1, w2)] / (self.window_size - 1.0)
        if not n_ii:
            return
        n_ix = self.word_fd[w1]
        n_xi = self.word_fd[w2]
        return score_fn(n_ii, (n_ix, n_xi), n_all)
def spearman_correlation(ranks1, ranks2):
    """Returns the Spearman correlation coefficient for two rankings, which
    should be dicts or sequences of (key, rank). The coefficient ranges from
    -1.0 (ranks are opposite) to 1.0 (ranks are identical), and is only
    calculated for keys in both rankings (for meaningful results, remove keys
    present in only one list before ranking)."""
    count = 0
    sq_diff_total = 0
    # Accumulate squared rank differences over the shared keys.
    for _key, diff in _rank_dists(ranks1, ranks2):
        sq_diff_total += diff * diff
        count += 1
    try:
        # Standard Spearman formula: 1 - 6 * sum(d^2) / (n * (n^2 - 1)).
        return 1 - (6 * sq_diff_total / (count * (count * count - 1)))
    except ZeroDivisionError:
        # Result is undefined if only one item is ranked
        return 0.0
def ranks_from_scores(scores, rank_gap=1e-15):
    """Given a sequence of (key, score) tuples, yields each key with an
    increasing rank, tying with the previous key's rank when the difference
    between their scores is at most ``rank_gap``.  Suitable for use as an
    argument to ``spearman_correlation``.
    """
    last_score = None
    rank = 0
    for position, (key, score) in enumerate(scores):
        # Advance the rank only when the score moves by more than rank_gap.
        # A TypeError (first item, or a non-numeric score) keeps the tie.
        try:
            if abs(score - last_score) > rank_gap:
                rank = position
        except TypeError:
            pass

        yield key, rank
        last_score = score
# Corpus loaders used by demo() below: the multilingual stopword lists and
# the Web Text corpus (plain-text files, excluding README/dotfiles).
stopwords: WordListCorpusReader = LazyCorpusLoader(
    "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8"
)
webtext: PlaintextCorpusReader = LazyCorpusLoader(
    "webtext", PlaintextCorpusReader, r"(?!README|\.).*\.txt", encoding="ISO-8859-2"
)
The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo(scorer=None, compare_scorer=None)` to solve the following problem:
Finds bigram collocations in the files of the WebText corpus.
Here is the function:
def demo(scorer=None, compare_scorer=None):
    """Finds bigram collocations in the files of the WebText corpus."""
    from nltk.metrics import (
        BigramAssocMeasures,
        ranks_from_scores,
        spearman_correlation,
    )

    # Default scorer: likelihood ratio, compared against raw frequency.
    if scorer is None:
        scorer = BigramAssocMeasures.likelihood_ratio
    if compare_scorer is None:
        compare_scorer = BigramAssocMeasures.raw_freq

    from nltk.corpus import stopwords, webtext

    ignored_words = stopwords.words("english")

    def word_filter(w):
        # Drop very short words and English stopwords before scoring.
        return len(w) < 3 or w.lower() in ignored_words

    for file in webtext.fileids():
        words = [word.lower() for word in webtext.words(file)]
        finder = BigramCollocationFinder.from_words(words)
        finder.apply_freq_filter(3)
        finder.apply_word_filter(word_filter)
        # Rank-correlate the two scorers' orderings of the same bigrams.
        corr = spearman_correlation(
            ranks_from_scores(finder.score_ngrams(scorer)),
            ranks_from_scores(finder.score_ngrams(compare_scorer)),
        )
        print(file)
        print("\t", [" ".join(tup) for tup in finder.nbest(scorer, 15)])
        print(f"\t Correlation to {compare_scorer.__name__}: {corr:0.4f}")
170,906 | import html
from typing import List
import regex
from nltk.tokenize.api import TokenizerI
# Matches an HTML character entity: "&name;", "&#ddd;" (decimal) or
# "&#xhhh;" (hex).  Group 1 captures the "#"/"#x" prefix (empty for named
# entities), group 2 the "x" hex marker, and group 3 the entity body.
ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);")
def _str_to_unicode(text, encoding=None, errors="strict"):
if encoding is None:
encoding = "utf-8"
if isinstance(text, bytes):
return text.decode(encoding, errors)
return text
The provided code snippet includes necessary dependencies for implementing the `_replace_html_entities` function. Write a Python function `def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8")` to solve the following problem:
Remove entities from text by converting them to their corresponding unicode character. :param text: a unicode string or a byte string encoded in the given `encoding` (which defaults to 'utf-8'). :param list keep: list of entity names which should not be replaced.\ This supports both numeric entities (``&#nnnn;`` and ``&#hhhh;``) and named entities (such as `` `` or ``>``). :param bool remove_illegal: If `True`, entities that can't be converted are\ removed. Otherwise, entities that can't be converted are kept "as is". :returns: A unicode string with the entities removed. See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py >>> from nltk.tokenize.casual import _replace_html_entities >>> _replace_html_entities(b'Price: £100') 'Price: \\xa3100' >>> print(_replace_html_entities(b'Price: £100')) Price: £100 >>>
Here is the function:
def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"):
    """
    Remove entities from text by converting them to their
    corresponding unicode character.

    :param text: a unicode string or a byte string encoded in the given
        `encoding` (which defaults to 'utf-8').

    :param list keep: list of entity names which should not be replaced.\
        This supports both numeric entities (``&#nnnn;`` and ``&#hhhh;``)
        and named entities (such as ``&nbsp;`` or ``&gt;``).

    :param bool remove_illegal: If `True`, entities that can't be converted are\
        removed. Otherwise, entities that can't be converted are kept "as
        is".

    :returns: A unicode string with the entities removed.

    See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py

        >>> from nltk.tokenize.casual import _replace_html_entities
        >>> _replace_html_entities(b'Price: &pound;100')
        'Price: \\xa3100'
        >>> print(_replace_html_entities(b'Price: &pound;100'))
        Price: £100
        >>>
    """

    def _convert_entity(match):
        # Group 3 of ENT_RE is the entity body, i.e. the text between the
        # "&" / "&#" / "&#x" prefix and the closing ";".
        entity_body = match.group(3)
        if match.group(1):
            # Numeric character reference; group 2 is "x" for hexadecimal.
            try:
                if match.group(2):
                    number = int(entity_body, 16)
                else:
                    number = int(entity_body, 10)
                # Numeric character references in the 80-9F range are typically
                # interpreted by browsers as representing the characters mapped
                # to bytes 80-9F in the Windows-1252 encoding. For more info
                # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets
                if 0x80 <= number <= 0x9F:
                    return bytes((number,)).decode("cp1252")
            except ValueError:
                # Body is not a parsable number; treated as unconvertible below.
                number = None
        else:
            # Named entity: preserve it verbatim if the caller asked to keep it,
            # otherwise look up its code point.
            if entity_body in keep:
                return match.group(0)
            number = html.entities.name2codepoint.get(entity_body)
        if number is not None:
            try:
                return chr(number)
            except (ValueError, OverflowError):
                # Code point out of range for chr(); fall through.
                pass

        # Unconvertible entity: drop it, or keep the raw "&...;" text.
        return "" if remove_illegal else match.group(0)

    return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))
170,907 | import html
from typing import List
import regex
from nltk.tokenize.api import TokenizerI
The provided code snippet includes necessary dependencies for implementing the `reduce_lengthening` function. Write a Python function `def reduce_lengthening(text)` to solve the following problem:
Replace repeated character sequences of length 3 or greater with sequences of length 3.
Here is the function:
def reduce_lengthening(text):
    """
    Replace repeated character sequences of length 3 or greater with sequences
    of length 3.
    """
    # Collapse any run of three or more copies of one character to exactly three.
    return regex.sub(r"(.)\1{2,}", r"\1\1\1", text)
170,908 | import html
from typing import List
import regex
from nltk.tokenize.api import TokenizerI
# Matches a Twitter username handle: "@" followed by 1-15 word characters.
# The lookbehind rejects handles preceded by word characters or !@#$%&*
# (so e.g. email addresses are left alone); the lookaheads reject handles
# immediately followed by another "@".
HANDLES_RE = regex.compile(
    r"(?<![A-Za-z0-9_!@#\$%&*])@"
    r"(([A-Za-z0-9_]){15}(?!@)|([A-Za-z0-9_]){1,14}(?![A-Za-z0-9_]*@))"
)
The provided code snippet includes necessary dependencies for implementing the `remove_handles` function. Write a Python function `def remove_handles(text)` to solve the following problem:
Remove Twitter username handles from text.
Here is the function:
def remove_handles(text):
    """
    Remove Twitter username handles from text.

    :param text: the text to strip handles from.
    :rtype: str
    """
    # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly
    return HANDLES_RE.sub(" ", text)
170,909 | import html
from typing import List
import regex
from nltk.tokenize.api import TokenizerI
class TweetTokenizer(TokenizerI):
    r"""
    Tokenizer for tweets.

        >>> from nltk.tokenize import TweetTokenizer
        >>> tknzr = TweetTokenizer()
        >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
        >>> tknzr.tokenize(s0) # doctest: +NORMALIZE_WHITESPACE
        ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->',
        '<--']

    Examples using `strip_handles` and `reduce_len parameters`:

        >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
        >>> s1 = '@remy: This is waaaaayyyy too much for you!!!!!!'
        >>> tknzr.tokenize(s1)
        [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
    """

    # Values used to lazily compile WORD_RE and PHONE_WORD_RE,
    # which are the core tokenizing regexes.  Stored on the class so the
    # (expensive) compilation happens once, shared by all instances.
    _WORD_RE = None
    _PHONE_WORD_RE = None

    ######################################################################

    def __init__(
        self,
        preserve_case=True,
        reduce_len=False,
        strip_handles=False,
        match_phone_numbers=True,
    ):
        """
        Create a `TweetTokenizer` instance with settings for use in the `tokenize` method.

        :param preserve_case: Flag indicating whether to preserve the casing (capitalisation)
            of text used in the `tokenize` method. Defaults to True.
        :type preserve_case: bool
        :param reduce_len: Flag indicating whether to replace repeated character sequences
            of length 3 or greater with sequences of length 3. Defaults to False.
        :type reduce_len: bool
        :param strip_handles: Flag indicating whether to remove Twitter handles of text used
            in the `tokenize` method. Defaults to False.
        :type strip_handles: bool
        :param match_phone_numbers: Flag indicating whether the `tokenize` method should look
            for phone numbers. Defaults to True.
        :type match_phone_numbers: bool
        """
        self.preserve_case = preserve_case
        self.reduce_len = reduce_len
        self.strip_handles = strip_handles
        self.match_phone_numbers = match_phone_numbers

    def tokenize(self, text: str) -> List[str]:
        """Tokenize the input text.

        :param text: str
        :rtype: list(str)
        :return: a tokenized list of strings; joining this list returns\
        the original string if `preserve_case=False`.
        """
        # Fix HTML character entities:
        text = _replace_html_entities(text)
        # Remove username handles
        if self.strip_handles:
            text = remove_handles(text)
        # Normalize word lengthening
        if self.reduce_len:
            text = reduce_lengthening(text)
        # Shorten problematic sequences of characters
        safe_text = HANG_RE.sub(r"\1\1\1", text)
        # Recognise phone numbers during tokenization
        if self.match_phone_numbers:
            words = self.PHONE_WORD_RE.findall(safe_text)
        else:
            words = self.WORD_RE.findall(safe_text)
        # Possibly alter the case, but avoid changing emoticons like :D into :d:
        if not self.preserve_case:
            words = list(
                map((lambda x: x if EMOTICON_RE.search(x) else x.lower()), words)
            )
        return words

    @property
    def WORD_RE(self) -> "regex.Pattern":
        """Core TweetTokenizer regex"""
        # Compiles the regex for this and all future instantiations of TweetTokenizer.
        if not type(self)._WORD_RE:
            type(self)._WORD_RE = regex.compile(
                f"({'|'.join(REGEXPS)})",
                regex.VERBOSE | regex.I | regex.UNICODE,
            )
        return type(self)._WORD_RE

    @property
    def PHONE_WORD_RE(self) -> "regex.Pattern":
        """Secondary core TweetTokenizer regex"""
        # Compiles the regex for this and all future instantiations of TweetTokenizer.
        if not type(self)._PHONE_WORD_RE:
            type(self)._PHONE_WORD_RE = regex.compile(
                f"({'|'.join(REGEXPS_PHONE)})",
                regex.VERBOSE | regex.I | regex.UNICODE,
            )
        return type(self)._PHONE_WORD_RE
The provided code snippet includes necessary dependencies for implementing the `casual_tokenize` function. Write a Python function `def casual_tokenize( text, preserve_case=True, reduce_len=False, strip_handles=False, match_phone_numbers=True, )` to solve the following problem:
Convenience function for wrapping the tokenizer.
Here is the function:
def casual_tokenize(
    text,
    preserve_case=True,
    reduce_len=False,
    strip_handles=False,
    match_phone_numbers=True,
):
    """
    Convenience function for wrapping the tokenizer.
    """
    # Build a one-shot TweetTokenizer with the requested settings and apply it.
    tokenizer = TweetTokenizer(
        preserve_case=preserve_case,
        reduce_len=reduce_len,
        strip_handles=strip_handles,
        match_phone_numbers=match_phone_numbers,
    )
    return tokenizer.tokenize(text)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.