id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
170,510 | import random
import warnings
from abc import ABCMeta, abstractmethod
from bisect import bisect
from itertools import accumulate
from nltk.lm.counter import NgramCounter
from nltk.lm.util import log_base2
from nltk.lm.vocabulary import Vocabulary
def _random_generator(seed_or_generator):
if isinstance(seed_or_generator, random.Random):
return seed_or_generator
return random.Random(seed_or_generator) | null |
170,511 | import random
import warnings
from abc import ABCMeta, abstractmethod
from bisect import bisect
from itertools import accumulate
from nltk.lm.counter import NgramCounter
from nltk.lm.util import log_base2
from nltk.lm.vocabulary import Vocabulary
bisect = bisect_right
The provided code snippet includes necessary dependencies for implementing the `_weighted_choice` function. Write a Python function `def _weighted_choice(population, weights, random_generator=None)` to solve the following problem:
Like random.choice, but with weights. Heavily inspired by python 3.6 `random.choices`.
Here is the function:
def _weighted_choice(population, weights, random_generator=None):
"""Like random.choice, but with weights.
Heavily inspired by python 3.6 `random.choices`.
"""
if not population:
raise ValueError("Can't choose from empty population")
if len(population) != len(weights):
raise ValueError("The number of weights does not match the population")
cum_weights = list(accumulate(weights))
total = cum_weights[-1]
threshold = random_generator.random()
return population[bisect(cum_weights, total * threshold)] | Like random.choice, but with weights. Heavily inspired by python 3.6 `random.choices`. |
170,512 | from math import log
NEG_INF = float("-inf")
def log(x: SupportsFloat, base: SupportsFloat = ...) -> float: ...
The provided code snippet includes necessary dependencies for implementing the `log_base2` function. Write a Python function `def log_base2(score)` to solve the following problem:
Convenience function for computing logarithms with base 2.
Here is the function:
def log_base2(score):
    """Convenience function for computing logarithms with base 2."""
    # A zero probability maps to negative infinity rather than raising.
    return float("-inf") if score == 0.0 else log(score, 2)
170,513 | from operator import methodcaller
from nltk.lm.api import Smoothing
from nltk.probability import ConditionalFreqDist
def methodcaller(__name: str, *args: Any, **kwargs: Any) -> Callable[..., Any]: ...
class ConditionalFreqDist(defaultdict):
    """
    A collection of frequency distributions for a single experiment
    run under different conditions. Conditional frequency
    distributions are used to record the number of times each sample
    occurred, given the condition under which the experiment was run.
    For example, a conditional frequency distribution could be used to
    record the frequency of each word (type) in a document, given its
    length. Formally, a conditional frequency distribution can be
    defined as a function that maps from each condition to the
    FreqDist for the experiment under that condition.
    Conditional frequency distributions are typically constructed by
    repeatedly running an experiment under a variety of conditions,
    and incrementing the sample outcome counts for the appropriate
    conditions. For example, the following code will produce a
    conditional frequency distribution that encodes how often each
    word type occurs, given the length of that word type:
    >>> from nltk.probability import ConditionalFreqDist
    >>> from nltk.tokenize import word_tokenize
    >>> sent = "the the the dog dog some other words that we do not care about"
    >>> cfdist = ConditionalFreqDist()
    >>> for word in word_tokenize(sent):
    ...     condition = len(word)
    ...     cfdist[condition][word] += 1
    An equivalent way to do this is with the initializer:
    >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
    The frequency distribution for each condition is accessed using
    the indexing operator:
    >>> cfdist[3]
    FreqDist({'the': 3, 'dog': 2, 'not': 1})
    >>> cfdist[3].freq('the')
    0.5
    >>> cfdist[3]['dog']
    2
    When the indexing operator is used to access the frequency
    distribution for a condition that has not been accessed before,
    ``ConditionalFreqDist`` creates a new empty FreqDist for that
    condition.
    """
    def __init__(self, cond_samples=None):
        """
        Construct a new empty conditional frequency distribution. In
        particular, the count for every sample, under every condition,
        is zero.
        :param cond_samples: The samples to initialize the conditional
            frequency distribution with
        :type cond_samples: Sequence of (condition, sample) tuples
        """
        # defaultdict(FreqDist): indexing an unseen condition creates an
        # empty FreqDist on the fly.
        defaultdict.__init__(self, FreqDist)
        if cond_samples:
            for (cond, sample) in cond_samples:
                self[cond][sample] += 1
    def __reduce__(self):
        # Pickle support: rebuild via the empty constructor, then restore
        # the (condition, FreqDist) pairs through the items iterator slot.
        kv_pairs = ((cond, self[cond]) for cond in self.conditions())
        return (self.__class__, (), None, None, kv_pairs)
    def conditions(self):
        """
        Return a list of the conditions that have been accessed for
        this ``ConditionalFreqDist``. Use the indexing operator to
        access the frequency distribution for a given condition.
        Note that the frequency distributions for some conditions
        may contain zero sample outcomes.
        :rtype: list
        """
        return list(self.keys())
    def N(self):
        """
        Return the total number of sample outcomes that have been
        recorded by this ``ConditionalFreqDist``.
        :rtype: int
        """
        return sum(fdist.N() for fdist in self.values())
    def plot(
        self,
        *args,
        samples=None,
        title="",
        cumulative=False,
        percents=False,
        conditions=None,
        show=True,
        **kwargs,
    ):
        """
        Plot the given samples from the conditional frequency distribution.
        For a cumulative plot, specify cumulative=True. Additional ``*args`` and
        ``**kwargs`` are passed to matplotlib's plot function.
        (Requires Matplotlib to be installed.)
        :param samples: The samples to plot
        :type samples: list
        :param title: The title for the graph
        :type title: str
        :param cumulative: Whether the plot is cumulative. (default = False)
        :type cumulative: bool
        :param percents: Whether the plot uses percents instead of counts. (default = False)
        :type percents: bool
        :param conditions: The conditions to plot (default is all)
        :type conditions: list
        :param show: Whether to show the plot, or only return the ax.
        :type show: bool
        """
        try:
            import matplotlib.pyplot as plt  # import statement fix
        except ImportError as e:
            raise ValueError(
                "The plot function requires matplotlib to be installed."
                "See https://matplotlib.org/"
            ) from e
        if not conditions:
            conditions = self.conditions()
        else:
            # Silently drop requested conditions that were never recorded.
            conditions = [c for c in conditions if c in self]
        if not samples:
            # Default sample axis: every sample seen under any plotted condition.
            samples = sorted({v for c in conditions for v in self[c]})
        if "linewidth" not in kwargs:
            kwargs["linewidth"] = 2
        ax = plt.gca()
        if conditions:
            freqs = []
            for condition in conditions:
                if cumulative:
                    # freqs should be a list of list where each sub list will be a frequency of a condition
                    freq = list(self[condition]._cumulative_frequencies(samples))
                else:
                    freq = [self[condition][sample] for sample in samples]
                if percents:
                    freq = [f / self[condition].N() * 100 for f in freq]
                freqs.append(freq)
            if cumulative:
                ylabel = "Cumulative "
                legend_loc = "lower right"
            else:
                ylabel = ""
                legend_loc = "upper right"
            if percents:
                ylabel += "Percents"
            else:
                ylabel += "Counts"
            i = 0
            for freq in freqs:
                kwargs["label"] = conditions[i]  # label for each condition
                i += 1
                ax.plot(freq, *args, **kwargs)
            ax.legend(loc=legend_loc)
            ax.grid(True, color="silver")
            ax.set_xticks(range(len(samples)))
            ax.set_xticklabels([str(s) for s in samples], rotation=90)
            if title:
                ax.set_title(title)
            ax.set_xlabel("Samples")
            ax.set_ylabel(ylabel)
        if show:
            plt.show()
        return ax
    def tabulate(self, *args, **kwargs):
        """
        Tabulate the given samples from the conditional frequency distribution.
        :param samples: The samples to plot
        :type samples: list
        :param conditions: The conditions to plot (default is all)
        :type conditions: list
        :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
        :type title: bool
        """
        cumulative = _get_kwarg(kwargs, "cumulative", False)
        conditions = _get_kwarg(kwargs, "conditions", sorted(self.conditions()))
        samples = _get_kwarg(
            kwargs,
            "samples",
            sorted({v for c in conditions if c in self for v in self[c]}),
        )  # this computation could be wasted
        # NOTE(review): max() over empty `samples` or `conditions` raises
        # ValueError -- confirm callers guard against empty distributions.
        width = max(len("%s" % s) for s in samples)
        freqs = dict()
        for c in conditions:
            if cumulative:
                freqs[c] = list(self[c]._cumulative_frequencies(samples))
            else:
                freqs[c] = [self[c][sample] for sample in samples]
            # Widen the column to fit the largest count encountered.
            width = max(width, max(len("%d" % f) for f in freqs[c]))
        condition_size = max(len("%s" % c) for c in conditions)
        print(" " * condition_size, end=" ")
        for s in samples:
            print("%*s" % (width, s), end=" ")
        print()
        for c in conditions:
            print("%*s" % (condition_size, c), end=" ")
            for f in freqs[c]:
                print("%*d" % (width, f), end=" ")
            print()
    # Mathematical operators
    def __add__(self, other):
        """
        Add counts from two ConditionalFreqDists.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = self.copy()
        for cond in other.conditions():
            result[cond] += other[cond]
        return result
    def __sub__(self, other):
        """
        Subtract count, but keep only results with positive counts.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = self.copy()
        for cond in other.conditions():
            result[cond] -= other[cond]
            # Drop conditions whose FreqDist became empty after subtraction.
            if not result[cond]:
                del result[cond]
        return result
    def __or__(self, other):
        """
        Union is the maximum of value in either of the input counters.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = self.copy()
        for cond in other.conditions():
            result[cond] |= other[cond]
        return result
    def __and__(self, other):
        """
        Intersection is the minimum of corresponding counts.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = ConditionalFreqDist()
        for cond in self.conditions():
            newfreqdist = self[cond] & other[cond]
            if newfreqdist:
                result[cond] = newfreqdist
        return result
    # @total_ordering doesn't work here, since the class inherits from a builtin class
    def __le__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types("<=", self, other)
        return set(self.conditions()).issubset(other.conditions()) and all(
            self[c] <= other[c] for c in self.conditions()
        )
    def __lt__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types("<", self, other)
        return self <= other and self != other
    def __ge__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types(">=", self, other)
        return other <= self
    def __gt__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types(">", self, other)
        return other < self
    def deepcopy(self):
        from copy import deepcopy
        return deepcopy(self)
    # `copy()` is deliberately a deep copy so that operator results do not
    # share FreqDist instances with their operands.
    copy = deepcopy
    def __repr__(self):
        """
        Return a string representation of this ``ConditionalFreqDist``.
        :rtype: str
        """
        return "<ConditionalFreqDist with %d conditions>" % len(self)
The provided code snippet includes necessary dependencies for implementing the `_count_values_gt_zero` function. Write a Python function `def _count_values_gt_zero(distribution)` to solve the following problem:
Count values that are greater than zero in a distribution. Assumes distribution is either a mapping with counts as values or an instance of `nltk.ConditionalFreqDist`.
Here is the function:
def _count_values_gt_zero(distribution):
    """Count values that are greater than zero in a distribution.
    Assumes distribution is either a mapping with counts as values or
    an instance of `nltk.ConditionalFreqDist`.
    """
    if isinstance(distribution, ConditionalFreqDist):
        # Nested distributions contribute their total sample count N().
        as_count = methodcaller("N")
    else:
        as_count = lambda count: count
    # Explicit > 0 comparison guards against negative counts.
    positive = 0
    for dist_or_count in distribution.values():
        if as_count(dist_or_count) > 0:
            positive += 1
    return positive
170,514 | from functools import partial
from itertools import chain
from nltk.util import everygrams, pad_sequence
pad_both_ends = partial(
pad_sequence,
pad_left=True,
left_pad_symbol="<s>",
pad_right=True,
right_pad_symbol="</s>",
)
pad_both_ends.__doc__ = """Pads both ends of a sentence to length specified by ngram order.
Following convention <s> pads the start of sentence </s> pads its end.
"""
def everygrams(
    sequence, min_len=1, max_len=-1, pad_left=False, pad_right=False, **kwargs
):
    """
    Returns all possible ngrams generated from a sequence of items, as an iterator.
    >>> sent = 'a b c'.split()
    New version outputs for everygrams.
    >>> list(everygrams(sent))
    [('a',), ('a', 'b'), ('a', 'b', 'c'), ('b',), ('b', 'c'), ('c',)]
    Old version outputs for everygrams.
    >>> sorted(everygrams(sent), key=len)
    [('a',), ('b',), ('c',), ('a', 'b'), ('b', 'c'), ('a', 'b', 'c')]
    >>> list(everygrams(sent, max_len=2))
    [('a',), ('a', 'b'), ('b',), ('b', 'c'), ('c',)]
    :param sequence: the source data to be converted into ngrams. If max_len is
        not provided, this sequence will be loaded into memory
    :type sequence: sequence or iter
    :param min_len: minimum length of the ngrams, aka. n-gram order/degree of ngram
    :type min_len: int
    :param max_len: maximum length of the ngrams (set to length of sequence by default)
    :type max_len: int
    :param pad_left: whether the ngrams should be left-padded
    :type pad_left: bool
    :param pad_right: whether the ngrams should be right-padded
    :type pad_right: bool
    :rtype: iter(tuple)
    """
    # Get max_len for padding.
    if max_len == -1:
        try:
            max_len = len(sequence)
        except TypeError:
            # Plain iterators have no len(); materialize once to measure.
            sequence = list(sequence)
            max_len = len(sequence)
    # Pad if indicated using max_len.
    # NOTE(review): next() is called on the result below, so this relies on
    # pad_sequence returning an iterator (not a plain sequence) -- confirm.
    sequence = pad_sequence(sequence, max_len, pad_left, pad_right, **kwargs)
    # Sliding window to store grams.
    history = list(islice(sequence, max_len))
    # Yield ngrams from sequence.
    while history:
        # Emit every prefix of the window from min_len up to the window size.
        for ngram_len in range(min_len, len(history) + 1):
            yield tuple(history[:ngram_len])
        # Append element to history if sequence has more items.
        try:
            history.append(next(sequence))
        except StopIteration:
            # Exhausted: the window just shrinks from the left until empty.
            pass
        del history[0]
The provided code snippet includes necessary dependencies for implementing the `padded_everygrams` function. Write a Python function `def padded_everygrams(order, sentence)` to solve the following problem:
Helper with some useful defaults. Applies pad_both_ends to sentence and follows it up with everygrams.
Here is the function:
def padded_everygrams(order, sentence):
    """Helper with some useful defaults.
    Applies pad_both_ends to sentence and follows it up with everygrams.
    """
    padded_sentence = list(pad_both_ends(sentence, n=order))
    return everygrams(padded_sentence, max_len=order)
170,515 | from functools import partial
from itertools import chain
from nltk.util import everygrams, pad_sequence
flatten = chain.from_iterable
pad_both_ends = partial(
pad_sequence,
pad_left=True,
left_pad_symbol="<s>",
pad_right=True,
right_pad_symbol="</s>",
)
pad_both_ends.__doc__ = """Pads both ends of a sentence to length specified by ngram order.
Following convention <s> pads the start of sentence </s> pads its end.
"""
class partial(Generic[_T]):
func: Callable[..., _T]
args: Tuple[Any, ...]
keywords: Dict[str, Any]
def __init__(self, func: Callable[..., _T], *args: Any, **kwargs: Any) -> None: ...
def __call__(self, *args: Any, **kwargs: Any) -> _T: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
def everygrams(
    sequence, min_len=1, max_len=-1, pad_left=False, pad_right=False, **kwargs
):
    """
    Returns all possible ngrams generated from a sequence of items, as an iterator.
    >>> sent = 'a b c'.split()
    New version outputs for everygrams.
    >>> list(everygrams(sent))
    [('a',), ('a', 'b'), ('a', 'b', 'c'), ('b',), ('b', 'c'), ('c',)]
    Old version outputs for everygrams.
    >>> sorted(everygrams(sent), key=len)
    [('a',), ('b',), ('c',), ('a', 'b'), ('b', 'c'), ('a', 'b', 'c')]
    >>> list(everygrams(sent, max_len=2))
    [('a',), ('a', 'b'), ('b',), ('b', 'c'), ('c',)]
    :param sequence: the source data to be converted into ngrams. If max_len is
        not provided, this sequence will be loaded into memory
    :type sequence: sequence or iter
    :param min_len: minimum length of the ngrams, aka. n-gram order/degree of ngram
    :type min_len: int
    :param max_len: maximum length of the ngrams (set to length of sequence by default)
    :type max_len: int
    :param pad_left: whether the ngrams should be left-padded
    :type pad_left: bool
    :param pad_right: whether the ngrams should be right-padded
    :type pad_right: bool
    :rtype: iter(tuple)
    """
    # Get max_len for padding.
    if max_len == -1:
        try:
            max_len = len(sequence)
        except TypeError:
            # Plain iterators have no len(); materialize once to measure.
            sequence = list(sequence)
            max_len = len(sequence)
    # Pad if indicated using max_len.
    # NOTE(review): next() is called on the result below, so this relies on
    # pad_sequence returning an iterator (not a plain sequence) -- confirm.
    sequence = pad_sequence(sequence, max_len, pad_left, pad_right, **kwargs)
    # Sliding window to store grams.
    history = list(islice(sequence, max_len))
    # Yield ngrams from sequence.
    while history:
        # Emit every prefix of the window from min_len up to the window size.
        for ngram_len in range(min_len, len(history) + 1):
            yield tuple(history[:ngram_len])
        # Append element to history if sequence has more items.
        try:
            history.append(next(sequence))
        except StopIteration:
            # Exhausted: the window just shrinks from the left until empty.
            pass
        del history[0]
The provided code snippet includes necessary dependencies for implementing the `padded_everygram_pipeline` function. Write a Python function `def padded_everygram_pipeline(order, text)` to solve the following problem:
Default preprocessing for a sequence of sentences. Creates two iterators: - sentences padded and turned into sequences of `nltk.util.everygrams` - sentences padded as above and chained together for a flat stream of words :param order: Largest ngram length produced by `everygrams`. :param text: Text to iterate over. Expected to be an iterable of sentences. :type text: Iterable[Iterable[str]] :return: iterator over text as ngrams, iterator over text as vocabulary data
Here is the function:
def padded_everygram_pipeline(order, text):
    """Default preprocessing for a sequence of sentences.
    Creates two iterators:
    - sentences padded and turned into sequences of `nltk.util.everygrams`
    - sentences padded as above and chained together for a flat stream of words
    :param order: Largest ngram length produced by `everygrams`.
    :param text: Text to iterate over. Expected to be an iterable of sentences.
    :type text: Iterable[Iterable[str]]
    :return: iterator over text as ngrams, iterator over text as vocabulary data
    """
    padding_fn = partial(pad_both_ends, n=order)
    # Both results are lazy; nothing is consumed until the caller iterates.
    ngram_iterator = (
        everygrams(list(padding_fn(sent)), max_len=order) for sent in text
    )
    flat_word_stream = flatten(map(padding_fn, text))
    return ngram_iterator, flat_word_stream
170,516 | import sys
from collections import Counter
from collections.abc import Iterable
from functools import singledispatch
from itertools import chain
def _dispatched_lookup(words, vocab):
    """Reject vocabulary lookups for unsupported argument types.

    NOTE(review): presumably the base case of a functools.singledispatch
    dispatcher whose decorator is not visible in this chunk -- confirm.
    """
    raise TypeError(f"Unsupported type for looking up in vocabulary: {type(words)}")
The provided code snippet includes necessary dependencies for implementing the `_` function. Write a Python function `def _(words, vocab)` to solve the following problem:
Look up a sequence of words in the vocabulary. Returns an iterator over looked up words.
Here is the function:
def _(words, vocab):
    """Look up a sequence of words in the vocabulary.

    Returns a tuple of the looked up words (materialized eagerly; the
    previous docstring's claim of "an iterator" did not match the code).
    """
    # NOTE(review): presumably registered on the singledispatch lookup for
    # sequence types; the @register decorator is not visible here -- confirm.
    return tuple(_dispatched_lookup(w, vocab) for w in words)
170,517 | import sys
from collections import Counter
from collections.abc import Iterable
from functools import singledispatch
from itertools import chain
The provided code snippet includes necessary dependencies for implementing the `_string_lookup` function. Write a Python function `def _string_lookup(word, vocab)` to solve the following problem:
Looks up one word in the vocabulary.
Here is the function:
def _string_lookup(word, vocab):
"""Looks up one word in the vocabulary."""
return word if word in vocab else vocab.unk_label | Looks up one word in the vocabulary. |
170,518 | import click
from tqdm import tqdm
from nltk import word_tokenize
from nltk.util import parallelize_preprocess
def cli():
    """Command-line entry point (group placeholder with no behavior of its own).

    NOTE(review): presumably decorated with @click.group() at the original
    definition site; the decorator is not visible in this chunk -- confirm.
    """
    pass
170,519 | import click
from tqdm import tqdm
from nltk import word_tokenize
from nltk.util import parallelize_preprocess
def parallelize_preprocess(func, iterator, processes, progress_bar=False):
    """Apply *func* to every item of *iterator*, optionally in parallel.

    With ``processes <= 1`` a lazy ``map`` is returned; otherwise joblib
    fans the work out across ``processes`` workers. ``progress_bar=True``
    wraps the input in tqdm.
    """
    from joblib import Parallel, delayed
    from tqdm import tqdm
    if progress_bar:
        iterator = tqdm(iterator)
    if processes <= 1:
        # Single process: joblib's dispatch overhead is not worth paying.
        return map(func, iterator)
    return Parallel(n_jobs=processes)(delayed(func)(item) for item in iterator)
The provided code snippet includes necessary dependencies for implementing the `tokenize_file` function. Write a Python function `def tokenize_file(language, preserve_line, processes, encoding, delimiter)` to solve the following problem:
This command tokenizes text stream using nltk.word_tokenize
Here is the function:
def tokenize_file(language, preserve_line, processes, encoding, delimiter):
    """This command tokenizes text stream using nltk.word_tokenize"""
    # NOTE(review): `language` and `preserve_line` are accepted (presumably
    # wired up as click options) but never forwarded to word_tokenize -- confirm.
    with click.get_text_stream("stdin", encoding=encoding) as fin:
        with click.get_text_stream("stdout", encoding=encoding) as fout:
            # If it's single process, joblib parallelization is slower,
            # so just process line by line normally.
            if processes == 1:
                for line in tqdm(fin.readlines()):
                    print(delimiter.join(word_tokenize(line)), end="\n", file=fout)
            else:
                # Multi-process path: tokenize lines in parallel, preserving
                # one output line per input line.
                for outline in parallelize_preprocess(
                    word_tokenize, fin.readlines(), processes, progress_bar=True
                ):
                    print(delimiter.join(outline), end="\n", file=fout)
170,520 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def usage(obj):
    """Print a formatted listing of the public methods supported by *obj*.

    Accepts either an instance (its class is introspected) or a class.
    Dunder/private methods, methods flagged ``__deprecated__``, and
    builtins without introspectable signatures are skipped.
    """
    str(obj)  # In case it's lazy, this will load it.
    if not isinstance(obj, type):
        obj = obj.__class__
    print(f"{obj.__name__} supports the following operations:")
    for (name, method) in sorted(pydoc.allmethods(obj).items()):
        if name.startswith("_"):
            continue
        if getattr(method, "__deprecated__", False):
            continue
        try:
            sig = str(inspect.signature(method))
        except ValueError as e:
            # builtins sometimes don't support introspection
            if "builtin" in str(e):
                continue
            else:
                raise
        # Turn "(a, b=1)" into a list of parameter strings.
        args = sig.lstrip("(").rstrip(")").split(", ")
        meth = inspect.getattr_static(obj, name)
        if isinstance(meth, (classmethod, staticmethod)):
            name = f"cls.{name}"
        elif args and args[0] == "self":
            # Present instance methods as "self.name(...)" without `self`.
            name = f"self.{name}"
            args.pop(0)
        print(
            textwrap.fill(
                f"{name}({', '.join(args)})",
                initial_indent=" - ",
                subsequent_indent=" " * (len(name) + 5),
            )
        )
170,521 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `in_idle` function. Write a Python function `def in_idle()` to solve the following problem:
Return True if this function is run within idle. Tkinter programs that are run in idle should never call ``Tk.mainloop``; so this function should be used to gate all calls to ``Tk.mainloop``. :warning: This function works by checking ``sys.stdin``. If the user has modified ``sys.stdin``, then it may return incorrect results. :rtype: bool
Here is the function:
def in_idle():
    """
    Return True if this function is run within idle. Tkinter
    programs that are run in idle should never call ``Tk.mainloop``; so
    this function should be used to gate all calls to ``Tk.mainloop``.
    :warning: This function works by checking ``sys.stdin``. If the
        user has modified ``sys.stdin``, then it may return incorrect
        results.
    :rtype: bool
    """
    import sys
    # IDLE replaces stdin with one of these wrapper classes.
    stdin_class = sys.stdin.__class__.__name__
    return stdin_class in ("PyShell", "RPCProxy")
170,522 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def islice(iterable: Iterable[_T], stop: Optional[int]) -> Iterator[_T]: ...
def islice(iterable: Iterable[_T], start: Optional[int], stop: Optional[int], step: Optional[int] = ...) -> Iterator[_T]: ...
The provided code snippet includes necessary dependencies for implementing the `pr` function. Write a Python function `def pr(data, start=0, end=None)` to solve the following problem:
Pretty print a sequence of data items :param data: the data stream to print :type data: sequence or iter :param start: the start position :type start: int :param end: the end position :type end: int
Here is the function:
def pr(data, start=0, end=None):
    """
    Pretty print a sequence of data items
    :param data: the data stream to print
    :type data: sequence or iter
    :param start: the start position
    :type start: int
    :param end: the end position
    :type end: int
    """
    # islice handles both sequences and one-shot iterators uniformly.
    window = islice(data, start, end)
    pprint(list(window))
170,523 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `print_string` function. Write a Python function `def print_string(s, width=70)` to solve the following problem:
Pretty print a string, breaking lines on whitespace :param s: the string to print, consisting of words and spaces :type s: str :param width: the display width :type width: int
Here is the function:
def print_string(s, width=70):
    """
    Pretty print a string, breaking lines on whitespace
    :param s: the string to print, consisting of words and spaces
    :type s: str
    :param width: the display width
    :type width: int
    """
    wrapped_lines = textwrap.wrap(s, width=width)
    print("\n".join(wrapped_lines))
170,524 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `tokenwrap` function. Write a Python function `def tokenwrap(tokens, separator=" ", width=70)` to solve the following problem:
Pretty print a list of text tokens, breaking lines on whitespace :param tokens: the tokens to print :type tokens: list :param separator: the string to use to separate tokens :type separator: str :param width: the display width (default=70) :type width: int
Here is the function:
def tokenwrap(tokens, separator=" ", width=70):
    """
    Pretty print a list of text tokens, breaking lines on whitespace
    :param tokens: the tokens to print
    :type tokens: list
    :param separator: the string to use to separate tokens
    :type separator: str
    :param width: the display width (default=70)
    :type width: int
    """
    joined = separator.join(tokens)
    wrapped = textwrap.wrap(joined, width=width)
    return "\n".join(wrapped)
170,525 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `re_show` function. Write a Python function `def re_show(regexp, string, left="{", right="}")` to solve the following problem:
Return a string with markers surrounding the matched substrings. Search str for substrings matching ``regexp`` and wrap the matches with braces. This is convenient for learning about regular expressions. :param regexp: The regular expression. :type regexp: str :param string: The string being matched. :type string: str :param left: The left delimiter (printed before the matched substring) :type left: str :param right: The right delimiter (printed after the matched substring) :type right: str :rtype: str
Here is the function:
def re_show(regexp, string, left="{", right="}"):
    """
    Return a string with markers surrounding the matched substrings.
    Search str for substrings matching ``regexp`` and wrap the matches
    with braces. This is convenient for learning about regular expressions.
    :param regexp: The regular expression.
    :type regexp: str
    :param string: The string being matched.
    :type string: str
    :param left: The left delimiter (printed before the matched substring)
    :type left: str
    :param right: The right delimiter (printed after the matched substring)
    :type right: str
    :rtype: str
    """
    pattern = re.compile(regexp, re.M)
    # \g<0> re-inserts each whole match between the delimiters.
    marked = pattern.sub(left + r"\g<0>" + right, string.rstrip())
    print(marked)
170,526 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def filestring(f):
    """Return the full contents of *f* as a string.

    ``f`` may be a filename or any object exposing a ``read`` method;
    anything else raises :exc:`ValueError`.
    """
    if hasattr(f, "read"):
        return f.read()
    if isinstance(f, str):
        with open(f) as fp:
            return fp.read()
    raise ValueError("Must be called with a filename or file-like object")
170,527 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
# Typing-stub excerpt for ``collections.deque`` — method signatures only;
# every ``...`` body carries no behavior.
# NOTE(review): ``maxlen`` and the comparison/mutation methods read like a
# ``.pyi`` stub whose decorators were stripped during extraction (``maxlen``
# was presumably a ``@property``) — confirm against the original stub file.
class deque(Sized, Iterable[_T], Reversible[_T], Generic[_T]):
    def __init__(self, iterable: Iterable[_T] = ..., maxlen: int = ...) -> None: ...
    def maxlen(self) -> Optional[int]: ...
    def append(self, x: _T) -> None: ...
    def appendleft(self, x: _T) -> None: ...
    def clear(self) -> None: ...
    def count(self, x: _T) -> int: ...
    def extend(self, iterable: Iterable[_T]) -> None: ...
    def extendleft(self, iterable: Iterable[_T]) -> None: ...
    def pop(self) -> _T: ...
    def popleft(self) -> _T: ...
    def remove(self, value: _T) -> None: ...
    def reverse(self) -> None: ...
    def rotate(self, n: int = ...) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[_T]: ...
    def __str__(self) -> str: ...
    def __hash__(self) -> int: ...
    def __getitem__(self, i: int) -> _T: ...
    def __setitem__(self, i: int, x: _T) -> None: ...
    def __contains__(self, o: _T) -> bool: ...
    def __reversed__(self) -> Iterator[_T]: ...
    def __iadd__(self: _S, iterable: Iterable[_T]) -> _S: ...
The provided code snippet includes necessary dependencies for implementing the `breadth_first` function. Write a Python function `def breadth_first(tree, children=iter, maxdepth=-1)` to solve the following problem:
Traverse the nodes of a tree in breadth-first order. (No check for cycles.) The first argument should be the tree root; children should be a function taking as argument a tree node and returning an iterator of the node's children.
Here is the function:
def breadth_first(tree, children=iter, maxdepth=-1):
    """Iterate over the nodes of a tree in breadth-first order.

    No cycle detection is performed.  ``tree`` is the root node and
    ``children`` is a function mapping a node to an iterator over its
    children.  Nodes at depth ``maxdepth`` are yielded but not expanded
    (the default of -1 means no depth limit).
    """
    pending = deque([(tree, 0)])
    while pending:
        current, level = pending.popleft()
        yield current
        if level == maxdepth:
            continue
        try:
            for child in children(current):
                pending.append((child, level + 1))
        except TypeError:
            # ``current`` is a leaf: children() could not produce an iterator.
            pass
170,528 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def edge_closure(tree, children=iter, maxdepth=-1, verbose=False):
    """Generate the edges of a graph in breadth-first order, expanding
    each node at most once so that cycles are discarded.  ``tree`` is the
    start node; ``children`` maps a node to an iterator over its
    successors.  Each distinct edge is yielded exactly once.

    >>> from nltk.util import edge_closure
    >>> print(list(edge_closure('A', lambda node:{'A':['B','C'], 'B':'C', 'C':'B'}[node])))
    [('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')]
    """
    seen_nodes = set()
    seen_edges = set()
    pending = deque([(tree, 0)])
    while pending:
        current, level = pending.popleft()
        seen_nodes.add(current)
        if level == maxdepth:
            continue
        try:
            for child in children(current):
                if child in seen_nodes:
                    # Already-expanded target: keep the edge but do not
                    # re-queue the node, so searches never repeat.
                    if verbose:
                        warnings.warn(
                            f"Discarded redundant search for {child} at depth {level + 1}",
                            stacklevel=2,
                        )
                else:
                    pending.append((child, level + 1))
                edge = (current, child)
                if edge not in seen_edges:
                    seen_edges.add(edge)
                    yield edge
        except TypeError:
            pass
def edges2dot(edges, shapes=None, attr=None):
    """
    Render a directed graph as a string in the DOT graph language, which
    can be converted to an image by the 'dot' program from the Graphviz
    package, or nltk.parse.dependencygraph.dot2img(dot_string).

    :param edges: the set (or list) of edges of a directed graph.
    :param shapes: dictionary mapping a substring (tested against each
        node's repr) to the Graphviz shape assigned to matching nodes.
    :param attr: dictionary of global graph attributes.
    :return dot_string: the DOT representation of ``edges``.

    >>> import nltk
    >>> from nltk.util import edges2dot
    >>> print(edges2dot([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')]))
    digraph G {
    "A" -> "B";
    "A" -> "C";
    "B" -> "C";
    "C" -> "B";
    }
    <BLANKLINE>
    """
    shapes = shapes or {}
    attr = attr or {}
    lines = ["digraph G {\n"]
    for key, value in attr.items():
        lines.append(f"{key} = {value};\n")
    for edge in edges:
        # Emit a shape declaration for every endpoint whose repr contains
        # a trigger substring (declarations may repeat across edges, as in
        # the original implementation).
        for trigger, shape_name in shapes.items():
            for idx in (0, 1):
                if trigger in repr(edge[idx]):
                    lines.append(f'"{edge[idx]}" [shape = {shape_name}];\n')
        lines.append(f'"{edge[0]}" -> "{edge[1]}";\n')
    lines.append("}\n")
    return "".join(lines)
def unweighted_minimum_spanning_dict(tree, children=iter):
    """
    Build a dictionary representing a Minimum Spanning Tree (MST) of an
    unweighted graph, by a breadth-first traversal that discards cycles.
    Keys are nodes and values are lists of MST children.  ``tree`` is
    the root; ``children`` maps a node to an iterator over its children.

    >>> import nltk
    >>> from nltk.corpus import wordnet as wn
    >>> from nltk.util import unweighted_minimum_spanning_dict as umsd
    >>> from pprint import pprint
    >>> pprint(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees()))
    {Synset('bound.a.01'): [Synset('unfree.a.02')],
     Synset('classified.a.02'): [],
     Synset('confined.a.02'): [],
     Synset('dependent.a.01'): [],
     Synset('restricted.a.01'): [Synset('classified.a.02')],
     Synset('unfree.a.02'): [Synset('confined.a.02'),
                             Synset('dependent.a.01'),
                             Synset('restricted.a.01')]}
    """
    expanded = set()          # nodes whose children have already been explored
    pending = deque([tree])   # BFS queue
    enqueued = {tree}         # every node ever placed on the queue
    spanning = {}             # the MST under construction
    while pending:
        node = pending.popleft()
        spanning[node] = []         # every dequeued node gets an MST entry
        if node in expanded:        # avoid cycles
            continue
        expanded.add(node)
        for child in children(node):
            if child in enqueued:   # each node is queued at most once
                continue
            spanning[node].append(child)
            pending.append(child)
            enqueued.add(child)
    return spanning
The provided code snippet includes necessary dependencies for implementing the `unweighted_minimum_spanning_digraph` function. Write a Python function `def unweighted_minimum_spanning_digraph(tree, children=iter, shapes=None, attr=None)` to solve the following problem:
Build a Minimum Spanning Tree (MST) of an unweighted graph, by traversing the nodes of a tree in breadth-first order, discarding eventual cycles. Return a representation of this MST as a string in the DOT graph language, which can be converted to an image by the 'dot' program from the Graphviz package, or nltk.parse.dependencygraph.dot2img(dot_string). The first argument should be the tree root; children should be a function taking as argument a tree node and returning an iterator of the node's children. >>> import nltk >>> wn=nltk.corpus.wordnet >>> from nltk.util import unweighted_minimum_spanning_digraph as umsd >>> print(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees())) digraph G { "Synset('bound.a.01')" -> "Synset('unfree.a.02')"; "Synset('unfree.a.02')" -> "Synset('confined.a.02')"; "Synset('unfree.a.02')" -> "Synset('dependent.a.01')"; "Synset('unfree.a.02')" -> "Synset('restricted.a.01')"; "Synset('restricted.a.01')" -> "Synset('classified.a.02')"; } <BLANKLINE>
Here is the function:
def unweighted_minimum_spanning_digraph(tree, children=iter, shapes=None, attr=None):
    """
    Build a Minimum Spanning Tree (MST) of an unweighted graph,
    by traversing the nodes of a tree in breadth-first order,
    discarding eventual cycles.
    Return a representation of this MST as a string in the DOT graph language,
    which can be converted to an image by the 'dot' program from the Graphviz
    package, or nltk.parse.dependencygraph.dot2img(dot_string).
    The first argument should be the tree root;
    children should be a function taking as argument a tree node
    and returning an iterator of the node's children.

    :param shapes: dictionary of strings that trigger a specified shape.
    :param attr: dictionary with global graph attributes.

    >>> import nltk
    >>> wn=nltk.corpus.wordnet
    >>> from nltk.util import unweighted_minimum_spanning_digraph as umsd
    >>> print(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees()))
    digraph G {
    "Synset('bound.a.01')" -> "Synset('unfree.a.02')";
    "Synset('unfree.a.02')" -> "Synset('confined.a.02')";
    "Synset('unfree.a.02')" -> "Synset('dependent.a.01')";
    "Synset('unfree.a.02')" -> "Synset('restricted.a.01')";
    "Synset('restricted.a.01')" -> "Synset('classified.a.02')";
    }
    <BLANKLINE>
    """
    # Compute the spanning dict ONCE.  The previous implementation rebuilt
    # the entire MST dictionary inside the lambda for every node that
    # edge_closure visited, making the traversal quadratic (or worse) in
    # the number of nodes; the result is identical since the computation
    # is deterministic for a given (tree, children) pair.
    mstdic = unweighted_minimum_spanning_dict(tree, children)
    return edges2dot(
        edge_closure(tree, lambda node: mstdic[node]),
        shapes,
        attr,
    )
170,529 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
class deque(Sized, Iterable[_T], Reversible[_T], Generic[_T]):
def __init__(self, iterable: Iterable[_T] = ..., maxlen: int = ...) -> None: ...
def maxlen(self) -> Optional[int]: ...
def append(self, x: _T) -> None: ...
def appendleft(self, x: _T) -> None: ...
def clear(self) -> None: ...
def count(self, x: _T) -> int: ...
def extend(self, iterable: Iterable[_T]) -> None: ...
def extendleft(self, iterable: Iterable[_T]) -> None: ...
def pop(self) -> _T: ...
def popleft(self) -> _T: ...
def remove(self, value: _T) -> None: ...
def reverse(self) -> None: ...
def rotate(self, n: int = ...) -> None: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[_T]: ...
def __str__(self) -> str: ...
def __hash__(self) -> int: ...
def __getitem__(self, i: int) -> _T: ...
def __setitem__(self, i: int, x: _T) -> None: ...
def __contains__(self, o: _T) -> bool: ...
def __reversed__(self) -> Iterator[_T]: ...
def __iadd__(self: _S, iterable: Iterable[_T]) -> _S: ...
The provided code snippet includes necessary dependencies for implementing the `acyclic_breadth_first` function. Write a Python function `def acyclic_breadth_first(tree, children=iter, maxdepth=-1)` to solve the following problem:
Traverse the nodes of a tree in breadth-first order, discarding eventual cycles. The first argument should be the tree root; children should be a function taking as argument a tree node and returning an iterator of the node's children.
Here is the function:
def acyclic_breadth_first(tree, children=iter, maxdepth=-1):
    """Iterate over the nodes of a tree in breadth-first order,
    discarding eventual cycles (a warning is issued whenever a child
    that was already yielded is skipped).

    ``tree`` is the root node; ``children`` is a function mapping a node
    to an iterator over its children.  Nodes at depth ``maxdepth`` are
    yielded but not expanded (-1 means no limit).
    """
    visited = set()
    pending = deque([(tree, 0)])
    while pending:
        current, level = pending.popleft()
        yield current
        visited.add(current)
        if level == maxdepth:
            continue
        try:
            for child in children(current):
                if child in visited:
                    warnings.warn(
                        "Discarded redundant search for {} at depth {}".format(
                            child, level + 1
                        ),
                        stacklevel=2,
                    )
                else:
                    pending.append((child, level + 1))
        except TypeError:
            # ``current`` is a leaf: children() could not produce an iterator.
            pass
170,530 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `acyclic_depth_first` function. Write a Python function `def acyclic_depth_first(tree, children=iter, depth=-1, cut_mark=None, traversed=None)` to solve the following problem:
Traverse the nodes of a tree in depth-first order, discarding eventual cycles within any branch, adding cut_mark (when specified) if cycles were truncated. The first argument should be the tree root; children should be a function taking as argument a tree node and returning an iterator of the node's children. Catches all cycles: >>> import nltk >>> from nltk.util import acyclic_depth_first as acyclic_tree >>> wn=nltk.corpus.wordnet >>> from pprint import pprint >>> pprint(acyclic_tree(wn.synset('dog.n.01'), lambda s:s.hypernyms(),cut_mark='...')) [Synset('dog.n.01'), [Synset('canine.n.02'), [Synset('carnivore.n.01'), [Synset('placental.n.01'), [Synset('mammal.n.01'), [Synset('vertebrate.n.01'), [Synset('chordate.n.01'), [Synset('animal.n.01'), [Synset('organism.n.01'), [Synset('living_thing.n.01'), [Synset('whole.n.02'), [Synset('object.n.01'), [Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]]]]]], [Synset('domestic_animal.n.01'), "Cycle(Synset('animal.n.01'),-3,...)"]]
Here is the function:
def acyclic_depth_first(tree, children=iter, depth=-1, cut_mark=None, traversed=None):
    """Build a nested-list tree by depth-first traversal, discarding
    eventual cycles across *all* branches (one ``traversed`` set is
    shared by the whole walk), and appending ``cut_mark`` (when given)
    where cycles or the depth limit truncated the search.

    ``tree`` is the root node; ``children`` maps a node to an iterator
    over its children.

    Catches all cycles:

    >>> import nltk
    >>> from nltk.util import acyclic_depth_first as acyclic_tree
    >>> wn=nltk.corpus.wordnet
    >>> from pprint import pprint
    >>> pprint(acyclic_tree(wn.synset('dog.n.01'), lambda s:s.hypernyms(),cut_mark='...'))
    [Synset('dog.n.01'),
     [Synset('canine.n.02'),
      [Synset('carnivore.n.01'),
       [Synset('placental.n.01'),
        [Synset('mammal.n.01'),
         [Synset('vertebrate.n.01'),
          [Synset('chordate.n.01'),
           [Synset('animal.n.01'),
            [Synset('organism.n.01'),
             [Synset('living_thing.n.01'),
              [Synset('whole.n.02'),
               [Synset('object.n.01'),
                [Synset('physical_entity.n.01'),
                 [Synset('entity.n.01')]]]]]]]]]]]]],
     [Synset('domestic_animal.n.01'), "Cycle(Synset('animal.n.01'),-3,...)"]]
    """
    if traversed is None:
        traversed = {tree}
    result = [tree]
    if depth == 0:
        # Depth limit reached: mark the truncation if requested.
        if cut_mark:
            result.append(cut_mark)
        return result
    try:
        for child in children(tree):
            if child in traversed:
                warnings.warn(
                    "Discarded redundant search for {} at depth {}".format(
                        child, depth - 1
                    ),
                    stacklevel=3,
                )
                if cut_mark:
                    result.append(f"Cycle({child},{depth - 1},{cut_mark})")
            else:
                # Share one "traversed" set across the entire traversal.
                traversed.add(child)
                result.append(
                    acyclic_depth_first(child, children, depth - 1, cut_mark, traversed)
                )
    except TypeError:
        # ``tree`` is a leaf: children() could not produce an iterator.
        pass
    return result
170,531 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `acyclic_branches_depth_first` function. Write a Python function `def acyclic_branches_depth_first( tree, children=iter, depth=-1, cut_mark=None, traversed=None )` to solve the following problem:
Traverse the nodes of a tree in depth-first order, discarding eventual cycles within the same branch, but keep duplicate paths in different branches. Add cut_mark (when defined) if cycles were truncated. The first argument should be the tree root; children should be a function taking as argument a tree node and returning an iterator of the node's children. Catches only only cycles within the same branch, but keeping cycles from different branches: >>> import nltk >>> from nltk.util import acyclic_branches_depth_first as tree >>> wn=nltk.corpus.wordnet >>> from pprint import pprint >>> pprint(tree(wn.synset('certified.a.01'), lambda s:s.also_sees(), cut_mark='...', depth=4)) [Synset('certified.a.01'), [Synset('authorized.a.01'), [Synset('lawful.a.01'), [Synset('legal.a.01'), "Cycle(Synset('lawful.a.01'),0,...)", [Synset('legitimate.a.01'), '...']], [Synset('straight.a.06'), [Synset('honest.a.01'), '...'], "Cycle(Synset('lawful.a.01'),0,...)"]], [Synset('legitimate.a.01'), "Cycle(Synset('authorized.a.01'),1,...)", [Synset('legal.a.01'), [Synset('lawful.a.01'), '...'], "Cycle(Synset('legitimate.a.01'),0,...)"], [Synset('valid.a.01'), "Cycle(Synset('legitimate.a.01'),0,...)", [Synset('reasonable.a.01'), '...']]], [Synset('official.a.01'), "Cycle(Synset('authorized.a.01'),1,...)"]], [Synset('documented.a.01')]]
Here is the function:
def acyclic_branches_depth_first(
    tree, children=iter, depth=-1, cut_mark=None, traversed=None
):
    """Build a nested-list tree by depth-first traversal, discarding
    cycles *within the same branch only*: each child is recursed into
    with its own copy of the ``traversed`` set, so duplicate paths in
    different branches are kept.  ``cut_mark`` (when given) is appended
    where cycles or the depth limit truncated the search.

    ``tree`` is the root node; ``children`` maps a node to an iterator
    over its children.

    Catches only cycles within the same branch,
    while keeping cycles from different branches:

    >>> import nltk
    >>> from nltk.util import acyclic_branches_depth_first as tree
    >>> wn=nltk.corpus.wordnet
    >>> from pprint import pprint
    >>> pprint(tree(wn.synset('certified.a.01'), lambda s:s.also_sees(), cut_mark='...', depth=4))
    [Synset('certified.a.01'),
     [Synset('authorized.a.01'),
      [Synset('lawful.a.01'),
       [Synset('legal.a.01'),
        "Cycle(Synset('lawful.a.01'),0,...)",
        [Synset('legitimate.a.01'), '...']],
       [Synset('straight.a.06'),
        [Synset('honest.a.01'), '...'],
        "Cycle(Synset('lawful.a.01'),0,...)"]],
      [Synset('legitimate.a.01'),
       "Cycle(Synset('authorized.a.01'),1,...)",
       [Synset('legal.a.01'),
        [Synset('lawful.a.01'), '...'],
        "Cycle(Synset('legitimate.a.01'),0,...)"],
       [Synset('valid.a.01'),
        "Cycle(Synset('legitimate.a.01'),0,...)",
        [Synset('reasonable.a.01'), '...']]],
      [Synset('official.a.01'), "Cycle(Synset('authorized.a.01'),1,...)"]],
     [Synset('documented.a.01')]]
    """
    if traversed is None:
        traversed = {tree}
    result = [tree]
    if depth == 0:
        # Depth limit reached: mark the truncation if requested.
        if cut_mark:
            result.append(cut_mark)
        return result
    try:
        for child in children(tree):
            if child in traversed:
                warnings.warn(
                    "Discarded redundant search for {} at depth {}".format(
                        child, depth - 1
                    ),
                    stacklevel=3,
                )
                if cut_mark:
                    result.append(f"Cycle({child},{depth - 1},{cut_mark})")
            else:
                # Each child recurses with its OWN traversed set, so
                # duplicates are pruned only within a single branch.
                result.append(
                    acyclic_branches_depth_first(
                        child,
                        children,
                        depth - 1,
                        cut_mark,
                        traversed | {child},
                    )
                )
    except TypeError:
        # ``tree`` is a leaf: children() could not produce an iterator.
        pass
    return result
170,532 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def acyclic_dic2tree(node, dic):
    """Convert an acyclic dictionary ``dic`` (keys are nodes, values are
    lists of children) into a nested-list tree rooted at ``node``,
    suitable for pprint()."""
    subtrees = [acyclic_dic2tree(child, dic) for child in dic[node]]
    return [node] + subtrees
def unweighted_minimum_spanning_dict(tree, children=iter):
    """
    Output a dictionary representing a Minimum Spanning Tree (MST)
    of an unweighted graph, by traversing the nodes of a tree in
    breadth-first order, discarding eventual cycles.
    The first argument should be the tree root;
    children should be a function taking as argument a tree node
    and returning an iterator of the node's children.
    >>> import nltk
    >>> from nltk.corpus import wordnet as wn
    >>> from nltk.util import unweighted_minimum_spanning_dict as umsd
    >>> from pprint import pprint
    >>> pprint(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees()))
    {Synset('bound.a.01'): [Synset('unfree.a.02')],
     Synset('classified.a.02'): [],
     Synset('confined.a.02'): [],
     Synset('dependent.a.01'): [],
     Synset('restricted.a.01'): [Synset('classified.a.02')],
     Synset('unfree.a.02'): [Synset('confined.a.02'),
                             Synset('dependent.a.01'),
                             Synset('restricted.a.01')]}
    """
    traversed = set()  # Empty set of traversed nodes
    queue = deque([tree])  # Initialize queue
    agenda = {tree}  # Set of all nodes ever queued
    mstdic = {}  # Empty MST dictionary
    while queue:
        node = queue.popleft()  # Node is not yet in the MST dictionary,
        mstdic[node] = []  # so add it with an empty list of children
        if node not in traversed:  # Avoid cycles
            traversed.add(node)
            for child in children(node):
                if child not in agenda:  # Queue nodes only once
                    mstdic[node].append(child)  # Add child to the MST
                    queue.append(child)  # Add child to queue
                    agenda.add(child)
    return mstdic
The provided code snippet includes necessary dependencies for implementing the `unweighted_minimum_spanning_tree` function. Write a Python function `def unweighted_minimum_spanning_tree(tree, children=iter)` to solve the following problem:
Output a Minimum Spanning Tree (MST) of an unweighted graph, by traversing the nodes of a tree in breadth-first order, discarding eventual cycles. The first argument should be the tree root; children should be a function taking as argument a tree node and returning an iterator of the node's children. >>> import nltk >>> from nltk.util import unweighted_minimum_spanning_tree as mst >>> wn=nltk.corpus.wordnet >>> from pprint import pprint >>> pprint(mst(wn.synset('bound.a.01'), lambda s:s.also_sees())) [Synset('bound.a.01'), [Synset('unfree.a.02'), [Synset('confined.a.02')], [Synset('dependent.a.01')], [Synset('restricted.a.01'), [Synset('classified.a.02')]]]]
Here is the function:
def unweighted_minimum_spanning_tree(tree, children=iter):
    """
    Output a Minimum Spanning Tree (MST) of an unweighted graph, as a
    nested list, by traversing the nodes of a tree in breadth-first
    order and discarding eventual cycles.
    The first argument should be the tree root;
    children should be a function taking as argument a tree node
    and returning an iterator of the node's children.

    >>> import nltk
    >>> from nltk.util import unweighted_minimum_spanning_tree as mst
    >>> wn=nltk.corpus.wordnet
    >>> from pprint import pprint
    >>> pprint(mst(wn.synset('bound.a.01'), lambda s:s.also_sees()))
    [Synset('bound.a.01'),
     [Synset('unfree.a.02'),
      [Synset('confined.a.02')],
      [Synset('dependent.a.01')],
      [Synset('restricted.a.01'), [Synset('classified.a.02')]]]]
    """
    # Build the MST as a dict first, then unfold it into nested lists.
    spanning_dict = unweighted_minimum_spanning_dict(tree, children)
    return acyclic_dic2tree(tree, spanning_dict)
170,533 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `guess_encoding` function. Write a Python function `def guess_encoding(data)` to solve the following problem:
Given a byte string, attempt to decode it. Tries the standard 'UTF8' and 'latin-1' encodings, Plus several gathered from locale information. The calling program *must* first call:: locale.setlocale(locale.LC_ALL, '') If successful it returns ``(decoded_unicode, successful_encoding)``. If unsuccessful it raises a ``UnicodeError``.
Here is the function:
def guess_encoding(data):
    """
    Given a byte string, attempt to decode it.
    Tries the standard 'UTF8' and 'latin-1' encodings,
    Plus several gathered from locale information.

    The calling program *must* first call::

        locale.setlocale(locale.LC_ALL, '')

    If successful it returns ``(decoded_unicode, successful_encoding)``.
    If unsuccessful it raises a ``UnicodeError``.
    """
    # Candidate encodings, in priority order: utf-8 first, then whatever
    # the locale suggests, with latin-1 as the final fallback.
    encodings = ["utf-8"]
    try:
        encodings.append(locale.nl_langinfo(locale.CODESET))
    except AttributeError:
        # nl_langinfo is not available on all platforms.
        pass
    try:
        encodings.append(locale.getlocale()[1])
    except (AttributeError, IndexError):
        pass
    try:
        encodings.append(locale.getdefaultlocale()[1])
    except (AttributeError, IndexError):
        pass
    encodings.append("latin-1")

    for enc in encodings:
        # Some of the locale calls may have returned None or "".
        if not enc:
            continue
        try:
            return str(data, enc), enc
        except (UnicodeError, LookupError):
            continue
    raise UnicodeError(
        "Unable to decode input data. "
        "Tried the following encodings: %s."
        % ", ".join([repr(enc) for enc in encodings if enc])
    )
170,534 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def unique_list(xs):
    """Return a list of the unique items of *xs*, preserving the order
    in which each item was first seen."""
    seen = set()
    result = []
    for item in xs:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
170,535 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
# Typing stub for collections.defaultdict.
# NOTE(review): the repeated ``__init__`` signatures look like typeshed
# @overload declarations whose decorators were dropped in this extract;
# as plain code each ``def`` would simply rebind the previous one --
# confirm against the typeshed source before treating this as runnable.
class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]):
    # Factory invoked by __missing__ to produce a default value.
    default_factory: Callable[[], _VT]
    def __init__(self, **kwargs: _VT) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ...
    def __init__(
        self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
    ) -> None: ...
    def __missing__(self, key: _KT) -> _VT: ...
    def copy(self: _S) -> _S: ...
def invert_dict(d):
    """Return a mapping from the values of ``d`` back to their keys.

    A value with an ``__iter__`` method is treated as a collection: each
    member maps back to a list of all keys that contained it.  Any other
    value maps directly to its key.  Note that strings are iterable, so
    string values are expanded into individual characters.

    :param d: the dictionary to invert
    :type d: dict
    :rtype: defaultdict(list)
    """
    flipped = defaultdict(list)
    for key, value in d.items():
        if hasattr(value, "__iter__"):
            for member in value:
                flipped[member].append(key)
        else:
            flipped[value] = key
    return flipped
170,536 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `transitive_closure` function. Write a Python function `def transitive_closure(graph, reflexive=False)` to solve the following problem:
Calculate the transitive closure of a directed graph, optionally the reflexive transitive closure. The algorithm is a slight modification of the "Marking Algorithm" of Ioannidis & Ramakrishnan (1998) "Efficient Transitive Closure Algorithms". :param graph: the initial graph, represented as a dictionary of sets :type graph: dict(set) :param reflexive: if set, also make the closure reflexive :type reflexive: bool :rtype: dict(set)
Here is the function:
def transitive_closure(graph, reflexive=False):
    """
    Calculate the transitive closure of a directed graph,
    optionally the reflexive transitive closure.

    The algorithm is a slight modification of the "Marking Algorithm" of
    Ioannidis & Ramakrishnan (1998) "Efficient Transitive Closure Algorithms".

    Nodes that appear only as edge targets (never as keys of ``graph``)
    are added to the result as they are discovered.

    :param graph: the initial graph, represented as a dictionary of sets
    :type graph: dict(set)
    :param reflexive: if set, also make the closure reflexive
    :type reflexive: bool
    :rtype: dict(set)
    """
    # When the reflexive closure is requested, every node's closure is
    # seeded with the node itself.
    if reflexive:
        base_set = lambda k: {k}
    else:
        base_set = lambda k: set()
    # The graph U_i in the article:
    agenda_graph = {k: graph[k].copy() for k in graph}
    # The graph M_i in the article:
    closure_graph = {k: base_set(k) for k in graph}
    for i in graph:
        agenda = agenda_graph[i]
        closure = closure_graph[i]
        while agenda:
            j = agenda.pop()
            closure.add(j)
            # Reuse whatever closure has already been accumulated for j in
            # earlier iterations; setdefault also creates an entry for
            # nodes that only ever appear as targets.
            closure |= closure_graph.setdefault(j, base_set(j))
            # Schedule j's outgoing edges, then drop anything already
            # known to be in the closure so each node is visited once.
            agenda |= agenda_graph.get(j, base_set(j))
            agenda -= closure
    return closure_graph
170,537 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `invert_graph` function. Write a Python function `def invert_graph(graph)` to solve the following problem:
Inverts a directed graph. :param graph: the graph, represented as a dictionary of sets :type graph: dict(set) :return: the inverted graph :rtype: dict(set)
Here is the function:
def invert_graph(graph):
    """
    Invert a directed graph: every edge ``source -> target`` becomes
    ``target -> source`` in the result.

    Only nodes with at least one incoming edge appear as keys of the
    returned dictionary.

    :param graph: the graph, represented as a dictionary of sets
    :type graph: dict(set)
    :return: the inverted graph
    :rtype: dict(set)
    """
    flipped = {}
    for source, targets in graph.items():
        for target in targets:
            flipped.setdefault(target, set()).add(source)
    return flipped
170,538 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def clean_html(html):
    """Removed in NLTK: always raises, directing users to BeautifulSoup."""
    message = "To remove HTML markup, use BeautifulSoup's get_text() function"
    raise NotImplementedError(message)
170,539 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def clean_url(url):
    """Removed in NLTK: always raises, directing users to BeautifulSoup."""
    message = "To remove HTML markup, use BeautifulSoup's get_text() function"
    raise NotImplementedError(message)
170,540 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def ngrams(sequence, n, **kwargs):
    """
    Return the ngrams generated from a sequence of items, as an iterator.
    For example:

        >>> from nltk.util import ngrams
        >>> list(ngrams([1,2,3,4,5], 3))
        [(1, 2, 3), (2, 3, 4), (3, 4, 5)]

    Wrap with list for a list version of this function.  Padding keyword
    arguments (``pad_left``, ``pad_right``, ``left_pad_symbol``,
    ``right_pad_symbol``) are forwarded to ``pad_sequence``:

        >>> list(ngrams([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
        [(1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
        >>> list(ngrams([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
        [('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5)]

    :param sequence: the source data to be converted into ngrams
    :type sequence: sequence or iter
    :param n: the degree of the ngrams
    :type n: int
    :rtype: sequence or iter
    """
    padded = pad_sequence(sequence, n, **kwargs)
    # Duplicate the padded stream n times and advance copy k by k items,
    # so zipping the copies walks a sliding window of width n.
    copies = tee(padded, n)
    for shift, stream in enumerate(copies):
        for _ in range(shift):
            next(stream, None)
    return zip(*copies)
The provided code snippet includes necessary dependencies for implementing the `bigrams` function. Write a Python function `def bigrams(sequence, **kwargs)` to solve the following problem:
Return the bigrams generated from a sequence of items, as an iterator. For example: >>> from nltk.util import bigrams >>> list(bigrams([1,2,3,4,5])) [(1, 2), (2, 3), (3, 4), (4, 5)] Wrap with list for a list version of this function. :param sequence: the source data to be converted into bigrams :type sequence: sequence or iter :rtype: iter(tuple)
Here is the function:
def bigrams(sequence, **kwargs):
    """
    Return the bigrams generated from a sequence of items, as an iterator.
    For example:

        >>> from nltk.util import bigrams
        >>> list(bigrams([1,2,3,4,5]))
        [(1, 2), (2, 3), (3, 4), (4, 5)]

    Wrap with list for a list version of this function.

    :param sequence: the source data to be converted into bigrams
    :type sequence: sequence or iter
    :rtype: iter(tuple)
    """
    # Delegate to the generic ngrams() with degree 2; yielding item by
    # item keeps this a generator function, exactly as before.
    for pair in ngrams(sequence, 2, **kwargs):
        yield pair
170,541 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def ngrams(sequence, n, **kwargs):
    """
    Return the ngrams generated from a sequence of items, as an iterator.
    For example:

        >>> from nltk.util import ngrams
        >>> list(ngrams([1,2,3,4,5], 3))
        [(1, 2, 3), (2, 3, 4), (3, 4, 5)]

    Wrap with list for a list version of this function.  Padding keyword
    arguments (``pad_left``, ``pad_right``, ``left_pad_symbol``,
    ``right_pad_symbol``) are forwarded to ``pad_sequence``:

        >>> list(ngrams([1,2,3,4,5], 2, pad_right=True))
        [(1, 2), (2, 3), (3, 4), (4, 5), (5, None)]
        >>> list(ngrams([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
        [('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]

    :param sequence: the source data to be converted into ngrams
    :type sequence: sequence or iter
    :param n: the degree of the ngrams
    :type n: int
    :rtype: sequence or iter
    """
    padded = pad_sequence(sequence, n, **kwargs)
    streams = tee(padded, n)
    # Offset stream k by k items; zipping the offset streams then yields a
    # sliding window of width n across the padded sequence.
    return zip(*(islice(stream, k, None) for k, stream in enumerate(streams)))
The provided code snippet includes necessary dependencies for implementing the `trigrams` function. Write a Python function `def trigrams(sequence, **kwargs)` to solve the following problem:
Return the trigrams generated from a sequence of items, as an iterator. For example: >>> from nltk.util import trigrams >>> list(trigrams([1,2,3,4,5])) [(1, 2, 3), (2, 3, 4), (3, 4, 5)] Wrap with list for a list version of this function. :param sequence: the source data to be converted into trigrams :type sequence: sequence or iter :rtype: iter(tuple)
Here is the function:
def trigrams(sequence, **kwargs):
    """
    Return the trigrams generated from a sequence of items, as an iterator.
    For example:

        >>> from nltk.util import trigrams
        >>> list(trigrams([1,2,3,4,5]))
        [(1, 2, 3), (2, 3, 4), (3, 4, 5)]

    Wrap with list for a list version of this function.

    :param sequence: the source data to be converted into trigrams
    :type sequence: sequence or iter
    :rtype: iter(tuple)
    """
    # Delegate to the generic ngrams() with degree 3; yielding item by
    # item keeps this a generator function, exactly as before.
    for triple in ngrams(sequence, 3, **kwargs):
        yield triple
170,542 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
def pad_sequence(
    sequence,
    n,
    pad_left=False,
    pad_right=False,
    left_pad_symbol=None,
    right_pad_symbol=None,
):
    """
    Return a padded version of *sequence*, ready for ngram extraction.

        >>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
        ['<s>', 1, 2, 3, 4, 5, '</s>']
        >>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
        ['<s>', 1, 2, 3, 4, 5]
        >>> list(pad_sequence([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
        [1, 2, 3, 4, 5, '</s>']

    :param sequence: the source data to be padded
    :type sequence: sequence or iter
    :param n: the degree of the ngrams
    :type n: int
    :param pad_left: whether the ngrams should be left-padded
    :type pad_left: bool
    :param pad_right: whether the ngrams should be right-padded
    :type pad_right: bool
    :param left_pad_symbol: the symbol to use for left padding (default is None)
    :type left_pad_symbol: any
    :param right_pad_symbol: the symbol to use for right padding (default is None)
    :type right_pad_symbol: any
    :rtype: sequence or iter
    """
    # ngrams of degree n need n - 1 pad symbols on each padded side.
    pad_width = n - 1
    padded = iter(sequence)
    if pad_left:
        padded = chain([left_pad_symbol] * pad_width, padded)
    if pad_right:
        padded = chain(padded, [right_pad_symbol] * pad_width)
    return padded
def ngrams(sequence, n, **kwargs):
    """
    Return the ngrams generated from a sequence of items, as an iterator.
    For example:

        >>> from nltk.util import ngrams
        >>> list(ngrams([1,2,3,4,5], 3))
        [(1, 2, 3), (2, 3, 4), (3, 4, 5)]

    Wrap with list for a list version of this function.  Padding keyword
    arguments (``pad_left``, ``pad_right``, ``left_pad_symbol``,
    ``right_pad_symbol``) are forwarded to ``pad_sequence``:

        >>> list(ngrams([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
        [(1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
        >>> list(ngrams([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
        [('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5)]

    :param sequence: the source data to be converted into ngrams
    :type sequence: sequence or iter
    :param n: the degree of the ngrams
    :type n: int
    :rtype: sequence or iter
    """
    padded = pad_sequence(sequence, n, **kwargs)
    windows = tee(padded, n)
    for skip, window in enumerate(windows):
        # Eagerly discard the first `skip` items of this copy; the
        # zero-length deque is a fast "consume and throw away" idiom.
        deque(islice(window, skip), maxlen=0)
    return zip(*windows)
# Typing stubs for itertools.combinations: the returned tuple length
# narrows with a literal ``r``.  NOTE(review): these look like typeshed
# @overload declarations whose decorators were dropped in this extract;
# as plain code each ``def`` would simply rebind the previous one.
def combinations(iterable: Iterable[_T], r: Literal[2]) -> Iterator[Tuple[_T, _T]]: ...
def combinations(iterable: Iterable[_T], r: Literal[3]) -> Iterator[Tuple[_T, _T, _T]]: ...
def combinations(iterable: Iterable[_T], r: Literal[4]) -> Iterator[Tuple[_T, _T, _T, _T]]: ...
def combinations(iterable: Iterable[_T], r: Literal[5]) -> Iterator[Tuple[_T, _T, _T, _T, _T]]: ...
def combinations(iterable: Iterable[_T], r: int) -> Iterator[Tuple[_T, ...]]: ...
The provided code snippet includes necessary dependencies for implementing the `skipgrams` function. Write a Python function `def skipgrams(sequence, n, k, **kwargs)` to solve the following problem:
Returns all possible skipgrams generated from a sequence of items, as an iterator. Skipgrams are ngrams that allows tokens to be skipped. Refer to http://homepages.inf.ed.ac.uk/ballison/pdf/lrec_skipgrams.pdf >>> sent = "Insurgents killed in ongoing fighting".split() >>> list(skipgrams(sent, 2, 2)) [('Insurgents', 'killed'), ('Insurgents', 'in'), ('Insurgents', 'ongoing'), ('killed', 'in'), ('killed', 'ongoing'), ('killed', 'fighting'), ('in', 'ongoing'), ('in', 'fighting'), ('ongoing', 'fighting')] >>> list(skipgrams(sent, 3, 2)) [('Insurgents', 'killed', 'in'), ('Insurgents', 'killed', 'ongoing'), ('Insurgents', 'killed', 'fighting'), ('Insurgents', 'in', 'ongoing'), ('Insurgents', 'in', 'fighting'), ('Insurgents', 'ongoing', 'fighting'), ('killed', 'in', 'ongoing'), ('killed', 'in', 'fighting'), ('killed', 'ongoing', 'fighting'), ('in', 'ongoing', 'fighting')] :param sequence: the source data to be converted into trigrams :type sequence: sequence or iter :param n: the degree of the ngrams :type n: int :param k: the skip distance :type k: int :rtype: iter(tuple)
Here is the function:
def skipgrams(sequence, n, k, **kwargs):
    """
    Return all possible skipgrams generated from a sequence of items, as an iterator.
    Skipgrams are ngrams that allow tokens to be skipped.
    Refer to http://homepages.inf.ed.ac.uk/ballison/pdf/lrec_skipgrams.pdf

        >>> sent = "Insurgents killed in ongoing fighting".split()
        >>> list(skipgrams(sent, 2, 2))
        [('Insurgents', 'killed'), ('Insurgents', 'in'), ('Insurgents', 'ongoing'), ('killed', 'in'), ('killed', 'ongoing'), ('killed', 'fighting'), ('in', 'ongoing'), ('in', 'fighting'), ('ongoing', 'fighting')]
        >>> list(skipgrams(sent, 3, 2))
        [('Insurgents', 'killed', 'in'), ('Insurgents', 'killed', 'ongoing'), ('Insurgents', 'killed', 'fighting'), ('Insurgents', 'in', 'ongoing'), ('Insurgents', 'in', 'fighting'), ('Insurgents', 'ongoing', 'fighting'), ('killed', 'in', 'ongoing'), ('killed', 'in', 'fighting'), ('killed', 'ongoing', 'fighting'), ('in', 'ongoing', 'fighting')]

    :param sequence: the source data to be converted into skipgrams
    :type sequence: sequence or iter
    :param n: the degree of the ngrams
    :type n: int
    :param k: the skip distance
    :type k: int
    :rtype: iter(tuple)
    """
    # Apply any user-requested padding once, up front.
    if "pad_left" in kwargs or "pad_right" in kwargs:
        sequence = pad_sequence(sequence, n, **kwargs)
    # A unique object marks positions beyond the end of the data.  The
    # right-padding below is internal bookkeeping (it lets the inner loop
    # detect windows that run off the sequence), not user padding.
    end_marker = object()
    for window in ngrams(sequence, n + k, pad_right=True, right_pad_symbol=end_marker):
        first = window[:1]
        rest = window[1:]
        for chosen in combinations(rest, n - 1):
            # A combination ending on the marker ran past the real data.
            if chosen[-1] is end_marker:
                continue
            yield first + chosen
170,543 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `binary_search_file` function. Write a Python function `def binary_search_file(file, key, cache=None, cacheDepth=-1)` to solve the following problem:
Return the line from the file with first word key. Searches through a sorted file using the binary search algorithm. :type file: file :param file: the file to be searched through. :type key: str :param key: the identifier we are searching for.
Here is the function:
def binary_search_file(file, key, cache=None, cacheDepth=-1):
    """
    Return the line from the file with first word key.
    Searches through a sorted file using the binary search algorithm.

    :type file: file
    :param file: the file to be searched through.
    :type key: str
    :param key: the identifier we are searching for.
    :param cache: optional mapping from byte positions to ``(offset, line)``
        pairs, reused across calls to avoid re-reading probe lines.
    :param cacheDepth: only cache probes made in the first ``cacheDepth``
        iterations of the search (-1 disables caching).
    :return: the matching line, or None if no line starts with ``key``.
    """
    # Append the word separator so "cat" does not match "catalog".
    key = key + " "
    keylen = len(key)
    start = 0
    currentDepth = 0
    # Determine the last byte offset; stat the file by name when possible,
    # otherwise seek to the end and back.
    if hasattr(file, "name"):
        end = os.stat(file.name).st_size - 1
    else:
        file.seek(0, 2)
        end = file.tell() - 1
        file.seek(0)
    if cache is None:
        cache = {}
    while start < end:
        lastState = start, end
        middle = (start + end) // 2
        if cache.get(middle):
            offset, line = cache[middle]
        else:
            line = ""
            while True:
                # Seek just before the midpoint and skip the (likely
                # partial) line we landed in, so `line` starts on a real
                # line boundary.
                # NOTE(review): discard_line() is not a plain Python file
                # method; this appears to rely on NLTK's seekable file
                # wrapper -- confirm the expected `file` type.
                file.seek(max(0, middle - 1))
                if middle > 0:
                    file.discard_line()
                offset = file.tell()
                line = file.readline()
                if line != "":
                    break
                # at EOF; try to find start of the last line
                middle = (start + middle) // 2
                if middle == end - 1:
                    return None
            if currentDepth < cacheDepth:
                cache[middle] = (offset, line)
        if offset > end:
            assert end != middle - 1, "infinite loop"
            end = middle - 1
        elif line[:keylen] == key:
            # Found a line whose first word is exactly `key`.
            return line
        elif line > key:
            assert end != middle - 1, "infinite loop"
            end = middle - 1
        elif line < key:
            # Continue searching after the probed line.
            start = offset + len(line) - 1
        currentDepth += 1
        thisState = start, end
        if lastState == thisState:
            # Detects the condition where we're searching past the end
            # of the file, which is otherwise difficult to detect
            return None
    return None
170,544 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `set_proxy` function. Write a Python function `def set_proxy(proxy, user=None, password="")` to solve the following problem:
Set the HTTP proxy for Python to download through. If ``proxy`` is None then tries to set proxy from environment or system settings. :param proxy: The HTTP proxy server to use. For example: 'http://proxy.example.com:3128/' :param user: The username to authenticate with. Use None to disable authentication. :param password: The password to authenticate with.
Here is the function:
def set_proxy(proxy, user=None, password=""):
    """
    Set the HTTP proxy for Python to download through.

    If ``proxy`` is None then tries to set proxy from environment or system
    settings.

    :param proxy: The HTTP proxy server to use. For example:
        'http://proxy.example.com:3128/'
    :param user: The username to authenticate with. Use None to disable
        authentication.
    :param password: The password to authenticate with.
    """
    if proxy is None:
        # Fall back to the system/environment proxy configuration.
        try:
            proxy = getproxies()["http"]
        except KeyError as e:
            raise ValueError("Could not detect default proxy settings") from e
    # Route both http and https traffic through the proxy.
    handler = ProxyHandler({"https": proxy, "http": proxy})
    opener = build_opener(handler)
    if user is not None:
        # Attach Basic and Digest proxy authentication handlers.
        manager = HTTPPasswordMgrWithDefaultRealm()
        manager.add_password(realm=None, uri=proxy, user=user, passwd=password)
        for auth_handler in (
            ProxyBasicAuthHandler(manager),
            ProxyDigestAuthHandler(manager),
        ):
            opener.add_handler(auth_handler)
    # Override the existing url opener process-wide.
    install_opener(opener)
170,545 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
The provided code snippet includes necessary dependencies for implementing the `elementtree_indent` function. Write a Python function `def elementtree_indent(elem, level=0)` to solve the following problem:
Recursive function to indent an ElementTree._ElementInterface used for pretty printing. Run indent on elem and then output in the normal way. :param elem: element to be indented. will be modified. :type elem: ElementTree._ElementInterface :param level: level of indentation for this element :type level: nonnegative integer :rtype: ElementTree._ElementInterface :return: Contents of elem indented to reflect its structure
Here is the function:
def elementtree_indent(elem, level=0):
    """
    Recursively add indentation whitespace to an ElementTree element so
    that serializing it produces pretty-printed output.  Run this on the
    root element, then write the tree out in the normal way.

    :param elem: element to be indented. will be modified.
    :type elem: ElementTree._ElementInterface
    :param level: level of indentation for this element
    :type level: nonnegative integer
    :rtype: ElementTree._ElementInterface
    :return: Contents of elem indented to reflect its structure
    """
    indent_str = "\n" + level * " "
    if len(elem):
        # Element with children: indent its text, recurse into children,
        # and give every child (including the last) this level's tail.
        if not elem.text or not elem.text.strip():
            elem.text = indent_str + " "
        for child in elem:
            elementtree_indent(child, level + 1)
            if not child.tail or not child.tail.strip():
                child.tail = indent_str
    else:
        # Leaf element: only the tail needs adjusting, and only below
        # the root.
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = indent_str
170,546 | import inspect
import locale
import os
import pydoc
import re
import textwrap
import warnings
from collections import defaultdict, deque
from itertools import chain, combinations, islice, tee
from pprint import pprint
from urllib.request import (
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler,
ProxyDigestAuthHandler,
ProxyHandler,
build_opener,
getproxies,
install_opener,
)
from nltk.collections import *
from nltk.internals import deprecated, raise_unorderable_types, slice_bounds
# Typing stub for itertools.tee: n independent iterators over one iterable.
def tee(iterable: Iterable[_T], n: int = ...) -> Tuple[Iterator[_T], ...]: ...
The provided code snippet includes necessary dependencies for implementing the `pairwise` function. Write a Python function `def pairwise(iterable)` to solve the following problem:
s -> (s0,s1), (s1,s2), (s2, s3), ...
Here is the function:
def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    # Split the stream into two copies and advance the second by one
    # step, so zipping them yields consecutive overlapping pairs.
    left, right = tee(iterable)
    next(right, None)
    return zip(left, right)
170,547 | import os
import tempfile
from nltk.inference.api import BaseModelBuilderCommand, ModelBuilder
from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent
from nltk.sem import Expression, Valuation
from nltk.sem.logic import is_indvar
class MaceCommand(Prover9CommandParent, BaseModelBuilderCommand):
"""
A ``MaceCommand`` specific to the ``Mace`` model builder. It contains
a print_assumptions() method that is used to print the list
of assumptions in multiple formats.
"""
_interpformat_bin = None
    def __init__(self, goal=None, assumptions=None, max_models=500, model_builder=None):
        """
        :param goal: Input expression to prove
        :type goal: sem.Expression
        :param assumptions: Input expressions to use as assumptions in
            the proof.
        :type assumptions: list(sem.Expression)
        :param max_models: The maximum number of models that Mace will try before
            simply returning false. (Use 0 for no maximum.)
        :type max_models: int
        :param model_builder: the model builder to wrap; a fresh ``Mace``
            configured with ``max_models`` is created when omitted.
        :type model_builder: Mace
        """
        # A caller-supplied builder must be a Mace instance; note that
        # ``max_models`` is ignored in that case.
        if model_builder is not None:
            assert isinstance(model_builder, Mace)
        else:
            model_builder = Mace(max_models)
        BaseModelBuilderCommand.__init__(self, model_builder, goal, assumptions)
    def valuation(mbc):
        # Convenience accessor returning the built model in NLTK
        # ``Valuation`` format (see _convert2val).
        # NOTE(review): the ``mbc`` receiver suggests this is declared as a
        # @property in the original source; the decorator appears to have
        # been dropped in this extract -- confirm against
        # nltk/inference/mace.py.
        return mbc.model("valuation")
def _convert2val(self, valuation_str):
"""
Transform the output file into an NLTK-style Valuation.
:return: A model if one is generated; None otherwise.
:rtype: sem.Valuation
"""
valuation_standard_format = self._transform_output(valuation_str, "standard")
val = []
for line in valuation_standard_format.splitlines(False):
l = line.strip()
if l.startswith("interpretation"):
# find the number of entities in the model
num_entities = int(l[l.index("(") + 1 : l.index(",")].strip())
elif l.startswith("function") and l.find("_") == -1:
# replace the integer identifier with a corresponding alphabetic character
name = l[l.index("(") + 1 : l.index(",")].strip()
if is_indvar(name):
name = name.upper()
value = int(l[l.index("[") + 1 : l.index("]")].strip())
val.append((name, MaceCommand._make_model_var(value)))
elif l.startswith("relation"):
l = l[l.index("(") + 1 :]
if "(" in l:
# relation is not nullary
name = l[: l.index("(")].strip()
values = [
int(v.strip())
for v in l[l.index("[") + 1 : l.index("]")].split(",")
]
val.append(
(name, MaceCommand._make_relation_set(num_entities, values))
)
else:
# relation is nullary
name = l[: l.index(",")].strip()
value = int(l[l.index("[") + 1 : l.index("]")].strip())
val.append((name, value == 1))
return Valuation(val)
def _make_relation_set(num_entities, values):
"""
Convert a Mace4-style relation table into a dictionary.
:param num_entities: the number of entities in the model; determines the row length in the table.
:type num_entities: int
:param values: a list of 1's and 0's that represent whether a relation holds in a Mace4 model.
:type values: list of int
"""
r = set()
for position in [pos for (pos, v) in enumerate(values) if v == 1]:
r.add(
tuple(MaceCommand._make_relation_tuple(position, values, num_entities))
)
return r
def _make_relation_tuple(position, values, num_entities):
if len(values) == 1:
return []
else:
sublist_size = len(values) // num_entities
sublist_start = position // sublist_size
sublist_position = int(position % sublist_size)
sublist = values[
sublist_start * sublist_size : (sublist_start + 1) * sublist_size
]
return [
MaceCommand._make_model_var(sublist_start)
] + MaceCommand._make_relation_tuple(
sublist_position, sublist, num_entities
)
def _make_model_var(value):
"""
Pick an alphabetic character as identifier for an entity in the model.
:param value: where to index into the list of characters
:type value: int
"""
letter = [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
][value]
num = value // 26
return letter + str(num) if num > 0 else letter
def _decorate_model(self, valuation_str, format):
"""
Print out a Mace4 model using any Mace4 ``interpformat`` format.
See https://www.cs.unm.edu/~mccune/mace4/manual/ for details.
:param valuation_str: str with the model builder's output
:param format: str indicating the format for displaying
models. Defaults to 'standard' format.
:return: str
"""
if not format:
return valuation_str
elif format == "valuation":
return self._convert2val(valuation_str)
else:
return self._transform_output(valuation_str, format)
def _transform_output(self, valuation_str, format):
"""
Transform the output file into any Mace4 ``interpformat`` format.
:param format: Output format for displaying models.
:type format: str
"""
if format in [
"standard",
"standard2",
"portable",
"tabular",
"raw",
"cooked",
"xml",
"tex",
]:
return self._call_interpformat(valuation_str, [format])[0]
else:
raise LookupError("The specified format does not exist")
def _call_interpformat(self, input_str, args=[], verbose=False):
"""
Call the ``interpformat`` binary with the given input.
:param input_str: A string whose contents are used as stdin.
:param args: A list of command-line arguments.
:return: A tuple (stdout, returncode)
:see: ``config_prover9``
"""
if self._interpformat_bin is None:
self._interpformat_bin = self._modelbuilder._find_binary(
"interpformat", verbose
)
return self._modelbuilder._call(
input_str, self._interpformat_bin, args, verbose
)
def test_make_relation_set():
    """Print True/False for each relation-table decoding check."""
    # Each case pairs a raw Mace4 relation table (entity count + 0/1 flags)
    # with the expected set of alphabetic entity tuples.
    cases = [
        (3, [1, 0, 1], {("c",), ("a",)}),
        (3, [0, 0, 0, 0, 0, 0, 1, 0, 0], {("c", "a")}),
        (2, [0, 0, 1, 0, 0, 0, 1, 0], {("a", "b", "a"), ("b", "b", "a")}),
    ]
    for entity_count, flags, expected in cases:
        decoded = MaceCommand._make_relation_set(
            num_entities=entity_count, values=flags
        )
        print(decoded == expected)
170,548 | import os
import tempfile
from nltk.inference.api import BaseModelBuilderCommand, ModelBuilder
from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent
from nltk.sem import Expression, Valuation
from nltk.sem.logic import is_indvar
def test_model_found(arguments):
    """
    Try some proofs and exhibit the results.
    """
    # NOTE(review): ``lp`` (a logic parser) and ``decode_result`` are not
    # defined in this excerpt -- presumably module-level helpers in the
    # original demo module; verify before reuse.
    for (goal, assumptions) in arguments:
        g = Expression.fromstring(goal)
        alist = [lp.parse(a) for a in assumptions]
        m = MaceCommand(g, assumptions=alist, max_models=50)
        found = m.build_model()
        for a in alist:
            print(" %s" % a)
        print(f"|- {g}: {decode_result(found)}\n")
def test_build_model(arguments):
    """
    Try to build a ``nltk.sem.Valuation``.
    """
    # NOTE(review): ``spacer`` and ``decode_result`` are module-level helpers
    # not shown in this excerpt; the ``arguments`` parameter is unused here.
    g = Expression.fromstring("all x.man(x)")
    alist = [
        Expression.fromstring(a)
        for a in [
            "man(John)",
            "man(Socrates)",
            "man(Bill)",
            "some x.(-(x = John) & man(x) & sees(John,x))",
            "some x.(-(x = Bill) & man(x))",
            "all x.some y.(man(x) -> gives(Socrates,x,y))",
        ]
    ]

    m = MaceCommand(g, assumptions=alist)
    m.build_model()

    spacer()
    print("Assumptions and Goal")
    spacer()
    for a in alist:
        print(" %s" % a)
    print(f"|- {g}: {decode_result(m.build_model())}\n")

    spacer()
    # print(m.model('standard'))
    # print(m.model('cooked'))
    print("Valuation")
    spacer()
    print(m.valuation, "\n")
def test_transform_output(argument_pair):
    """
    Transform the model into various Mace4 ``interpformat`` formats.
    """
    # NOTE(review): ``lp`` (a logic parser) and ``spacer`` are module-level
    # helpers not shown in this excerpt; verify before reuse.
    g = Expression.fromstring(argument_pair[0])
    alist = [lp.parse(a) for a in argument_pair[1]]

    m = MaceCommand(g, assumptions=alist)
    m.build_model()

    for a in alist:
        print(" %s" % a)
    print(f"|- {g}: {m.build_model()}\n")

    # Render the same model in each supported interpformat style.
    for format in ["standard", "portable", "xml", "cooked"]:
        spacer()
        print("Using '%s' format" % format)
        spacer()
        print(m.model(format=format))
# Canned (goal, assumptions) pairs exercised by the demo functions above.
arguments = [
    ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]),
    ("(not mortal(Socrates))", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]),
]
def demo():
    """Run the Mace demo suite over the canned Socrates arguments."""
    for runner, payload in (
        (test_model_found, arguments),
        (test_build_model, arguments),
        (test_transform_output, arguments[1]),
    ):
        runner(payload)
170,549 | from nltk.inference.api import BaseProverCommand, Prover
from nltk.internals import Counter
from nltk.sem.logic import (
AbstractVariableExpression,
AllExpression,
AndExpression,
ApplicationExpression,
EqualityExpression,
ExistsExpression,
Expression,
FunctionVariableExpression,
IffExpression,
ImpExpression,
LambdaExpression,
NegatedExpression,
OrExpression,
Variable,
VariableExpression,
unique_variable,
)
def testTableauProver():
    """Exercise the tableau prover on propositional and first-order cases."""
    # NOTE(review): ``tableau_test`` is a module-level helper not shown in
    # this excerpt (it parses the goal/assumptions and prints the verdict).
    tableau_test("P | -P")
    tableau_test("P & -P")
    tableau_test("Q", ["P", "(P -> Q)"])
    tableau_test("man(x)")
    tableau_test("(man(x) -> man(x))")
    tableau_test("(man(x) -> --man(x))")
    tableau_test("-(man(x) and -man(x))")
    tableau_test("(man(x) or -man(x))")
    tableau_test("(man(x) -> man(x))")
    tableau_test("-(man(x) and -man(x))")
    tableau_test("(man(x) or -man(x))")
    tableau_test("(man(x) -> man(x))")
    tableau_test("(man(x) iff man(x))")
    tableau_test("-(man(x) iff -man(x))")
    tableau_test("all x.man(x)")
    tableau_test("all x.all y.((x = y) -> (y = x))")
    tableau_test("all x.all y.all z.(((x = y) & (y = z)) -> (x = z))")
    # tableau_test('-all x.some y.F(x,y) & some x.all y.(-F(x,y))')
    # tableau_test('some x.all y.sees(x,y)')

    # Classic syllogism: all men are mortal; Socrates is a man.
    p1 = "all x.(man(x) -> mortal(x))"
    p2 = "man(Socrates)"
    c = "mortal(Socrates)"
    tableau_test(c, [p1, p2])

    p1 = "all x.(man(x) -> walks(x))"
    p2 = "man(John)"
    c = "some y.walks(y)"
    tableau_test(c, [p1, p2])

    # Equality substitution cases.
    p = "((x = y) & walks(y))"
    c = "walks(x)"
    tableau_test(c, [p])

    p = "((x = y) & ((y = z) & (z = w)))"
    c = "(x = w)"
    tableau_test(c, [p])

    p = "some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))"
    c = "some e0.walk(e0,mary)"
    tableau_test(c, [p])

    c = "(exists x.exists z3.((x = Mary) & ((z3 = John) & sees(z3,x))) <-> exists x.exists z4.((x = John) & ((z4 = Mary) & sees(x,z4))))"
    tableau_test(c)
def testHigherOrderTableauProver():
    """Exercise the tableau prover on higher-order belief/knowledge goals."""
    cases = [
        ("believe(j, -lie(b))", ["believe(j, -lie(b) & -cheat(b))"]),
        ("believe(j, lie(b) & cheat(b))", ["believe(j, lie(b))"]),
        # how do we capture that John believes all things that are true
        ("believe(j, lie(b))", ["lie(b)"]),
        (
            "believe(j, know(b, cheat(b)))",
            ["believe(j, know(b, lie(b)) & know(b, steals(b) & cheat(b)))"],
        ),
        ("P(Q(y), R(y) & R(z))", ["P(Q(x) & Q(y), R(y) & R(z))"]),
        ("believe(j, cheat(b) & lie(b))", ["believe(j, lie(b) & cheat(b))"]),
        ("believe(j, -cheat(b) & -lie(b))", ["believe(j, -lie(b) & -cheat(b))"]),
    ]
    for goal, assumptions in cases:
        tableau_test(goal, assumptions)
def demo():
    """Run both tableau prover test batteries."""
    for battery in (testTableauProver, testHigherOrderTableauProver):
        battery()
170,550 | import operator
from collections import defaultdict
from functools import reduce
from nltk.inference.api import BaseProverCommand, Prover
from nltk.sem import skolemize
from nltk.sem.logic import (
AndExpression,
ApplicationExpression,
EqualityExpression,
Expression,
IndividualVariableExpression,
NegatedExpression,
OrExpression,
Variable,
VariableExpression,
is_indvar,
unique_variable,
)
def _iterate_second(first, second, bindings, used, skipped, finalize_method, debug):
    """
    This method facilitates movement through the terms of 'other'.

    Explores both branches of the search at ``second[0]``: skipping it, and
    (when possible) unifying it with ``first[0]``; results from both branches
    are concatenated.
    """
    debug.line(f"unify({first},{second}) {bindings}")

    if not len(first) or not len(second):  # if no more recursions can be performed
        return finalize_method(first, second, bindings, used, skipped, debug)
    else:
        # skip this possible pairing and move to the next
        newskipped = (skipped[0], skipped[1] + [second[0]])
        result = _iterate_second(
            first, second[1:], bindings, used, newskipped, finalize_method, debug + 1
        )

        try:
            newbindings, newused, unused = _unify_terms(
                first[0], second[0], bindings, used
            )

            # Unification found, so progress with this line of unification
            # put skipped and unused terms back into play for later unification.
            newfirst = first[1:] + skipped[0] + unused[0]
            newsecond = second[1:] + skipped[1] + unused[1]
            result += _iterate_second(
                newfirst,
                newsecond,
                newbindings,
                newused,
                ([], []),
                finalize_method,
                debug + 1,
            )
        except BindingException:
            # the atoms could not be unified,
            pass

        return result
def _unify_terms(a, b, bindings=None, used=None):
    """
    This method attempts to unify two terms. Two expressions are unifiable
    if there exists a substitution function S such that S(a) == S(-b).

    :param a: ``Expression``
    :param b: ``Expression``
    :param bindings: ``BindingDict`` a starting set of bindings with which
        the unification must be consistent
    :return: ``BindingDict`` A dictionary of the bindings required to unify
    :raise ``BindingException``: If the terms cannot be unified
    """
    # NOTE(review): ``BindingDict`` and ``most_general_unification`` come from
    # elsewhere in the original module (not visible in this excerpt).
    assert isinstance(a, Expression)
    assert isinstance(b, Expression)

    if bindings is None:
        bindings = BindingDict()
    if used is None:
        used = ([], [])

    # Use resolution: a positive literal against a matching negated one.
    if isinstance(a, NegatedExpression) and isinstance(b, ApplicationExpression):
        newbindings = most_general_unification(a.term, b, bindings)
        newused = (used[0] + [a], used[1] + [b])
        unused = ([], [])
    elif isinstance(a, ApplicationExpression) and isinstance(b, NegatedExpression):
        newbindings = most_general_unification(a, b.term, bindings)
        newused = (used[0] + [a], used[1] + [b])
        unused = ([], [])

    # Use demodulation: an equality binds its left variable to its right term;
    # the partner literal is returned as "unused" for later consideration.
    elif isinstance(a, EqualityExpression):
        newbindings = BindingDict([(a.first.variable, a.second)])
        newused = (used[0] + [a], used[1])
        unused = ([], [b])
    elif isinstance(b, EqualityExpression):
        newbindings = BindingDict([(b.first.variable, b.second)])
        newused = (used[0], used[1] + [b])
        unused = ([a], [])

    else:
        raise BindingException((a, b))

    return newbindings, newused, unused
class BindingException(Exception):
    """Raised when two terms cannot be unified under a consistent binding."""

    def __init__(self, arg):
        # A (first, second) pair is formatted into a readable message;
        # anything else is passed through as the message itself.
        if isinstance(arg, tuple):
            message = "'%s' cannot be bound to '%s'" % arg
        else:
            message = arg
        super().__init__(message)
The provided code snippet includes necessary dependencies for implementing the `_iterate_first` function. Write a Python function `def _iterate_first(first, second, bindings, used, skipped, finalize_method, debug)` to solve the following problem:
This method facilitates movement through the terms of 'self'
Here is the function:
def _iterate_first(first, second, bindings, used, skipped, finalize_method, debug):
    """
    This method facilitates movement through the terms of 'self'.

    Mirror image of ``_iterate_second``: explores pairing ``first[0]`` against
    terms of ``second``, skipping ``first[0]``, and unifying it directly with
    ``second[0]``; results from all branches are concatenated.
    """
    debug.line(f"unify({first},{second}) {bindings}")

    if not len(first) or not len(second):  # if no more recursions can be performed
        return finalize_method(first, second, bindings, used, skipped, debug)
    else:
        # explore this 'self' atom
        result = _iterate_second(
            first, second, bindings, used, skipped, finalize_method, debug + 1
        )

        # skip this possible 'self' atom
        newskipped = (skipped[0] + [first[0]], skipped[1])
        result += _iterate_first(
            first[1:], second, bindings, used, newskipped, finalize_method, debug + 1
        )

        try:
            newbindings, newused, unused = _unify_terms(
                first[0], second[0], bindings, used
            )

            # Unification found, so progress with this line of unification
            # put skipped and unused terms back into play for later unification.
            newfirst = first[1:] + skipped[0] + unused[0]
            newsecond = second[1:] + skipped[1] + unused[1]
            result += _iterate_first(
                newfirst,
                newsecond,
                newbindings,
                newused,
                ([], []),
                finalize_method,
                debug + 1,
            )
        except BindingException:
            # the atoms could not be unified,
            pass

        return result
170,551 | import operator
from collections import defaultdict
from functools import reduce
from nltk.inference.api import BaseProverCommand, Prover
from nltk.sem import skolemize
from nltk.sem.logic import (
AndExpression,
ApplicationExpression,
EqualityExpression,
Expression,
IndividualVariableExpression,
NegatedExpression,
OrExpression,
Variable,
VariableExpression,
is_indvar,
unique_variable,
)
# NOTE(review): the method bodies of this class appear to have been stripped
# during extraction -- only the signatures remain, so this block is NOT valid
# Python as-is. Restore the bodies from the original module before use.
class Clause(list):
    def __init__(self, data):
    def unify(self, other, bindings=None, used=None, skipped=None, debug=False):
    def isSubsetOf(self, other):
    def subsumes(self, other):
    def __getslice__(self, start, end):
    def __sub__(self, other):
    def __add__(self, other):
    def is_tautology(self):
    def free(self):
    def replace(self, variable, expression):
    def substitute_bindings(self, bindings):
    def __str__(self):
    def __repr__(self):
def _complete_unify_path(first, second, bindings, used, skipped, debug):
    # Terminal step of the unification search: if any terms were consumed
    # ("used") along this path, emit the resolvent clause built from the
    # remaining and skipped terms with the accumulated bindings applied.
    if used[0] or used[1]:  # if bindings were made along the path
        newclause = Clause(skipped[0] + skipped[1] + first + second)
        debug.line(" -> New Clause: %s" % newclause)
        return [newclause.substitute_bindings(bindings)]
    else:  # no bindings made means no unification occurred.  so no result
        debug.line(" -> End")
        return []
170,552 | import operator
from collections import defaultdict
from functools import reduce
from nltk.inference.api import BaseProverCommand, Prover
from nltk.sem import skolemize
from nltk.sem.logic import (
AndExpression,
ApplicationExpression,
EqualityExpression,
Expression,
IndividualVariableExpression,
NegatedExpression,
OrExpression,
Variable,
VariableExpression,
is_indvar,
unique_variable,
)
def _subsumes_finalize(first, second, bindings, used, skipped, debug):
if not len(skipped[0]) and not len(first):
# If there are no skipped terms and no terms left in 'first', then
# all of the terms in the original 'self' were unified with terms
# in 'other'. Therefore, there exists a binding (this one) such that
# every term in self can be unified with a term in other, which
# is the definition of subsumption.
return [True]
else:
return [] | null |
170,553 | import operator
from collections import defaultdict
from functools import reduce
from nltk.inference.api import BaseProverCommand, Prover
from nltk.sem import skolemize
from nltk.sem.logic import (
AndExpression,
ApplicationExpression,
EqualityExpression,
Expression,
IndividualVariableExpression,
NegatedExpression,
OrExpression,
Variable,
VariableExpression,
is_indvar,
unique_variable,
)
class ResolutionProverCommand(BaseProverCommand):
    # NOTE(review): in the original NLTK module ``_decorate_clauses`` is a
    # @staticmethod; the decorator appears to have been stripped during
    # extraction -- confirm against the original source.

    def __init__(self, goal=None, assumptions=None, prover=None):
        """
        :param goal: Input expression to prove
        :type goal: sem.Expression
        :param assumptions: Input expressions to use as assumptions in
            the proof.
        :type assumptions: list(sem.Expression)
        """
        if prover is not None:
            assert isinstance(prover, ResolutionProver)
        else:
            prover = ResolutionProver()

        BaseProverCommand.__init__(self, prover, goal, assumptions)
        # Clauses produced by the last proof run; populated by ``prove``.
        self._clauses = None

    def prove(self, verbose=False):
        """
        Perform the actual proof. Store the result to prevent unnecessary
        re-proving.
        """
        if self._result is None:
            self._result, clauses = self._prover._prove(
                self.goal(), self.assumptions(), verbose
            )
            self._clauses = clauses
            self._proof = ResolutionProverCommand._decorate_clauses(clauses)
        return self._result

    def find_answers(self, verbose=False):
        # Collect the concrete terms bound to the special ANSWER predicate in
        # the derived clauses (skipping still-unbound individual variables).
        self.prove(verbose)

        answers = set()
        answer_ex = VariableExpression(Variable(ResolutionProver.ANSWER_KEY))
        for clause in self._clauses:
            for term in clause:
                if (
                    isinstance(term, ApplicationExpression)
                    and term.function == answer_ex
                    and not isinstance(term.argument, IndividualVariableExpression)
                ):
                    answers.add(term.argument)
        return answers

    def _decorate_clauses(clauses):
        """
        Decorate the proof output.
        """
        out = ""
        max_clause_len = max(len(str(clause)) for clause in clauses)
        max_seq_len = len(str(len(clauses)))
        for i in range(len(clauses)):
            parents = "A"
            taut = ""
            if clauses[i].is_tautology():
                taut = "Tautology"
            if clauses[i]._parents:
                parents = str(clauses[i]._parents)
            # Right-pad so parent annotations line up in a column.
            parents = " " * (max_clause_len - len(str(clauses[i])) + 1) + parents
            seq = " " * (max_seq_len - len(str(i + 1))) + str(i + 1)
            out += f"[{seq}] {clauses[i]} {parents} {taut}\n"
        return out
def testResolutionProver():
    """Run the resolution prover over propositional and first-order cases."""
    # NOTE(review): ``resolution_test`` and ``ResolutionProver`` are defined
    # elsewhere in the original module (not visible in this excerpt).
    resolution_test(r"man(x)")
    resolution_test(r"(man(x) -> man(x))")
    resolution_test(r"(man(x) -> --man(x))")
    resolution_test(r"-(man(x) and -man(x))")
    resolution_test(r"(man(x) or -man(x))")
    resolution_test(r"(man(x) -> man(x))")
    resolution_test(r"-(man(x) and -man(x))")
    resolution_test(r"(man(x) or -man(x))")
    resolution_test(r"(man(x) -> man(x))")
    resolution_test(r"(man(x) iff man(x))")
    resolution_test(r"-(man(x) iff -man(x))")
    resolution_test("all x.man(x)")
    resolution_test("-all x.some y.F(x,y) & some x.all y.(-F(x,y))")
    resolution_test("some x.all y.sees(x,y)")

    # Classic syllogism: all men are mortal; Socrates is a man.
    p1 = Expression.fromstring(r"all x.(man(x) -> mortal(x))")
    p2 = Expression.fromstring(r"man(Socrates)")
    c = Expression.fromstring(r"mortal(Socrates)")
    print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}")

    p1 = Expression.fromstring(r"all x.(man(x) -> walks(x))")
    p2 = Expression.fromstring(r"man(John)")
    c = Expression.fromstring(r"some y.walks(y)")
    print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}")

    p = Expression.fromstring(r"some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))")
    c = Expression.fromstring(r"some e0.walk(e0,mary)")
    print(f"{p} |- {c}: {ResolutionProver().prove(c, [p])}")
def test_clausify():
    """Print the clausal normal form of a battery of sample formulas."""
    parse = Expression.fromstring
    formulas = [
        "P(x) | Q(x)",
        "(P(x) & Q(x)) | R(x)",
        "P(x) | (Q(x) & R(x))",
        "(P(x) & Q(x)) | (R(x) & S(x))",
        "P(x) | Q(x) | R(x)",
        "P(x) | (Q(x) & R(x)) | S(x)",
        "exists x.P(x) | Q(x)",
        "-(-P(x) & Q(x))",
        "P(x) <-> Q(x)",
        "-(P(x) <-> Q(x))",
        "-(all x.P(x))",
        "-(some x.P(x))",
        "some x.P(x)",
        "some x.all y.P(x,y)",
        "all y.some x.P(x,y)",
        "all z.all y.some x.P(x,y,z)",
        "all x.(all y.P(x,y) -> -all y.(Q(x,y) -> R(x,y)))",
    ]
    for formula in formulas:
        print(clausify(parse(formula)))
class Expression(SubstituteBindingsI):
    """This is the base abstract object for all logical expressions"""

    # NOTE(review): in the original NLTK module ``fromstring`` is decorated
    # with @classmethod; the decorator appears to have been stripped during
    # extraction -- confirm against the original source.

    # Shared parsers used by ``fromstring``.
    _logic_parser = LogicParser()
    _type_checking_logic_parser = LogicParser(type_check=True)

    def fromstring(cls, s, type_check=False, signature=None):
        # Parse ``s`` into an Expression, optionally with type checking.
        if type_check:
            return cls._type_checking_logic_parser.parse(s, signature)
        else:
            return cls._logic_parser.parse(s, signature)

    def __call__(self, other, *additional):
        # Function application: expr(a, b) == expr.applyto(a)(b).
        accum = self.applyto(other)
        for a in additional:
            accum = accum(a)
        return accum

    def applyto(self, other):
        assert isinstance(other, Expression), "%s is not an Expression" % other
        return ApplicationExpression(self, other)

    def __neg__(self):
        return NegatedExpression(self)

    def negate(self):
        """If this is a negated expression, remove the negation.
        Otherwise add a negation."""
        return -self

    def __and__(self, other):
        if not isinstance(other, Expression):
            raise TypeError("%s is not an Expression" % other)
        return AndExpression(self, other)

    def __or__(self, other):
        if not isinstance(other, Expression):
            raise TypeError("%s is not an Expression" % other)
        return OrExpression(self, other)

    def __gt__(self, other):
        # ``>`` builds implication, not a comparison.
        if not isinstance(other, Expression):
            raise TypeError("%s is not an Expression" % other)
        return ImpExpression(self, other)

    def __lt__(self, other):
        # ``<`` builds a biconditional, not a comparison.
        if not isinstance(other, Expression):
            raise TypeError("%s is not an Expression" % other)
        return IffExpression(self, other)

    def __eq__(self, other):
        # Subclasses define structural equality.
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def equiv(self, other, prover=None):
        """
        Check for logical equivalence.
        Pass the expression (self <-> other) to the theorem prover.
        If the prover says it is valid, then the self and other are equal.

        :param other: an ``Expression`` to check equality against
        :param prover: a ``nltk.inference.api.Prover``
        """
        assert isinstance(other, Expression), "%s is not an Expression" % other

        if prover is None:
            from nltk.inference import Prover9

            prover = Prover9()
        bicond = IffExpression(self.simplify(), other.simplify())
        return prover.prove(bicond)

    def __hash__(self):
        return hash(repr(self))

    def substitute_bindings(self, bindings):
        expr = self
        for var in expr.variables():
            if var in bindings:
                val = bindings[var]
                if isinstance(val, Variable):
                    val = self.make_VariableExpression(val)
                elif not isinstance(val, Expression):
                    raise ValueError(
                        "Can not substitute a non-expression "
                        "value into an expression: %r" % (val,)
                    )
                # Substitute bindings in the target value.
                val = val.substitute_bindings(bindings)
                # Replace var w/ the target value.
                expr = expr.replace(var, val)
        return expr.simplify()

    def typecheck(self, signature=None):
        """
        Infer and check types. Raise exceptions if necessary.

        :param signature: dict that maps variable names to types (or string
            representations of types)
        :return: the signature, plus any additional type mappings
        """
        sig = defaultdict(list)
        if signature:
            for key in signature:
                val = signature[key]
                varEx = VariableExpression(Variable(key))
                if isinstance(val, Type):
                    varEx.type = val
                else:
                    varEx.type = read_type(val)
                sig[key].append(varEx)

        self._set_type(signature=sig)

        return {key: sig[key][0].type for key in sig}

    def findtype(self, variable):
        """
        Find the type of the given variable as it is used in this expression.
        For example, finding the type of "P" in "P(x) & Q(x,y)" yields "<e,t>"

        :param variable: Variable
        """
        raise NotImplementedError()

    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """
        Set the type of this expression to be the given type. Raise type
        exceptions where applicable.

        :param other_type: Type
        :param signature: dict(str -> list(AbstractVariableExpression))
        """
        raise NotImplementedError()

    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """
        Replace every instance of 'variable' with 'expression'

        :param variable: ``Variable`` The variable to replace
        :param expression: ``Expression`` The expression with which to replace it
        :param replace_bound: bool Should bound variables be replaced?
        :param alpha_convert: bool Alpha convert automatically to avoid name clashes?
        """
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        assert isinstance(expression, Expression), (
            "%s is not an Expression" % expression
        )

        return self.visit_structured(
            lambda e: e.replace(variable, expression, replace_bound, alpha_convert),
            self.__class__,
        )

    def normalize(self, newvars=None):
        """Rename auto-generated unique variables"""

        def get_indiv_vars(e):
            # Collect all individual-variable subexpressions of ``e``.
            if isinstance(e, IndividualVariableExpression):
                return {e}
            elif isinstance(e, AbstractVariableExpression):
                return set()
            else:
                return e.visit(
                    get_indiv_vars, lambda parts: reduce(operator.or_, parts, set())
                )

        result = self
        # Rename event variables to e01, e02, ... and other individual
        # variables to z1, z2, ... in a deterministic (sorted) order.
        for i, e in enumerate(sorted(get_indiv_vars(self), key=lambda e: e.variable)):
            if isinstance(e, EventVariableExpression):
                newVar = e.__class__(Variable("e0%s" % (i + 1)))
            elif isinstance(e, IndividualVariableExpression):
                newVar = e.__class__(Variable("z%s" % (i + 1)))
            else:
                newVar = e
            result = result.replace(e.variable, newVar, True)
        return result

    def visit(self, function, combinator):
        """
        Recursively visit subexpressions. Apply 'function' to each
        subexpression and pass the result of each function application
        to the 'combinator' for aggregation:

            return combinator(map(function, self.subexpressions))

        Bound variables are neither applied upon by the function nor given to
        the combinator.

        :param function: ``Function<Expression,T>`` to call on each subexpression
        :param combinator: ``Function<list<T>,R>`` to combine the results of the
            function calls
        :return: result of combination ``R``
        """
        raise NotImplementedError()

    def visit_structured(self, function, combinator):
        """
        Recursively visit subexpressions. Apply 'function' to each
        subexpression and pass the result of each function application
        to the 'combinator' for aggregation. The combinator must have
        the same signature as the constructor. The function is not
        applied to bound variables, but they are passed to the
        combinator.

        :param function: ``Function`` to call on each subexpression
        :param combinator: ``Function`` with the same signature as the
            constructor, to combine the results of the function calls
        :return: result of combination
        """
        return self.visit(function, lambda parts: combinator(*parts))

    def __repr__(self):
        return f"<{self.__class__.__name__} {self}>"

    def __str__(self):
        return self.str()

    def variables(self):
        """
        Return a set of all the variables for binding substitution.
        The variables returned include all free (non-bound) individual
        variables and any variable starting with '?' or '@'.

        :return: set of ``Variable`` objects
        """
        return self.free() | {
            p for p in self.predicates() | self.constants() if re.match("^[?@]", p.name)
        }

    def free(self):
        """
        Return a set of all the free (non-bound) variables. This includes
        both individual and predicate variables, but not constants.

        :return: set of ``Variable`` objects
        """
        return self.visit(
            lambda e: e.free(), lambda parts: reduce(operator.or_, parts, set())
        )

    def constants(self):
        """
        Return a set of individual constants (non-predicates).

        :return: set of ``Variable`` objects
        """
        return self.visit(
            lambda e: e.constants(), lambda parts: reduce(operator.or_, parts, set())
        )

    def predicates(self):
        """
        Return a set of predicates (constants, not variables).

        :return: set of ``Variable`` objects
        """
        return self.visit(
            lambda e: e.predicates(), lambda parts: reduce(operator.or_, parts, set())
        )

    def simplify(self):
        """
        :return: beta-converted version of this expression
        """
        return self.visit_structured(lambda e: e.simplify(), self.__class__)

    def make_VariableExpression(self, variable):
        return VariableExpression(variable)
def demo():
    """Run the resolution demos: clausification, the prover battery, and a
    trivial self-entailment proof."""
    test_clausify()
    print()
    testResolutionProver()
    print()
    goal = Expression.fromstring("man(x)")
    command = ResolutionProverCommand(goal, [goal])
    print(command.prove())
170,554 | import os
from abc import ABCMeta, abstractmethod
from functools import reduce
from operator import add, and_
from nltk.data import show_cfg
from nltk.inference.mace import MaceCommand
from nltk.inference.prover9 import Prover9Command
from nltk.parse import load_parser
from nltk.parse.malt import MaltParser
from nltk.sem.drt import AnaphoraResolutionException, resolve_anaphora
from nltk.sem.glue import DrtGlue
from nltk.sem.logic import Expression
from nltk.tag import RegexpTagger
class Expression(SubstituteBindingsI):
"""This is the base abstract object for all logical expressions"""
_logic_parser = LogicParser()
_type_checking_logic_parser = LogicParser(type_check=True)
def fromstring(cls, s, type_check=False, signature=None):
if type_check:
return cls._type_checking_logic_parser.parse(s, signature)
else:
return cls._logic_parser.parse(s, signature)
def __call__(self, other, *additional):
accum = self.applyto(other)
for a in additional:
accum = accum(a)
return accum
def applyto(self, other):
assert isinstance(other, Expression), "%s is not an Expression" % other
return ApplicationExpression(self, other)
def __neg__(self):
return NegatedExpression(self)
def negate(self):
"""If this is a negated expression, remove the negation.
Otherwise add a negation."""
return -self
def __and__(self, other):
if not isinstance(other, Expression):
raise TypeError("%s is not an Expression" % other)
return AndExpression(self, other)
def __or__(self, other):
if not isinstance(other, Expression):
raise TypeError("%s is not an Expression" % other)
return OrExpression(self, other)
def __gt__(self, other):
if not isinstance(other, Expression):
raise TypeError("%s is not an Expression" % other)
return ImpExpression(self, other)
def __lt__(self, other):
if not isinstance(other, Expression):
raise TypeError("%s is not an Expression" % other)
return IffExpression(self, other)
def __eq__(self, other):
return NotImplemented
def __ne__(self, other):
return not self == other
def equiv(self, other, prover=None):
"""
Check for logical equivalence.
Pass the expression (self <-> other) to the theorem prover.
If the prover says it is valid, then the self and other are equal.
:param other: an ``Expression`` to check equality against
:param prover: a ``nltk.inference.api.Prover``
"""
assert isinstance(other, Expression), "%s is not an Expression" % other
if prover is None:
from nltk.inference import Prover9
prover = Prover9()
bicond = IffExpression(self.simplify(), other.simplify())
return prover.prove(bicond)
def __hash__(self):
return hash(repr(self))
def substitute_bindings(self, bindings):
expr = self
for var in expr.variables():
if var in bindings:
val = bindings[var]
if isinstance(val, Variable):
val = self.make_VariableExpression(val)
elif not isinstance(val, Expression):
raise ValueError(
"Can not substitute a non-expression "
"value into an expression: %r" % (val,)
)
# Substitute bindings in the target value.
val = val.substitute_bindings(bindings)
# Replace var w/ the target value.
expr = expr.replace(var, val)
return expr.simplify()
def typecheck(self, signature=None):
"""
Infer and check types. Raise exceptions if necessary.
:param signature: dict that maps variable names to types (or string
representations of types)
:return: the signature, plus any additional type mappings
"""
sig = defaultdict(list)
if signature:
for key in signature:
val = signature[key]
varEx = VariableExpression(Variable(key))
if isinstance(val, Type):
varEx.type = val
else:
varEx.type = read_type(val)
sig[key].append(varEx)
self._set_type(signature=sig)
return {key: sig[key][0].type for key in sig}
def findtype(self, variable):
"""
Find the type of the given variable as it is used in this expression.
For example, finding the type of "P" in "P(x) & Q(x,y)" yields "<e,t>"
:param variable: Variable
"""
raise NotImplementedError()
def _set_type(self, other_type=ANY_TYPE, signature=None):
"""
Set the type of this expression to be the given type. Raise type
exceptions where applicable.
:param other_type: Type
:param signature: dict(str -> list(AbstractVariableExpression))
"""
raise NotImplementedError()
def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
    """
    Replace every instance of 'variable' with 'expression'

    :param variable: ``Variable`` The variable to replace
    :param expression: ``Expression`` The expression with which to replace it
    :param replace_bound: bool Should bound variables be replaced?
    :param alpha_convert: bool Alpha convert automatically to avoid name clashes?
    :return: a new ``Expression`` of the same class with the replacement applied
    """
    assert isinstance(variable, Variable), "%s is not a Variable" % variable
    assert isinstance(expression, Expression), (
        "%s is not an Expression" % expression
    )

    def _replace_in(subexpr):
        # recurse with identical arguments into every subexpression
        return subexpr.replace(variable, expression, replace_bound, alpha_convert)

    return self.visit_structured(_replace_in, self.__class__)
def normalize(self, newvars=None):
    """Rename auto-generated unique variables.

    Event variables are renamed to ``e01``, ``e02``, ... and individual
    variables to ``z1``, ``z2``, ..., in sorted order of the original
    variable names.

    :param newvars: not used in this implementation -- TODO confirm intent
    :return: the renamed ``Expression``
    """

    def get_indiv_vars(e):
        # Collect the individual-variable leaves of the expression tree.
        if isinstance(e, IndividualVariableExpression):
            return {e}
        elif isinstance(e, AbstractVariableExpression):
            # other variable kinds (constants, predicates) are not renamed
            return set()
        else:
            return e.visit(
                get_indiv_vars, lambda parts: reduce(operator.or_, parts, set())
            )

    result = self
    # sort for deterministic numbering regardless of set iteration order
    for i, e in enumerate(sorted(get_indiv_vars(self), key=lambda e: e.variable)):
        if isinstance(e, EventVariableExpression):
            newVar = e.__class__(Variable("e0%s" % (i + 1)))
        elif isinstance(e, IndividualVariableExpression):
            newVar = e.__class__(Variable("z%s" % (i + 1)))
        else:
            newVar = e
        # replace_bound=True: rename bound occurrences as well
        result = result.replace(e.variable, newVar, True)
    return result
def visit(self, function, combinator):
    """
    Recursively visit subexpressions. Apply 'function' to each
    subexpression and pass the result of each function application
    to the 'combinator' for aggregation:

        return combinator(map(function, self.subexpressions))

    Bound variables are neither applied upon by the function nor given to
    the combinator.

    Abstract; every concrete subclass must override this.

    :param function: ``Function<Expression,T>`` to call on each subexpression
    :param combinator: ``Function<list<T>,R>`` to combine the results of the
        function calls
    :return: result of combination ``R``
    :raises NotImplementedError: always, on this base class
    """
    raise NotImplementedError()
def visit_structured(self, function, combinator):
    """
    Recursively visit subexpressions. Apply 'function' to each
    subexpression and pass the result of each function application
    to the 'combinator' for aggregation. The combinator must have
    the same signature as the constructor. The function is not
    applied to bound variables, but they are passed to the
    combinator.

    :param function: ``Function`` to call on each subexpression
    :param combinator: ``Function`` with the same signature as the
        constructor, to combine the results of the function calls
    :return: result of combination
    """

    def _spread(parts):
        # adapt the list-taking visit() combinator to a positional one
        return combinator(*parts)

    return self.visit(function, _spread)
def __repr__(self):
    """Angle-bracketed repr: the class name followed by the printed form."""
    cls_name = self.__class__.__name__
    return f"<{cls_name} {self}>"
def __str__(self):
    """Delegate printing to the subclass-defined ``str()`` method."""
    return self.str()
def variables(self):
    """
    Return a set of all the variables for binding substitution.
    The variables returned include all free (non-bound) individual
    variables and any variable starting with '?' or '@'.

    :return: set of ``Variable`` objects
    """
    special = {
        p
        for p in self.predicates() | self.constants()
        if p.name.startswith(("?", "@"))
    }
    return self.free() | special
def free(self):
    """
    Return a set of all the free (non-bound) variables. This includes
    both individual and predicate variables, but not constants.

    :return: set of ``Variable`` objects
    """

    def _collect(subexpr):
        return subexpr.free()

    def _union(parts):
        return reduce(operator.or_, parts, set())

    return self.visit(_collect, _union)
def constants(self):
    """
    Return a set of individual constants (non-predicates).

    :return: set of ``Variable`` objects
    """

    def _collect(subexpr):
        return subexpr.constants()

    def _union(parts):
        return reduce(operator.or_, parts, set())

    return self.visit(_collect, _union)
def predicates(self):
    """
    Return a set of predicates (constants, not variables).

    :return: set of ``Variable`` objects
    """

    def _collect(subexpr):
        return subexpr.predicates()

    def _union(parts):
        return reduce(operator.or_, parts, set())

    return self.visit(_collect, _union)
def simplify(self):
    """
    :return: beta-converted version of this expression
    """

    def _simp(subexpr):
        return subexpr.simplify()

    # rebuild with the same constructor, simplifying each subexpression
    return self.visit_structured(_simp, self.__class__)
def make_VariableExpression(self, variable):
    """Factory hook: wrap ``variable`` in a ``VariableExpression``.

    Subclasses may override to produce their own variable-expression kind.
    """
    return VariableExpression(variable)
The provided code snippet includes the necessary dependencies for implementing the `load_fol` function. Write a Python function `def load_fol(s)` to solve the following problem:
Temporarily duplicated from ``nltk.sem.util``. Convert a file of first-order formulas into a list of ``Expression`` objects.
:param s: the contents of the file
:type s: str
:return: a list of parsed formulas.
:rtype: list(Expression)
Here is the function:
def load_fol(s):
    """
    Temporarily duplicated from ``nltk.sem.util``.
    Convert a file of first order formulas into a list of ``Expression`` objects.

    :param s: the contents of the file
    :type s: str
    :return: a list of parsed formulas.
    :rtype: list(Expression)
    :raises ValueError: if a non-comment, non-blank line cannot be parsed
    """
    statements = []
    # BUGFIX: enumerate from 1 so the error message reports the human
    # (1-based) line number instead of the 0-based index.
    for linenum, line in enumerate(s.splitlines(), start=1):
        line = line.strip()
        # skip comments and blank lines
        if line.startswith("#") or line == "":
            continue
        try:
            statements.append(Expression.fromstring(line))
        except Exception as e:
            raise ValueError(f"Unable to parse line {linenum}: {line}") from e
    return statements
170,555 | import os
from abc import ABCMeta, abstractmethod
from functools import reduce
from operator import add, and_
from nltk.data import show_cfg
from nltk.inference.mace import MaceCommand
from nltk.inference.prover9 import Prover9Command
from nltk.parse import load_parser
from nltk.parse.malt import MaltParser
from nltk.sem.drt import AnaphoraResolutionException, resolve_anaphora
from nltk.sem.glue import DrtGlue
from nltk.sem.logic import Expression
from nltk.tag import RegexpTagger
def spacer(num=30):
    """Print a horizontal separator of ``num`` dashes."""
    rule = "-" * num
    print(rule)
170,556 | import os
from abc import ABCMeta, abstractmethod
from functools import reduce
from operator import add, and_
from nltk.data import show_cfg
from nltk.inference.mace import MaceCommand
from nltk.inference.prover9 import Prover9Command
from nltk.parse import load_parser
from nltk.parse.malt import MaltParser
from nltk.sem.drt import AnaphoraResolutionException, resolve_anaphora
from nltk.sem.glue import DrtGlue
from nltk.sem.logic import Expression
from nltk.tag import RegexpTagger
class DrtGlueReadingCommand(ReadingCommand):
    """A ``ReadingCommand`` that derives DRT readings via Glue semantics."""

    def __init__(self, semtype_file=None, remove_duplicates=False, depparser=None):
        """
        :param semtype_file: name of file where grammar can be loaded
        :param remove_duplicates: should duplicates be removed?
        :param depparser: the dependency parser
        """
        if semtype_file is None:
            # fall back on the sample grammar shipped with NLTK
            semtype_file = os.path.join(
                "grammars", "sample_grammars", "drt_glue.semtype"
            )
        self._glue = DrtGlue(
            semtype_file=semtype_file,
            remove_duplicates=remove_duplicates,
            depparser=depparser,
        )

    def parse_to_readings(self, sentence):
        """:see: ReadingCommand.parse_to_readings()"""
        return self._glue.parse_to_meaning(sentence)

    def process_thread(self, sentence_readings):
        """:see: ReadingCommand.process_thread()"""
        try:
            combined = self.combine_readings(sentence_readings)
        except AnaphoraResolutionException:
            # a thread whose anaphora cannot be resolved yields no reading
            return []
        else:
            return [combined]

    def combine_readings(self, readings):
        """:see: ReadingCommand.combine_readings()"""
        merged = reduce(add, readings)
        return resolve_anaphora(merged.simplify())

    def to_fol(self, expression):
        """:see: ReadingCommand.to_fol()"""
        return expression.fol()
def discourse_demo(reading_command=None):
    """
    Illustrate the various methods of ``DiscourseTester``

    :param reading_command: optional ``ReadingCommand`` controlling how
        sentences are parsed to readings; ``None`` uses the default
    """
    # --- scenario 1: incrementally extend a two-sentence discourse ---
    dt = DiscourseTester(
        ["A boxer walks", "Every boxer chases a girl"], reading_command
    )
    dt.models()
    print()
    # dt.grammar()
    print()
    dt.sentences()
    print()
    dt.readings()
    print()
    dt.readings(threaded=True)
    print()
    dt.models("d1")
    dt.add_sentence("John is a boxer")
    print()
    dt.sentences()
    print()
    dt.readings(threaded=True)
    print()
    # --- scenario 2: consistency / informativeness checking ---
    dt = DiscourseTester(
        ["A student dances", "Every student is a person"], reading_command
    )
    print()
    dt.add_sentence("No person dances", consistchk=True)
    print()
    dt.readings()
    print()
    dt.retract_sentence("No person dances", verbose=True)
    print()
    dt.models()
    print()
    dt.readings("A person dances")
    print()
    dt.add_sentence("A person dances", informchk=True)
    # --- scenario 3: filtering readings against background knowledge ---
    dt = DiscourseTester(
        ["Vincent is a boxer", "Fido is a boxer", "Vincent is married", "Fido barks"],
        reading_command,
    )
    dt.readings(filter=True)
    import nltk.data

    background_file = os.path.join("grammars", "book_grammars", "background.fol")
    background = nltk.data.load(background_file)

    print()
    dt.add_background(background, verbose=False)
    dt.background()
    print()
    dt.readings(filter=True)
    print()
    dt.models()
def drt_discourse_demo(reading_command=None):
    """
    Illustrate the various methods of ``DiscourseTester`` with
    DRT-based readings (including thread-reading display and filtering).

    :param reading_command: optional ``ReadingCommand``; ``None`` uses the
        default
    """
    dt = DiscourseTester(["every dog chases a boy", "he runs"], reading_command)
    dt.models()
    print()
    dt.sentences()
    print()
    dt.readings()
    print()
    dt.readings(show_thread_readings=True)
    print()
    dt.readings(filter=True, show_thread_readings=True)
class MaltParser(ParserI):
    """
    A class for dependency parsing with MaltParser. The input is the paths to:
    - (optionally) a maltparser directory
    - (optionally) the path to a pre-trained MaltParser .mco model file
    - (optionally) the tagger to use for POS tagging before parsing
    - (optionally) additional Java arguments

    Example:
    >>> from nltk.parse import malt
    >>> # With MALT_PARSER and MALT_MODEL environment set.
    >>> mp = malt.MaltParser(model_filename='engmalt.linear-1.7.mco') # doctest: +SKIP
    >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP
    (shot I (elephant an) (in (pajamas my)) .)
    >>> # Without MALT_PARSER and MALT_MODEL environment.
    >>> mp = malt.MaltParser('/home/user/maltparser-1.9.2/', '/home/user/engmalt.linear-1.7.mco') # doctest: +SKIP
    >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP
    (shot I (elephant an) (in (pajamas my)) .)
    """

    def __init__(
        self,
        parser_dirname="",
        model_filename=None,
        tagger=None,
        additional_java_args=None,
    ):
        """
        An interface for parsing with the Malt Parser.

        :param parser_dirname: The path to the maltparser directory that
            contains the maltparser-1.x.jar
        :type parser_dirname: str
        :param model_filename: The name of the pre-trained model with .mco file
            extension. If provided, training will not be required.
            (see http://www.maltparser.org/mco/mco.html and
            see http://www.patful.com/chalk/node/185)
        :type model_filename: str
        :param tagger: The tagger used to POS tag the raw string before
            formatting to CONLL format. It should behave like `nltk.pos_tag`
        :type tagger: function
        :param additional_java_args: This is the additional Java arguments that
            one can use when calling Maltparser, usually this is the heapsize
            limits, e.g. `additional_java_args=['-Xmx1024m']`
            (see https://goo.gl/mpDBvQ)
        :type additional_java_args: list
        """
        # Find all the necessary jar files for MaltParser.
        self.malt_jars = find_maltparser(parser_dirname)
        # Initialize additional java arguments.
        self.additional_java_args = (
            additional_java_args if additional_java_args is not None else []
        )
        # Initialize model; "malt_temp.mco" is the placeholder name used
        # before train() has been called.
        self.model = find_malt_model(model_filename)
        self._trained = self.model != "malt_temp.mco"
        # Set the working_dir parameters i.e. `-w` from MaltParser's option.
        self.working_dir = tempfile.gettempdir()
        # Initialize POS tagger.
        self.tagger = tagger if tagger is not None else malt_regex_tagger()

    def parse_tagged_sents(self, sentences, verbose=False, top_relation_label="null"):
        """
        Use MaltParser to parse multiple POS tagged sentences. Takes multiple
        sentences where each sentence is a list of (word, tag) tuples.
        The sentences must have already been tokenized and tagged.

        :param sentences: Input sentences to parse
        :type sentence: list(list(tuple(str, str)))
        :return: iter(iter(``DependencyGraph``)) the dependency graph
            representation of each sentence
        :raises Exception: if the parser is untrained or MaltParser exits
            with a non-zero status
        """
        if not self._trained:
            raise Exception("Parser has not been trained. Call train() first.")

        with tempfile.NamedTemporaryFile(
            prefix="malt_input.conll.", dir=self.working_dir, mode="w", delete=False
        ) as input_file:
            with tempfile.NamedTemporaryFile(
                prefix="malt_output.conll.",
                dir=self.working_dir,
                mode="w",
                delete=False,
            ) as output_file:
                # Convert list of sentences to CONLL format.
                for line in taggedsents_to_conll(sentences):
                    input_file.write(str(line))
                input_file.close()

                # Generate command to run maltparser.
                cmd = self.generate_malt_command(
                    input_file.name, output_file.name, mode="parse"
                )

                # This is a maltparser quirk, it needs to be run
                # where the model file is. otherwise it goes into an awkward
                # missing .jars or strange -w working_dir problem.
                _current_path = os.getcwd()  # Remembers the current path.
                try:  # Change to modelfile path
                    os.chdir(os.path.split(self.model)[0])
                except OSError:
                    # BUGFIX: was a bare ``except:``; only a failed chdir
                    # (e.g. a model with no directory component) should be
                    # ignored -- a bare except also swallowed KeyboardInterrupt.
                    pass
                try:
                    ret = self._execute(cmd, verbose)  # Run command.
                finally:
                    # BUGFIX: restore the working directory even if
                    # _execute raises, so the chdir cannot leak.
                    os.chdir(_current_path)
                if ret != 0:
                    raise Exception(
                        "MaltParser parsing (%s) failed with exit "
                        "code %d" % (" ".join(cmd), ret)
                    )

                # Must return iter(iter(Tree))
                with open(output_file.name) as infile:
                    for tree_str in infile.read().split("\n\n"):
                        yield (
                            iter(
                                [
                                    DependencyGraph(
                                        tree_str, top_relation_label=top_relation_label
                                    )
                                ]
                            )
                        )

        os.remove(input_file.name)
        os.remove(output_file.name)

    def parse_sents(self, sentences, verbose=False, top_relation_label="null"):
        """
        Use MaltParser to parse multiple sentences.
        Takes a list of sentences, where each sentence is a list of words.
        Each sentence will be automatically tagged with this
        MaltParser instance's tagger.

        :param sentences: Input sentences to parse
        :type sentence: list(list(str))
        :return: iter(DependencyGraph)
        """
        tagged_sentences = (self.tagger(sentence) for sentence in sentences)
        return self.parse_tagged_sents(
            tagged_sentences, verbose, top_relation_label=top_relation_label
        )

    def generate_malt_command(self, inputfilename, outputfilename=None, mode=None):
        """
        This function generates the maltparser command use at the terminal.

        :param inputfilename: path to the input file
        :type inputfilename: str
        :param outputfilename: path to the output file
        :type outputfilename: str
        :param mode: either "parse" or "learn"
        :return: the argv list for subprocess
        """
        cmd = ["java"]
        cmd += self.additional_java_args  # Adds additional java arguments

        # Joins classpaths with ";" if on Windows and on Linux/Mac use ":"
        classpaths_separator = ";" if sys.platform.startswith("win") else ":"
        cmd += [
            "-cp",
            classpaths_separator.join(self.malt_jars),
        ]  # Adds classpaths for jars
        cmd += ["org.maltparser.Malt"]  # Adds the main function.

        # Adds the model file.
        if os.path.exists(self.model):  # when parsing
            cmd += ["-c", os.path.split(self.model)[-1]]
        else:  # when learning
            cmd += ["-c", self.model]

        cmd += ["-i", inputfilename]
        if mode == "parse":
            cmd += ["-o", outputfilename]
        cmd += ["-m", mode]  # mode use to generate parses.
        return cmd

    @staticmethod
    def _execute(cmd, verbose=False):
        """Run ``cmd`` and return its exit code; silence output unless verbose."""
        # BUGFIX: declared as @staticmethod -- without it, the calls
        # ``self._execute(cmd, verbose)`` would bind ``self`` to ``cmd``.
        output = None if verbose else subprocess.PIPE
        p = subprocess.Popen(cmd, stdout=output, stderr=output)
        return p.wait()

    def train(self, depgraphs, verbose=False):
        """
        Train MaltParser from a list of ``DependencyGraph`` objects

        :param depgraphs: list of ``DependencyGraph`` objects for training input data
        :type depgraphs: DependencyGraph
        """
        # Write the conll_str to malt_train.conll file in /tmp/
        with tempfile.NamedTemporaryFile(
            prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False
        ) as input_file:
            input_str = "\n".join(dg.to_conll(10) for dg in depgraphs)
            input_file.write(str(input_str))
        # Trains the model with the malt_train.conll
        self.train_from_file(input_file.name, verbose=verbose)
        # Removes the malt_train.conll once training finishes.
        os.remove(input_file.name)

    def train_from_file(self, conll_file, verbose=False):
        """
        Train MaltParser from a file

        :param conll_file: str for the filename of the training input data
        :type conll_file: str
        :raises Exception: if MaltParser exits with a non-zero status
        """
        # If conll_file is a ZipFilePathPointer,
        # then we need to do some extra massaging
        if isinstance(conll_file, ZipFilePathPointer):
            with tempfile.NamedTemporaryFile(
                prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False
            ) as input_file:
                with conll_file.open() as conll_input_file:
                    conll_str = conll_input_file.read()
                    input_file.write(str(conll_str))
                # recurse with the extracted plain file
                return self.train_from_file(input_file.name, verbose=verbose)

        # Generate command to run maltparser.
        cmd = self.generate_malt_command(conll_file, mode="learn")
        ret = self._execute(cmd, verbose)
        if ret != 0:
            raise Exception(
                "MaltParser training (%s) failed with exit "
                "code %d" % (" ".join(cmd), ret)
            )
        self._trained = True
def demo():
    """Run the discourse demos: first with the default reader, then the DRT
    demo with a MaltParser-backed Glue reading command."""
    discourse_demo()

    # toy POS tagger covering exactly the demo vocabulary
    tagger = RegexpTagger(
        [
            ("^(chases|runs)$", "VB"),
            ("^(a)$", "ex_quant"),
            ("^(every)$", "univ_quant"),
            ("^(dog|boy)$", "NN"),
            ("^(he)$", "PRP"),
        ]
    )
    depparser = MaltParser(tagger=tagger)
    drt_discourse_demo(
        DrtGlueReadingCommand(remove_duplicates=False, depparser=depparser)
    )
170,557 | from collections import defaultdict
from functools import reduce
from nltk.inference.api import Prover, ProverCommandDecorator
from nltk.inference.prover9 import Prover9, Prover9Command
from nltk.sem.logic import (
AbstractVariableExpression,
AllExpression,
AndExpression,
ApplicationExpression,
BooleanExpression,
EqualityExpression,
ExistsExpression,
Expression,
ImpExpression,
NegatedExpression,
Variable,
VariableExpression,
operator,
unique_variable,
)
def reduce(function: Callable[[_T, _S], _T], sequence: Iterable[_S], initial: _T) -> _T: ...
def reduce(function: Callable[[_T, _T], _T], sequence: Iterable[_T]) -> _T: ...
def get_domain(goal, assumptions):
    """Collect every individual constant mentioned in the assumptions and
    the (negated) goal.

    :param goal: the goal ``Expression``, or ``None``
    :param assumptions: list of assumption ``Expression`` objects
    :return: set of constant ``Variable`` objects
    """
    if goal is None:
        all_expressions = assumptions
    else:
        all_expressions = assumptions + [-goal]
    domain = set()
    for expression in all_expressions:
        domain |= expression.constants()
    return domain
170,558 | from collections import defaultdict
from functools import reduce
from nltk.inference.api import Prover, ProverCommandDecorator
from nltk.inference.prover9 import Prover9, Prover9Command
from nltk.sem.logic import (
AbstractVariableExpression,
AllExpression,
AndExpression,
ApplicationExpression,
BooleanExpression,
EqualityExpression,
ExistsExpression,
Expression,
ImpExpression,
NegatedExpression,
Variable,
VariableExpression,
operator,
unique_variable,
)
def closed_domain_demo():
    """Demonstrate ``ClosedDomainProver`` on several arguments, printing the
    plain Prover9 verdict, then the decorated assumptions, goal, and verdict."""
    lexpr = Expression.fromstring

    # 1) existential premise, instantiated goal
    p1 = lexpr(r"exists x.walk(x)")
    p2 = lexpr(r"man(Socrates)")
    c = lexpr(r"walk(Socrates)")
    prover = Prover9Command(c, [p1, p2])
    print(prover.prove())
    cdp = ClosedDomainProver(prover)
    print("assumptions:")
    for a in cdp.assumptions():
        print(" ", a)
    print("goal:", cdp.goal())
    print(cdp.prove())

    # 2) as above, plus a negative fact about Bill
    p1 = lexpr(r"exists x.walk(x)")
    p2 = lexpr(r"man(Socrates)")
    p3 = lexpr(r"-walk(Bill)")
    c = lexpr(r"walk(Socrates)")
    prover = Prover9Command(c, [p1, p2, p3])
    print(prover.prove())
    cdp = ClosedDomainProver(prover)
    print("assumptions:")
    for a in cdp.assumptions():
        print(" ", a)
    print("goal:", cdp.goal())
    print(cdp.prove())

    # 3) NOTE(review): identical to scenario 2 -- possibly an accidental
    # duplication in the original demo; kept as-is.
    p1 = lexpr(r"exists x.walk(x)")
    p2 = lexpr(r"man(Socrates)")
    p3 = lexpr(r"-walk(Bill)")
    c = lexpr(r"walk(Socrates)")
    prover = Prover9Command(c, [p1, p2, p3])
    print(prover.prove())
    cdp = ClosedDomainProver(prover)
    print("assumptions:")
    for a in cdp.assumptions():
        print(" ", a)
    print("goal:", cdp.goal())
    print(cdp.prove())

    # 4) universal goal over a closed domain of two walkers
    p1 = lexpr(r"walk(Socrates)")
    p2 = lexpr(r"walk(Bill)")
    c = lexpr(r"all x.walk(x)")
    prover = Prover9Command(c, [p1, p2])
    print(prover.prove())
    cdp = ClosedDomainProver(prover)
    print("assumptions:")
    for a in cdp.assumptions():
        print(" ", a)
    print("goal:", cdp.goal())
    print(cdp.prove())

    # 5) mixed quantifiers: girl/dog/chase scenario
    p1 = lexpr(r"girl(mary)")
    p2 = lexpr(r"dog(rover)")
    p3 = lexpr(r"all x.(girl(x) -> -dog(x))")
    p4 = lexpr(r"all x.(dog(x) -> -girl(x))")
    p5 = lexpr(r"chase(mary, rover)")
    c = lexpr(r"exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))")
    prover = Prover9Command(c, [p1, p2, p3, p4, p5])
    print(prover.prove())
    cdp = ClosedDomainProver(prover)
    print("assumptions:")
    for a in cdp.assumptions():
        print(" ", a)
    print("goal:", cdp.goal())
    print(cdp.prove())
def unique_names_demo():
    """Demonstrate ``UniqueNamesProver``: distinct constant names are assumed
    to denote distinct individuals (unless asserted equal)."""
    lexpr = Expression.fromstring

    # 1) two named men entail the existence of two distinct individuals
    p1 = lexpr(r"man(Socrates)")
    p2 = lexpr(r"man(Bill)")
    c = lexpr(r"exists x.exists y.(x != y)")
    prover = Prover9Command(c, [p1, p2])
    print(prover.prove())
    unp = UniqueNamesProver(prover)
    print("assumptions:")
    for a in unp.assumptions():
        print(" ", a)
    print("goal:", unp.goal())
    print(unp.prove())

    # 2) explicit equalities must survive the unique-names assumption
    p1 = lexpr(r"all x.(walk(x) -> (x = Socrates))")
    p2 = lexpr(r"Bill = William")
    p3 = lexpr(r"Bill = Billy")
    c = lexpr(r"-walk(William)")
    prover = Prover9Command(c, [p1, p2, p3])
    print(prover.prove())
    unp = UniqueNamesProver(prover)
    print("assumptions:")
    for a in unp.assumptions():
        print(" ", a)
    print("goal:", unp.goal())
    print(unp.prove())
def closed_world_demo():
    """Demonstrate ``ClosedWorldProver``: whatever is not provably true is
    assumed false."""
    lexpr = Expression.fromstring

    # 1) only Socrates is known to walk, so Bill does not
    p1 = lexpr(r"walk(Socrates)")
    p2 = lexpr(r"(Socrates != Bill)")
    c = lexpr(r"-walk(Bill)")
    prover = Prover9Command(c, [p1, p2])
    print(prover.prove())
    cwp = ClosedWorldProver(prover)
    print("assumptions:")
    for a in cwp.assumptions():
        print(" ", a)
    print("goal:", cwp.goal())
    print(cwp.prove())

    # 2) binary predicate: the only "see" facts are the ones listed
    p1 = lexpr(r"see(Socrates, John)")
    p2 = lexpr(r"see(John, Mary)")
    p3 = lexpr(r"(Socrates != John)")
    p4 = lexpr(r"(John != Mary)")
    c = lexpr(r"-see(Socrates, Mary)")
    prover = Prover9Command(c, [p1, p2, p3, p4])
    print(prover.prove())
    cwp = ClosedWorldProver(prover)
    print("assumptions:")
    for a in cwp.assumptions():
        print(" ", a)
    print("goal:", cwp.goal())
    print(cwp.prove())

    # 3) taxonomy: Sam is not an ostrich, hence not provably a bird
    p1 = lexpr(r"all x.(ostrich(x) -> bird(x))")
    p2 = lexpr(r"bird(Tweety)")
    p3 = lexpr(r"-ostrich(Sam)")
    p4 = lexpr(r"Sam != Tweety")
    c = lexpr(r"-bird(Sam)")
    prover = Prover9Command(c, [p1, p2, p3, p4])
    print(prover.prove())
    cwp = ClosedWorldProver(prover)
    print("assumptions:")
    for a in cwp.assumptions():
        print(" ", a)
    print("goal:", cwp.goal())
    print(cwp.prove())
def combination_prover_demo():
    """Demonstrate chaining all three nonmonotonic decorators
    (closed domain, unique names, closed world) around one command."""
    lexpr = Expression.fromstring

    p1 = lexpr(r"see(Socrates, John)")
    p2 = lexpr(r"see(John, Mary)")
    c = lexpr(r"-see(Socrates, Mary)")
    prover = Prover9Command(c, [p1, p2])
    print(prover.prove())
    # decorators compose: each layer rewrites the assumptions of the previous
    command = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover)))
    for a in command.assumptions():
        print(a)
    print(command.prove())
def default_reasoning_demo():
    """Demonstrate default reasoning with abnormality predicates (Ab1..Ab3)
    over an animal/bird/ostrich taxonomy: elephants don't fly, doves fly,
    ostriches don't fly."""
    lexpr = Expression.fromstring
    premises = []

    # define taxonomy
    premises.append(lexpr(r"all x.(elephant(x) -> animal(x))"))
    premises.append(lexpr(r"all x.(bird(x) -> animal(x))"))
    premises.append(lexpr(r"all x.(dove(x) -> bird(x))"))
    premises.append(lexpr(r"all x.(ostrich(x) -> bird(x))"))
    premises.append(lexpr(r"all x.(flying_ostrich(x) -> ostrich(x))"))

    # default properties
    premises.append(
        lexpr(r"all x.((animal(x) & -Ab1(x)) -> -fly(x))")
    )  # normal animals don't fly
    premises.append(
        lexpr(r"all x.((bird(x) & -Ab2(x)) -> fly(x))")
    )  # normal birds fly
    premises.append(
        lexpr(r"all x.((ostrich(x) & -Ab3(x)) -> -fly(x))")
    )  # normal ostriches don't fly

    # specify abnormal entities
    premises.append(lexpr(r"all x.(bird(x) -> Ab1(x))"))  # flight
    premises.append(lexpr(r"all x.(ostrich(x) -> Ab2(x))"))  # non-flying bird
    premises.append(lexpr(r"all x.(flying_ostrich(x) -> Ab3(x))"))  # flying ostrich

    # define entities
    premises.append(lexpr(r"elephant(E)"))
    premises.append(lexpr(r"dove(D)"))
    premises.append(lexpr(r"ostrich(O)"))

    # print the assumptions
    prover = Prover9Command(None, premises)
    command = UniqueNamesProver(ClosedWorldProver(prover))
    for a in command.assumptions():
        print(a)

    # prove the three expected conclusions
    print_proof("-fly(E)", premises)
    print_proof("fly(D)", premises)
    print_proof("-fly(O)", premises)
def demo():
    """Run all nonmonotonic-reasoning demos in sequence."""
    closed_domain_demo()
    unique_names_demo()
    closed_world_demo()
    combination_prover_demo()
    default_reasoning_demo()
170,559 | import os
import subprocess
import nltk
from nltk.inference.api import BaseProverCommand, Prover
from nltk.sem.logic import (
AllExpression,
AndExpression,
EqualityExpression,
ExistsExpression,
Expression,
IffExpression,
ImpExpression,
NegatedExpression,
OrExpression,
)
def test_config():
    """Smoke-test the Prover9 configuration by proving a trivial argument
    and printing the proof."""
    a = Expression.fromstring("(walk(j) & sing(j))")
    g = Expression.fromstring("walk(j)")
    p = Prover9Command(g, assumptions=[a])
    # NOTE(review): pokes private attributes, presumably to force
    # re-discovery of the prover9 binary and reset the search path -- confirm.
    p._executable_path = None
    p.prover9_search = []
    p.prove()
    # config_prover9('/usr/local/bin')
    print(p.prove())
    print(p.proof())
def test_convert_to_prover9(expr):
    """
    Test that parsing works OK.

    :param expr: iterable of formula strings to round-trip through the
        logic parser and the Prover9 printer
    """
    for formula in expr:
        parsed = Expression.fromstring(formula)
        print(convert_to_prover9(parsed))
def test_prove(arguments):
    """
    Try some proofs and exhibit the results.

    :param arguments: iterable of (goal, assumptions) pairs, where goal is a
        formula string and assumptions is a list of formula strings
    """
    for goal, assumptions in arguments:
        target = Expression.fromstring(goal)
        premises = [Expression.fromstring(a) for a in assumptions]
        proved = Prover9Command(target, assumptions=premises).prove()
        for premise in premises:
            print(" %s" % premise)
        print(f"|- {target}: {proved}\n")
# (goal, assumptions) pairs consumed by test_prove(); each goal is a formula
# string and each assumptions entry is a list of premise strings.
arguments = [
    ("(man(x) <-> (not (not man(x))))", []),
    ("(not (man(x) & (not man(x))))", []),
    ("(man(x) | (not man(x)))", []),
    ("(man(x) & (not man(x)))", []),
    ("(man(x) -> man(x))", []),
    ("(not (man(x) & (not man(x))))", []),
    ("(man(x) | (not man(x)))", []),
    ("(man(x) -> man(x))", []),
    ("(man(x) <-> man(x))", []),
    ("(not (man(x) <-> (not man(x))))", []),
    ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]),
    ("((all x.(man(x) -> walks(x)) & man(Socrates)) -> some y.walks(y))", []),
    ("(all x.man(x) -> all x.man(x))", []),
    ("some x.all y.sees(x,y)", []),
    (
        "some e3.(walk(e3) & subj(e3, mary))",
        [
            "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))"
        ],
    ),
    (
        "some x e1.(see(e1) & subj(e1, x) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))",
        [
            "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))"
        ],
    ),
]
# Formula strings used by test_convert_to_prover9() to exercise conversion of
# parsed logic expressions into Prover9 input syntax.
expressions = [
    r"some x y.sees(x,y)",
    r"some x.(man(x) & walks(x))",
    r"\x.(man(x) & walks(x))",
    r"\x y.sees(x,y)",
    r"walks(john)",
    r"\x.big(x, \y.mouse(y))",
    r"(walks(x) & (runs(x) & (threes(x) & fours(x))))",
    r"(walks(x) -> runs(x))",
    r"some x.(PRO(x) & sees(John, x))",
    r"some x.(man(x) & (not walks(x)))",
    r"all x.(man(x) -> walks(x))",
]
def spacer(num=45):
    """Print a separator line of ``num`` hyphens."""
    print(num * "-")
def demo():
    """Run the prover9 module demos: configuration check, logic-to-Prover9
    syntax conversion, and a batch of example proofs."""
    print("Testing configuration")
    spacer()
    test_config()
    print()
    print("Testing conversion to Prover9 format")
    spacer()
    test_convert_to_prover9(expressions)
    print()
    print("Testing proofs")
    spacer()
    test_prove(arguments)
170,560 | from abc import ABCMeta, abstractmethod
from tkinter import (
RAISED,
Button,
Canvas,
Entry,
Frame,
Label,
Menu,
Menubutton,
Scrollbar,
StringVar,
Text,
Tk,
Toplevel,
Widget,
)
from tkinter.filedialog import asksaveasfilename
from nltk.util import in_idle
class TextWidget(CanvasWidget):
    """
    A canvas widget that displays a single string of text.

    Attributes:

    - ``color``: the color of the text.
    - ``font``: the font used to display the text.
    - ``justify``: justification for multi-line texts. Valid values
      are ``left``, ``center``, and ``right``.
    - ``width``: the width of the text. If the text is wider than
      this width, it will be line-wrapped at whitespace.
    - ``draggable``: whether the text can be dragged by the user.
    """

    def __init__(self, canvas, text, **attribs):
        """
        Create a new text widget.

        :type canvas: Tkinter.Canvas
        :param canvas: This canvas widget's canvas.
        :type text: str
        :param text: The string of text to display.
        :param attribs: The new canvas widget's attributes.
        """
        self._text = text
        self._tag = canvas.create_text(1, 1, text=text)
        CanvasWidget.__init__(self, canvas, **attribs)

    def __setitem__(self, attr, value):
        if attr in ("color", "font", "justify", "width"):
            if attr == "color":
                attr = "fill"  # Tk calls the text-color option "fill"
            self.canvas().itemconfig(self._tag, {attr: value})
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == "width":
            # Tk returns option values as strings; callers expect an int
            return int(self.canvas().itemcget(self._tag, attr))
        elif attr in ("color", "font", "justify"):
            if attr == "color":
                attr = "fill"
            return self.canvas().itemcget(self._tag, attr)
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _tags(self):
        return [self._tag]

    def text(self):
        """
        :return: The text displayed by this text widget.
        :rtype: str
        """
        # BUGFIX: the option name must be lowercase "text" -- Tk option
        # names are case-sensitive, so itemcget(..., "TEXT") raises TclError.
        return self.canvas().itemcget(self._tag, "text")

    def set_text(self, text):
        """
        Change the text that is displayed by this text widget.

        :type text: str
        :param text: The string of text to display.
        :rtype: None
        """
        self.canvas().itemconfig(self._tag, text=text)
        if self.parent() is not None:
            # let the container re-layout around the new text extent
            self.parent().update(self)

    def __repr__(self):
        return "[Text: %r]" % self._text
class SymbolWidget(TextWidget):
    """
    A canvas widget that displays special symbols, such as the
    negation sign and the exists operator. Symbols are specified by
    name. Currently, the following symbol names are defined: ``neg``,
    ``disj``, ``conj``, ``lambda``, ``merge``, ``forall``, ``exists``,
    ``subseteq``, ``subset``, ``notsubset``, ``emptyset``, ``imp``,
    ``rightarrow``, ``equal``, ``notequal``, ``epsilon``.

    Attributes:

    - ``color``: the color of the text.
    - ``draggable``: whether the text can be dragged by the user.

    :cvar SYMBOLS: A dictionary mapping from symbols to the character
        in the ``symbol`` font used to render them.
    """

    # Octal string escapes select glyphs in the legacy "symbol" font;
    # see symbolsheet() for a viewer that lists the whole font table.
    SYMBOLS = {
        "neg": "\330",
        "disj": "\332",
        "conj": "\331",
        "lambda": "\154",
        "merge": "\304",
        "forall": "\042",
        "exists": "\044",
        "subseteq": "\315",
        "subset": "\314",
        "notsubset": "\313",
        "emptyset": "\306",
        "imp": "\336",
        "rightarrow": chr(222),  #'\256',
        "equal": "\75",
        "notequal": "\271",
        "intersection": "\307",
        "union": "\310",
        "epsilon": "e",
    }

    def __init__(self, canvas, symbol, **attribs):
        """
        Create a new symbol widget.

        :type canvas: Tkinter.Canvas
        :param canvas: This canvas widget's canvas.
        :type symbol: str
        :param symbol: The name of the symbol to display.
        :param attribs: The new canvas widget's attributes.
        """
        # force the symbol font; any caller-supplied font is overridden
        attribs["font"] = "symbol"
        TextWidget.__init__(self, canvas, "", **attribs)
        self.set_symbol(symbol)

    def symbol(self):
        """
        :return: the name of the symbol that is displayed by this
            symbol widget.
        :rtype: str
        """
        return self._symbol

    def set_symbol(self, symbol):
        """
        Change the symbol that is displayed by this symbol widget.

        :type symbol: str
        :param symbol: The name of the symbol to display.
        :raises ValueError: if ``symbol`` is not a key of ``SYMBOLS``
        """
        if symbol not in SymbolWidget.SYMBOLS:
            raise ValueError("Unknown symbol: %s" % symbol)
        self._symbol = symbol
        self.set_text(SymbolWidget.SYMBOLS[symbol])

    def __repr__(self):
        return "[Symbol: %r]" % self._symbol
def symbolsheet(size=20):
    """
    Open a new Tkinter window that displays the entire alphabet
    for the symbol font. This is useful for constructing the
    ``SymbolWidget.SYMBOLS`` dictionary.

    :param size: font size (in points) used for the listing
    """
    top = Tk()

    def destroy(e, top=top):
        # close the window when 'q' is pressed
        top.destroy()

    top.bind("q", destroy)
    Button(top, text="Quit", command=top.destroy).pack(side="bottom")
    text = Text(top, font=("helvetica", -size), width=20, height=30)
    text.pack(side="left")
    sb = Scrollbar(top, command=text.yview)
    text["yscrollcommand"] = sb.set
    sb.pack(side="right", fill="y")
    text.tag_config("symbol", font=("symbol", -size))
    for i in range(256):
        if i in (0, 10):
            continue  # null and newline
        # label rows whose glyph is already named in SYMBOLS
        for k, v in list(SymbolWidget.SYMBOLS.items()):
            if v == chr(i):
                text.insert("end", "%-10s\t" % k)
                break
        else:
            # otherwise show the raw character code
            text.insert("end", "%-10d \t" % i)
        text.insert("end", "[%s]\n" % chr(i), "symbol")

    top.mainloop()
class BoxWidget(AbstractContainerWidget):
    """
    A canvas widget that places a box around a child widget.

    Attributes:

    - ``fill``: The color used to fill the interior of the box.
    - ``outline``: The color used to draw the outline of the box.
    - ``width``: The width of the outline of the box.
    - ``margin``: The number of pixels space left between the child
      and the box.
    - ``draggable``: whether the text can be dragged by the user.
    """

    def __init__(self, canvas, child, **attribs):
        """
        Create a new box widget.

        :type canvas: Tkinter.Canvas
        :param canvas: This canvas widget's canvas.
        :param child: The child widget. ``child`` must not have a
            parent.
        :type child: CanvasWidget
        :param attribs: The new canvas widget's attributes.
        """
        self._child = child
        self._margin = 1
        self._box = canvas.create_rectangle(1, 1, 1, 1)
        canvas.tag_lower(self._box)  # draw the box underneath the child
        AbstractContainerWidget.__init__(self, canvas, child, **attribs)

    def __setitem__(self, attr, value):
        if attr == "margin":
            self._margin = value
        elif attr in ("outline", "fill", "width"):
            self.canvas().itemconfig(self._box, {attr: value})
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == "margin":
            return self._margin
        elif attr == "width":
            # Tk returns option values as strings; callers expect a number
            return float(self.canvas().itemcget(self._box, attr))
        elif attr in ("outline", "fill"):
            # FIX: "width" removed from this tuple -- it was unreachable
            # dead code, already handled by the branch above.
            return self.canvas().itemcget(self._box, attr)
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _update(self, child):
        (x1, y1, x2, y2) = child.bbox()
        # grow the margin by half the outline width so the line sits
        # entirely outside the requested margin
        margin = self._margin + self["width"] / 2
        self.canvas().coords(
            self._box, x1 - margin, y1 - margin, x2 + margin, y2 + margin
        )

    def _tags(self):
        return [self._box]
class OvalWidget(AbstractContainerWidget):
    """
    A canvas widget that places a oval around a child widget.

    Attributes:
      - ``fill``: The color used to fill the interior of the oval.
      - ``outline``: The color used to draw the outline of the oval.
      - ``width``: The width of the outline of the oval.
      - ``margin``: The number of pixels space left between the child
        and the oval.
      - ``draggable``: whether the text can be dragged by the user.
      - ``double``: If true, then a double-oval is drawn.
    """

    def __init__(self, canvas, child, **attribs):
        """
        Create a new oval widget.

        :type canvas: Tkinter.Canvas
        :param canvas: This canvas widget's canvas.
        :param child: The child widget.  ``child`` must not have a
            parent.
        :type child: CanvasWidget
        :param attribs: The new canvas widget's attributes.
        """
        self._child = child
        self._margin = 1
        self._oval = canvas.create_oval(1, 1, 1, 1)
        self._circle = attribs.pop("circle", False)
        self._double = attribs.pop("double", False)
        # _oval2 is the second oval of a double-oval; it exists exactly
        # when the widget is in "double" mode.
        if self._double:
            self._oval2 = canvas.create_oval(1, 1, 1, 1)
        else:
            self._oval2 = None
        canvas.tag_lower(self._oval)
        AbstractContainerWidget.__init__(self, canvas, child, **attribs)

    def __setitem__(self, attr, value):
        c = self.canvas()
        if attr == "margin":
            self._margin = value
        elif attr == "double":
            # Bug fix: keep the cached flag in sync; previously it was
            # never updated here, so it went stale.
            self._double = value
            if value == True and self._oval2 is None:
                # Copy attributes & position from self._oval.
                x1, y1, x2, y2 = c.bbox(self._oval)
                w = self["width"] * 2
                self._oval2 = c.create_oval(
                    x1 - w,
                    y1 - w,
                    x2 + w,
                    y2 + w,
                    outline=c.itemcget(self._oval, "outline"),
                    width=c.itemcget(self._oval, "width"),
                )
                c.tag_lower(self._oval2)
            if value == False and self._oval2 is not None:
                c.delete(self._oval2)
                self._oval2 = None
        elif attr in ("outline", "fill", "width"):
            c.itemconfig(self._oval, {attr: value})
            # The second oval mirrors every item attribute except "fill"
            # (its interior stays clear so the inner oval shows through).
            # Bug fix: this update was performed twice in the original.
            if self._oval2 is not None and attr != "fill":
                c.itemconfig(self._oval2, {attr: value})
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == "margin":
            return self._margin
        elif attr == "double":
            # Bug fix: the original returned ``self._double is not None``,
            # which is always True because _double is a bool.  The drawn
            # second oval is the authoritative state.
            return self._oval2 is not None
        elif attr == "width":
            # Tk reports item options as strings; callers expect a float.
            return float(self.canvas().itemcget(self._oval, attr))
        elif attr in ("outline", "fill", "width"):
            return self.canvas().itemcget(self._oval, attr)
        else:
            return CanvasWidget.__getitem__(self, attr)

    # The ratio between inscribed & circumscribed ovals
    RATIO = 1.4142135623730949

    def _update(self, child):
        R = OvalWidget.RATIO
        (x1, y1, x2, y2) = child.bbox()
        margin = self._margin

        # If we're a circle, pretend our contents are square.
        if self._circle:
            dx, dy = abs(x1 - x2), abs(y1 - y2)
            if dx > dy:
                y = (y1 + y2) / 2
                y1, y2 = y - dx / 2, y + dx / 2
            elif dy > dx:
                x = (x1 + x2) / 2
                x1, x2 = x - dy / 2, x + dy / 2

        # Find the four corners of the circumscribed oval.
        left = int((x1 * (1 + R) + x2 * (1 - R)) / 2)
        right = left + int((x2 - x1) * R)
        top = int((y1 * (1 + R) + y2 * (1 - R)) / 2)
        bot = top + int((y2 - y1) * R)
        self.canvas().coords(
            self._oval, left - margin, top - margin, right + margin, bot + margin
        )
        if self._oval2 is not None:
            # Inset the second oval by 2 pixels to get the double-line look.
            self.canvas().coords(
                self._oval2,
                left - margin + 2,
                top - margin + 2,
                right + margin - 2,
                bot + margin - 2,
            )

    def _tags(self):
        if self._oval2 is None:
            return [self._oval]
        else:
            return [self._oval, self._oval2]
class ParenWidget(AbstractContainerWidget):
    """
    A canvas widget that places a pair of parenthases around a child
    widget.

    Attributes:
      - ``color``: The color used to draw the parenthases.
      - ``width``: The width of the parenthases.
      - ``draggable``: whether the text can be dragged by the user.
    """

    def __init__(self, canvas, child, **attribs):
        """
        Create a new parenthasis widget.

        :type canvas: Tkinter.Canvas
        :param canvas: This canvas widget's canvas.
        :param child: The child widget.  ``child`` must not have a
            parent.
        :type child: CanvasWidget
        :param attribs: The new canvas widget's attributes.
        """
        self._child = child
        # Each parenthesis is a half-oval arc: the opening paren covers
        # the left half (90..270 degrees), the closing paren the right.
        self._oparen = canvas.create_arc(1, 1, 1, 1, style="arc", start=90, extent=180)
        self._cparen = canvas.create_arc(1, 1, 1, 1, style="arc", start=-90, extent=180)
        AbstractContainerWidget.__init__(self, canvas, child, **attribs)

    def __setitem__(self, attr, value):
        if attr == "color":
            for paren in (self._oparen, self._cparen):
                self.canvas().itemconfig(paren, outline=value)
        elif attr == "width":
            for paren in (self._oparen, self._cparen):
                self.canvas().itemconfig(paren, width=value)
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        # Both parens always share their options, so querying the
        # opening paren is sufficient.
        if attr == "color":
            return self.canvas().itemcget(self._oparen, "outline")
        if attr == "width":
            return self.canvas().itemcget(self._oparen, "width")
        return CanvasWidget.__getitem__(self, attr)

    def _update(self, child):
        (left, top, right, bottom) = child.bbox()
        # The paren's horizontal half-extent scales with child height.
        half = max((bottom - top) / 6, 4)
        self.canvas().coords(self._oparen, left - half, top, left + half, bottom)
        self.canvas().coords(self._cparen, right - half, top, right + half, bottom)

    def _tags(self):
        return [self._oparen, self._cparen]
class BracketWidget(AbstractContainerWidget):
    """
    A canvas widget that places a pair of brackets around a child
    widget.

    Attributes:
      - ``color``: The color used to draw the brackets.
      - ``width``: The width of the brackets.
      - ``draggable``: whether the text can be dragged by the user.
    """

    def __init__(self, canvas, child, **attribs):
        """
        Create a new bracket widget.

        :type canvas: Tkinter.Canvas
        :param canvas: This canvas widget's canvas.
        :param child: The child widget.  ``child`` must not have a
            parent.
        :type child: CanvasWidget
        :param attribs: The new canvas widget's attributes.
        """
        self._child = child
        # Each bracket is a 4-point polyline: a vertical bar with two
        # short horizontal serifs.
        self._obrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1)
        self._cbrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1)
        AbstractContainerWidget.__init__(self, canvas, child, **attribs)

    def __setitem__(self, attr, value):
        if attr == "color":
            # Tk line items are colored via their "fill" option.
            self.canvas().itemconfig(self._obrack, fill=value)
            self.canvas().itemconfig(self._cbrack, fill=value)
        elif attr == "width":
            self.canvas().itemconfig(self._obrack, width=value)
            self.canvas().itemconfig(self._cbrack, width=value)
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == "color":
            # Bug fix: line items have no "outline" option -- the color
            # is stored in "fill" (see __setitem__), so query that.
            return self.canvas().itemcget(self._obrack, "fill")
        elif attr == "width":
            return self.canvas().itemcget(self._obrack, "width")
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _update(self, child):
        (x1, y1, x2, y2) = child.bbox()
        # The serif length scales with child height, with a minimum of 2.
        width = max((y2 - y1) / 8, 2)
        self.canvas().coords(
            self._obrack, x1, y1, x1 - width, y1, x1 - width, y2, x1, y2
        )
        self.canvas().coords(
            self._cbrack, x2, y1, x2 + width, y1, x2 + width, y2, x2, y2
        )

    def _tags(self):
        return [self._obrack, self._cbrack]
class SequenceWidget(CanvasWidget):
    """
    A canvas widget that keeps a list of canvas widgets in a
    horizontal line.

    Attributes:
      - ``align``: The vertical alignment of the children.  Possible
        values are ``'top'``, ``'center'``, and ``'bottom'``.  By
        default, children are center-aligned.
      - ``space``: The amount of horizontal space to place between
        children.  By default, one pixel of space is used.
      - ``ordered``: If true, then keep the children in their
        original order.
    """

    def __init__(self, canvas, *children, **attribs):
        """
        Create a new sequence widget.

        :type canvas: Tkinter.Canvas
        :param canvas: This canvas widget's canvas.
        :param children: The widgets that should be aligned
            horizontally.  Each child must not have a parent.
        :type children: list(CanvasWidget)
        :param attribs: The new canvas widget's attributes.
        """
        # Defaults; may be overridden via **attribs (dispatched through
        # __setitem__ by CanvasWidget.__init__).
        self._align = "center"
        self._space = 1
        self._ordered = False
        self._children = list(children)
        for child in children:
            self._add_child_widget(child)
        CanvasWidget.__init__(self, canvas, **attribs)

    def __setitem__(self, attr, value):
        if attr == "align":
            if value not in ("top", "bottom", "center"):
                raise ValueError("Bad alignment: %r" % value)
            self._align = value
        elif attr == "space":
            self._space = value
        elif attr == "ordered":
            self._ordered = value
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == "align":
            return self._align
        elif attr == "space":
            return self._space
        elif attr == "ordered":
            return self._ordered
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _tags(self):
        # The sequence itself draws nothing; only its children do.
        return []

    def _yalign(self, top, bot):
        # Return the y coordinate used as the vertical alignment
        # reference for a child whose bounding box spans [top, bot].
        if self._align == "top":
            return top
        if self._align == "bottom":
            return bot
        if self._align == "center":
            return (top + bot) / 2

    def _update(self, child):
        # Called when `child` changed size or position: re-align every
        # child vertically with `child`; and, if ordered, push siblings
        # horizontally so they no longer overlap `child`.
        # Align all children with child.
        (left, top, right, bot) = child.bbox()
        y = self._yalign(top, bot)
        for c in self._children:
            (x1, y1, x2, y2) = c.bbox()
            c.move(0, y - self._yalign(y1, y2))
        if self._ordered and len(self._children) > 1:
            index = self._children.index(child)

            # Shift the children to the right of `child` rightwards as
            # needed to preserve spacing.
            x = right + self._space
            for i in range(index + 1, len(self._children)):
                (x1, y1, x2, y2) = self._children[i].bbox()
                if x > x1:
                    self._children[i].move(x - x1, 0)
                x += x2 - x1 + self._space

            # Shift the children to the left of `child` leftwards.
            x = left - self._space
            for i in range(index - 1, -1, -1):
                (x1, y1, x2, y2) = self._children[i].bbox()
                if x < x2:
                    self._children[i].move(x - x2, 0)
                x -= x2 - x1 + self._space

    def _manage(self):
        # Initial layout: line up all children in a row, anchored on
        # the first child.
        if len(self._children) == 0:
            return
        child = self._children[0]

        # Align all children with child.
        (left, top, right, bot) = child.bbox()
        y = self._yalign(top, bot)
        index = self._children.index(child)

        # Line up children to the right of child.
        x = right + self._space
        for i in range(index + 1, len(self._children)):
            (x1, y1, x2, y2) = self._children[i].bbox()
            self._children[i].move(x - x1, y - self._yalign(y1, y2))
            x += x2 - x1 + self._space

        # Line up children to the left of child.
        x = left - self._space
        for i in range(index - 1, -1, -1):
            (x1, y1, x2, y2) = self._children[i].bbox()
            self._children[i].move(x - x2, y - self._yalign(y1, y2))
            x -= x2 - x1 + self._space

    def __repr__(self):
        return "[Sequence: " + repr(self._children)[1:-1] + "]"

    # Provide an alias for the child_widgets() member.
    children = CanvasWidget.child_widgets

    def replace_child(self, oldchild, newchild):
        """
        Replace the child canvas widget ``oldchild`` with ``newchild``.
        ``newchild`` must not have a parent.  ``oldchild``'s parent will
        be set to None.

        :type oldchild: CanvasWidget
        :param oldchild: The child canvas widget to remove.
        :type newchild: CanvasWidget
        :param newchild: The canvas widget that should replace
            ``oldchild``.
        """
        index = self._children.index(oldchild)
        self._children[index] = newchild
        self._remove_child_widget(oldchild)
        self._add_child_widget(newchild)
        self.update(newchild)

    def remove_child(self, child):
        """
        Remove the given child canvas widget.  ``child``'s parent will
        be set to None.

        :type child: CanvasWidget
        :param child: The child canvas widget to remove.
        """
        index = self._children.index(child)
        del self._children[index]
        self._remove_child_widget(child)
        # Re-layout the remaining children around the first one.
        if len(self._children) > 0:
            self.update(self._children[0])

    def insert_child(self, index, child):
        """
        Insert a child canvas widget before a given index.

        :type child: CanvasWidget
        :param child: The canvas widget that should be inserted.
        :type index: int
        :param index: The index where the child widget should be
            inserted.  In particular, the index of ``child`` will be
            ``index``; and the index of any children whose indices were
            greater than equal to ``index`` before ``child`` was
            inserted will be incremented by one.
        """
        self._children.insert(index, child)
        self._add_child_widget(child)
class StackWidget(CanvasWidget):
    """
    A canvas widget that keeps a list of canvas widgets in a vertical
    line.

    Attributes:
      - ``align``: The horizontal alignment of the children.  Possible
        values are ``'left'``, ``'center'``, and ``'right'``.  By
        default, children are center-aligned.
      - ``space``: The amount of vertical space to place between
        children.  By default, one pixel of space is used.
      - ``ordered``: If true, then keep the children in their
        original order.
    """

    def __init__(self, canvas, *children, **attribs):
        """
        Create a new stack widget.

        :type canvas: Tkinter.Canvas
        :param canvas: This canvas widget's canvas.
        :param children: The widgets that should be aligned
            vertically.  Each child must not have a parent.
        :type children: list(CanvasWidget)
        :param attribs: The new canvas widget's attributes.
        """
        # Defaults; may be overridden via **attribs (dispatched through
        # __setitem__ by CanvasWidget.__init__).
        self._align = "center"
        self._space = 1
        self._ordered = False
        self._children = list(children)
        for child in children:
            self._add_child_widget(child)
        CanvasWidget.__init__(self, canvas, **attribs)

    def __setitem__(self, attr, value):
        if attr == "align":
            if value not in ("left", "right", "center"):
                raise ValueError("Bad alignment: %r" % value)
            self._align = value
        elif attr == "space":
            self._space = value
        elif attr == "ordered":
            self._ordered = value
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == "align":
            return self._align
        elif attr == "space":
            return self._space
        elif attr == "ordered":
            return self._ordered
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _tags(self):
        # The stack itself draws nothing; only its children do.
        return []

    def _xalign(self, left, right):
        # Return the x coordinate used as the horizontal alignment
        # reference for a child whose bounding box spans [left, right].
        if self._align == "left":
            return left
        if self._align == "right":
            return right
        if self._align == "center":
            return (left + right) / 2

    def _update(self, child):
        # Called when `child` changed size or position: re-align every
        # child horizontally with `child`; and, if ordered, push
        # siblings vertically so they no longer overlap `child`.
        # Align all children with child.
        (left, top, right, bot) = child.bbox()
        x = self._xalign(left, right)
        for c in self._children:
            (x1, y1, x2, y2) = c.bbox()
            c.move(x - self._xalign(x1, x2), 0)
        if self._ordered and len(self._children) > 1:
            index = self._children.index(child)

            # Shift the children below `child` downwards as needed to
            # preserve spacing.
            y = bot + self._space
            for i in range(index + 1, len(self._children)):
                (x1, y1, x2, y2) = self._children[i].bbox()
                if y > y1:
                    self._children[i].move(0, y - y1)
                y += y2 - y1 + self._space

            # Shift the children above `child` upwards.
            y = top - self._space
            for i in range(index - 1, -1, -1):
                (x1, y1, x2, y2) = self._children[i].bbox()
                if y < y2:
                    self._children[i].move(0, y - y2)
                y -= y2 - y1 + self._space

    def _manage(self):
        # Initial layout: stack all children in a column, anchored on
        # the first child.
        if len(self._children) == 0:
            return
        child = self._children[0]

        # Align all children with child.
        (left, top, right, bot) = child.bbox()
        x = self._xalign(left, right)
        index = self._children.index(child)

        # Line up children below the child.
        y = bot + self._space
        for i in range(index + 1, len(self._children)):
            (x1, y1, x2, y2) = self._children[i].bbox()
            self._children[i].move(x - self._xalign(x1, x2), y - y1)
            y += y2 - y1 + self._space

        # Line up children above the child.
        y = top - self._space
        for i in range(index - 1, -1, -1):
            (x1, y1, x2, y2) = self._children[i].bbox()
            self._children[i].move(x - self._xalign(x1, x2), y - y2)
            y -= y2 - y1 + self._space

    def __repr__(self):
        return "[Stack: " + repr(self._children)[1:-1] + "]"

    # Provide an alias for the child_widgets() member.
    children = CanvasWidget.child_widgets

    def replace_child(self, oldchild, newchild):
        """
        Replace the child canvas widget ``oldchild`` with ``newchild``.
        ``newchild`` must not have a parent.  ``oldchild``'s parent will
        be set to None.

        :type oldchild: CanvasWidget
        :param oldchild: The child canvas widget to remove.
        :type newchild: CanvasWidget
        :param newchild: The canvas widget that should replace
            ``oldchild``.
        """
        index = self._children.index(oldchild)
        self._children[index] = newchild
        self._remove_child_widget(oldchild)
        self._add_child_widget(newchild)
        self.update(newchild)

    def remove_child(self, child):
        """
        Remove the given child canvas widget.  ``child``'s parent will
        be set to None.

        :type child: CanvasWidget
        :param child: The child canvas widget to remove.
        """
        index = self._children.index(child)
        del self._children[index]
        self._remove_child_widget(child)
        # Re-layout the remaining children around the first one.
        if len(self._children) > 0:
            self.update(self._children[0])

    def insert_child(self, index, child):
        """
        Insert a child canvas widget before a given index.

        :type child: CanvasWidget
        :param child: The canvas widget that should be inserted.
        :type index: int
        :param index: The index where the child widget should be
            inserted.  In particular, the index of ``child`` will be
            ``index``; and the index of any children whose indices were
            greater than equal to ``index`` before ``child`` was
            inserted will be incremented by one.
        """
        self._children.insert(index, child)
        self._add_child_widget(child)
class SpaceWidget(CanvasWidget):
    """
    A canvas widget that takes up space but does not display
    anything.  A ``SpaceWidget`` can be used to add space between
    elements.  Each space widget is characterized by a width and a
    height.  If you wish to only create horizontal space, then use a
    height of zero; and if you wish to only create vertical space, use
    a width of zero.
    """

    def __init__(self, canvas, width, height, **attribs):
        """
        Create a new space widget.

        :type canvas: Tkinter.Canvas
        :param canvas: This canvas widget's canvas.
        :type width: int
        :param width: The width of the new space widget.
        :type height: int
        :param height: The height of the new space widget.
        :param attribs: The new canvas widget's attributes.
        """
        # Shrink the requested size by 4 pixels -- presumably to
        # compensate for extra padding in the item's reported bounding
        # box (original comment was truncated; TODO confirm).
        if width > 4:
            width -= 4
        if height > 4:
            height -= 4
        # An invisible line (empty fill) that simply occupies space.
        self._tag = canvas.create_line(1, 1, width, height, fill="")
        CanvasWidget.__init__(self, canvas, **attribs)

    # note: width() and height() are already defined by CanvasWidget.

    def set_width(self, width):
        """
        Change the width of this space widget.

        :param width: The new width.
        :type width: int
        :rtype: None
        """
        (x1, y1, x2, y2) = self.bbox()
        self.canvas().coords(self._tag, x1, y1, x1 + width, y2)

    def set_height(self, height):
        """
        Change the height of this space widget.

        :param height: The new height.
        :type height: int
        :rtype: None
        """
        (x1, y1, x2, y2) = self.bbox()
        self.canvas().coords(self._tag, x1, y1, x2, y1 + height)

    def _tags(self):
        return [self._tag]

    def __repr__(self):
        return "[Space]"
class CanvasFrame:
    """
    A ``Tkinter`` frame containing a canvas and scrollbars.
    ``CanvasFrame`` uses a ``ScrollWatcherWidget`` to ensure that all of
    the canvas widgets contained on its canvas are within its
    scrollregion.  In order for ``CanvasFrame`` to make these checks,
    all canvas widgets must be registered with ``add_widget`` when they
    are added to the canvas; and destroyed with ``destroy_widget`` when
    they are no longer needed.

    If a ``CanvasFrame`` is created with no parent, then it will create
    its own main window, including a "Done" button and a "Print"
    button.
    """

    def __init__(self, parent=None, **kw):
        """
        Create a new ``CanvasFrame``.

        :type parent: Tkinter.BaseWidget or Tkinter.Tk
        :param parent: The parent ``Tkinter`` widget.  If no parent is
            specified, then ``CanvasFrame`` will create a new main
            window.
        :param kw: Keyword arguments for the new ``Canvas``.  See the
            documentation for ``Tkinter.Canvas`` for more information.
        """
        # If no parent was given, set up a top-level window.
        if parent is None:
            self._parent = Tk()
            self._parent.title("NLTK")
            self._parent.bind("<Control-p>", lambda e: self.print_to_file())
            self._parent.bind("<Control-x>", self.destroy)
            self._parent.bind("<Control-q>", self.destroy)
        else:
            self._parent = parent

        # Create a frame for the canvas & scrollbars
        self._frame = frame = Frame(self._parent)
        self._canvas = canvas = Canvas(frame, **kw)
        xscrollbar = Scrollbar(self._frame, orient="horizontal")
        yscrollbar = Scrollbar(self._frame, orient="vertical")
        xscrollbar["command"] = canvas.xview
        yscrollbar["command"] = canvas.yview
        canvas["xscrollcommand"] = xscrollbar.set
        canvas["yscrollcommand"] = yscrollbar.set
        yscrollbar.pack(fill="y", side="right")
        xscrollbar.pack(fill="x", side="bottom")
        canvas.pack(expand=1, fill="both", side="left")

        # Set initial scroll region.
        scrollregion = "0 0 {} {}".format(canvas["width"], canvas["height"])
        canvas["scrollregion"] = scrollregion

        # Watches registered widgets to keep the scrollregion covering them.
        self._scrollwatcher = ScrollWatcherWidget(canvas)

        # If no parent was given, pack the frame, and add a menu.
        if parent is None:
            self.pack(expand=1, fill="both")
            self._init_menubar()

    def _init_menubar(self):
        # Build the File menu; only used when we own the toplevel window.
        menubar = Menu(self._parent)

        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(
            label="Print to Postscript",
            underline=0,
            command=self.print_to_file,
            accelerator="Ctrl-p",
        )
        filemenu.add_command(
            label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
        )
        menubar.add_cascade(label="File", underline=0, menu=filemenu)

        self._parent.config(menu=menubar)

    def print_to_file(self, filename=None):
        """
        Print the contents of this ``CanvasFrame`` to a postscript
        file.  If no filename is given, then prompt the user for one.

        :param filename: The name of the file to print the tree to.
        :type filename: str
        :rtype: None
        """
        if filename is None:
            ftypes = [("Postscript files", ".ps"), ("All files", "*")]
            filename = asksaveasfilename(filetypes=ftypes, defaultextension=".ps")
            if not filename:
                return
        (x0, y0, w, h) = self.scrollregion()
        postscript = self._canvas.postscript(
            x=x0,
            y=y0,
            width=w + 2,
            height=h + 2,
            pagewidth=w + 2,  # points = 1/72 inch
            pageheight=h + 2,  # points = 1/72 inch
            pagex=0,
            pagey=0,
        )
        # workaround for bug in Tk font handling
        postscript = postscript.replace(" 0 scalefont ", " 9 scalefont ")
        with open(filename, "wb") as f:
            f.write(postscript.encode("utf8"))

    def scrollregion(self):
        """
        :return: The current scroll region for the canvas managed by
            this ``CanvasFrame``.
        :rtype: 4-tuple of int
        """
        # Tk stores the scrollregion as a whitespace-separated string.
        (x1, y1, x2, y2) = self._canvas["scrollregion"].split()
        return (int(x1), int(y1), int(x2), int(y2))

    def canvas(self):
        """
        :return: The canvas managed by this ``CanvasFrame``.
        :rtype: Tkinter.Canvas
        """
        return self._canvas

    def add_widget(self, canvaswidget, x=None, y=None):
        """
        Register a canvas widget with this ``CanvasFrame``.  The
        ``CanvasFrame`` will ensure that this canvas widget is always
        within the ``Canvas``'s scrollregion.  If no coordinates are
        given for the canvas widget, then the ``CanvasFrame`` will
        attempt to find a clear area of the canvas for it.

        :type canvaswidget: CanvasWidget
        :param canvaswidget: The new canvas widget.  ``canvaswidget``
            must have been created on this ``CanvasFrame``'s canvas.
        :type x: int
        :param x: The initial x coordinate for the upper left hand
            corner of ``canvaswidget``, in the canvas's coordinate
            space.
        :type y: int
        :param y: The initial y coordinate for the upper left hand
            corner of ``canvaswidget``, in the canvas's coordinate
            space.
        """
        if x is None or y is None:
            (x, y) = self._find_room(canvaswidget, x, y)

        # Move to (x,y)
        (x1, y1, x2, y2) = canvaswidget.bbox()
        canvaswidget.move(x - x1, y - y1)

        # Register with scrollwatcher.
        self._scrollwatcher.add_child(canvaswidget)

    def _find_room(self, widget, desired_x, desired_y):
        """
        Try to find a space for a given widget.
        """
        (left, top, right, bot) = self.scrollregion()

        w = widget.width()
        h = widget.height()

        # Give up if the widget cannot fit within the scrollregion.
        if w >= (right - left):
            return (0, 0)
        if h >= (bot - top):
            return (0, 0)

        # Move the widget out of the way, for now.
        (x1, y1, x2, y2) = widget.bbox()
        widget.move(left - x2 - 50, top - y2 - 50)

        # Scan candidate positions (a coarse ~10-step grid) for one
        # where the widget (plus a 5px margin) overlaps nothing.
        if desired_x is not None:
            x = desired_x
            for y in range(top, bot - h, int((bot - top - h) / 10)):
                if not self._canvas.find_overlapping(
                    x - 5, y - 5, x + w + 5, y + h + 5
                ):
                    return (x, y)

        if desired_y is not None:
            y = desired_y
            for x in range(left, right - w, int((right - left - w) / 10)):
                if not self._canvas.find_overlapping(
                    x - 5, y - 5, x + w + 5, y + h + 5
                ):
                    return (x, y)

        for y in range(top, bot - h, int((bot - top - h) / 10)):
            for x in range(left, right - w, int((right - left - w) / 10)):
                if not self._canvas.find_overlapping(
                    x - 5, y - 5, x + w + 5, y + h + 5
                ):
                    return (x, y)
        # No clear area found; fall back to the origin.
        return (0, 0)

    def destroy_widget(self, canvaswidget):
        """
        Remove a canvas widget from this ``CanvasFrame``.  This
        deregisters the canvas widget, and destroys it.
        """
        self.remove_widget(canvaswidget)
        canvaswidget.destroy()

    def remove_widget(self, canvaswidget):
        """
        Deregister a canvas widget from this ``CanvasFrame``, without
        destroying it.
        """
        # Deregister with scrollwatcher.
        self._scrollwatcher.remove_child(canvaswidget)

    def pack(self, cnf={}, **kw):
        """
        Pack this ``CanvasFrame``.  See the documentation for
        ``Tkinter.Pack`` for more information.
        """
        self._frame.pack(cnf, **kw)
        # Adjust to be big enough for kids?

    def destroy(self, *e):
        """
        Destroy this ``CanvasFrame``.  If this ``CanvasFrame`` created a
        top-level window, then this will close that window.
        """
        # Guard against being called twice (e.g. from multiple bindings).
        if self._parent is None:
            return
        self._parent.destroy()
        self._parent = None

    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this frame is created from a non-interactive program (e.g.
        from a script); otherwise, the frame will close as soon as
        the script completes.
        """
        # No-op inside IDLE, which already runs a Tk mainloop.
        if in_idle():
            return
        self._parent.mainloop(*args, **kwargs)
def randint(a: int, b: int) -> int: ...
The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem:
A simple demonstration showing how to use canvas widgets.
Here is the function:
def demo():
    """
    A simple demonstration showing how to use canvas widgets.
    """

    def fill(cw):
        # Click callback: give the clicked widget a random blue-ish fill.
        from random import randint

        cw["fill"] = "#00%04d" % randint(0, 9999)

    def color(cw):
        # Click callback: give the clicked widget a random red-ish color.
        from random import randint

        cw["color"] = "#ff%04d" % randint(0, 9999)

    # A frame with its own toplevel window; closeenough=10 makes clicks
    # within 10 pixels of an item count as hits.
    cf = CanvasFrame(closeenough=10, width=300, height=300)
    c = cf.canvas()
    ct3 = TextWidget(c, "hiya there", draggable=1)
    ct2 = TextWidget(c, "o o\n||\n___\n U", draggable=1, justify="center")
    co = OvalWidget(c, ct2, outline="red")
    ct = TextWidget(c, "o o\n||\n\\___/", draggable=1, justify="center")
    cp = ParenWidget(c, ct, color="red")
    cb = BoxWidget(c, cp, fill="cyan", draggable=1, width=3, margin=10)
    equation = SequenceWidget(
        c,
        SymbolWidget(c, "forall"),
        TextWidget(c, "x"),
        SymbolWidget(c, "exists"),
        TextWidget(c, "y: "),
        TextWidget(c, "x"),
        SymbolWidget(c, "notequal"),
        TextWidget(c, "y"),
    )
    space = SpaceWidget(c, 0, 30)
    cstack = StackWidget(c, cb, ct3, space, co, equation, align="center")
    prompt_msg = TextWidget(
        c, "try clicking\nand dragging", draggable=1, justify="center"
    )
    cs = SequenceWidget(c, cstack, prompt_msg)
    zz = BracketWidget(c, cs, color="green4", width=3)
    cf.add_widget(zz, 60, 30)
    # Wire up the click callbacks, then hand control to Tk.
    cb.bind_click(fill)
    ct.bind_click(color)
    co.bind_click(fill)
    ct2.bind_click(color)
    ct3.bind_click(color)

    cf.mainloop()
# ShowText(None, 'title', ((('this is text'*150)+'\n')*5)) | A simple demonstration showing how to use canvas widgets. |
170,561 | from tkinter import IntVar, Menu, Tk
from nltk.draw.util import (
BoxWidget,
CanvasFrame,
CanvasWidget,
OvalWidget,
ParenWidget,
TextWidget,
)
from nltk.tree import Tree
from nltk.util import in_idle
def tree_to_treesegment(
canvas, t, make_node=TextWidget, make_leaf=TextWidget, **attribs
):
class TreeWidget(CanvasWidget):
def __init__(
self, canvas, t, make_node=TextWidget, make_leaf=TextWidget, **attribs
):
def expanded_tree(self, *path_to_tree):
def collapsed_tree(self, *path_to_tree):
def bind_click_trees(self, callback, button=1):
def bind_drag_trees(self, callback, button=1):
def bind_click_leaves(self, callback, button=1):
def bind_drag_leaves(self, callback, button=1):
def bind_click_nodes(self, callback, button=1):
def bind_drag_nodes(self, callback, button=1):
def _make_collapsed_trees(self, canvas, t, key):
def _make_expanded_tree(self, canvas, t, key):
def __setitem__(self, attr, value):
def __getitem__(self, attr):
def _tags(self):
def _manage(self):
def toggle_collapsed(self, treeseg):
class TextWidget(CanvasWidget):
def __init__(self, canvas, text, **attribs):
def __setitem__(self, attr, value):
def __getitem__(self, attr):
def _tags(self):
def text(self):
def set_text(self, text):
def __repr__(self):
class BoxWidget(AbstractContainerWidget):
def __init__(self, canvas, child, **attribs):
def __setitem__(self, attr, value):
def __getitem__(self, attr):
def _update(self, child):
def _tags(self):
class OvalWidget(AbstractContainerWidget):
def __init__(self, canvas, child, **attribs):
def __setitem__(self, attr, value):
def __getitem__(self, attr):
def _update(self, child):
def _tags(self):
class ParenWidget(AbstractContainerWidget):
def __init__(self, canvas, child, **attribs):
def __setitem__(self, attr, value):
def __getitem__(self, attr):
def _update(self, child):
def _tags(self):
class CanvasFrame:
def __init__(self, parent=None, **kw):
def _init_menubar(self):
def print_to_file(self, filename=None):
def scrollregion(self):
def canvas(self):
def add_widget(self, canvaswidget, x=None, y=None):
def _find_room(self, widget, desired_x, desired_y):
def destroy_widget(self, canvaswidget):
def remove_widget(self, canvaswidget):
def pack(self, cnf={}, **kw):
def destroy(self, *e):
def mainloop(self, *args, **kwargs):
def demo():
    """
    Demonstrate the tree-drawing widgets: several ``TreeWidget``\\ s
    with different node/leaf constructors, collapsible subtrees, and
    orientation switching.
    """
    import random

    def fill(cw):
        # Click callback: recolor the widget's interior at random.
        # (Defined but not bound below.)
        cw["fill"] = "#%06d" % random.randint(0, 999999)

    cf = CanvasFrame(width=550, height=450, closeenough=2)

    t = Tree.fromstring(
        """
    (S (NP the very big cat)
       (VP (Adv sorta) (V saw) (NP (Det the) (N dog))))"""
    )

    # Top-left tree: a TreeWidget built directly from a Tree.
    tc = TreeWidget(
        cf.canvas(),
        t,
        draggable=1,
        node_font=("helvetica", -14, "bold"),
        leaf_font=("helvetica", -12, "italic"),
        roof_fill="white",
        roof_color="black",
        leaf_color="green4",
        node_color="blue2",
    )
    cf.add_widget(tc, 10, 10)

    def boxit(canvas, text):
        # Node constructor: text inside a green box.
        big = ("helvetica", -16, "bold")
        return BoxWidget(canvas, TextWidget(canvas, text, font=big), fill="green")

    def ovalit(canvas, text):
        # Leaf constructor: text inside a cyan oval.
        return OvalWidget(canvas, TextWidget(canvas, text), fill="cyan")

    # Top-right tree: custom node & leaf constructors.
    treetok = Tree.fromstring("(S (NP this tree) (VP (V is) (AdjP shapeable)))")
    tc2 = TreeWidget(cf.canvas(), treetok, boxit, ovalit, shapeable=1)

    def color(node):
        # Right-click callback: recolor a node label at random.
        node["color"] = "#%04d00" % random.randint(0, 9999)

    def color2(treeseg):
        # Right-click callback: recolor a tree segment's label box.
        treeseg.label()["fill"] = "#%06d" % random.randint(0, 9999)
        treeseg.label().child()["color"] = "white"

    tc.bind_click_trees(tc.toggle_collapsed)
    tc2.bind_click_trees(tc2.toggle_collapsed)
    tc.bind_click_nodes(color, 3)
    tc2.expanded_tree(1).bind_click(color2, 3)
    tc2.expanded_tree().bind_click(color2, 3)

    paren = ParenWidget(cf.canvas(), tc2)
    cf.add_widget(paren, tc.bbox()[2] + 10, 10)

    # Bottom-left tree: built via tree_to_treesegment.
    tree3 = Tree.fromstring(
        """
    (S (NP this tree) (AUX was)
       (VP (V built) (PP (P with) (NP (N tree_to_treesegment)))))"""
    )
    tc3 = tree_to_treesegment(
        cf.canvas(), tree3, tree_color="green4", tree_xspace=2, tree_width=2
    )
    tc3["draggable"] = 1
    cf.add_widget(tc3, 10, tc.bbox()[3] + 10)

    def orientswitch(treewidget):
        # Click callback: flip the tree's orientation, updating the
        # leaf that displays the current orientation in both the
        # expanded and collapsed versions of its subtrees.
        if treewidget["orientation"] == "horizontal":
            treewidget.expanded_tree(1, 1).subtrees()[0].set_text("vertical")
            treewidget.collapsed_tree(1, 1).subtrees()[0].set_text("vertical")
            treewidget.collapsed_tree(1).subtrees()[1].set_text("vertical")
            treewidget.collapsed_tree().subtrees()[3].set_text("vertical")
            treewidget["orientation"] = "vertical"
        else:
            treewidget.expanded_tree(1, 1).subtrees()[0].set_text("horizontal")
            treewidget.collapsed_tree(1, 1).subtrees()[0].set_text("horizontal")
            treewidget.collapsed_tree(1).subtrees()[1].set_text("horizontal")
            treewidget.collapsed_tree().subtrees()[3].set_text("horizontal")
            treewidget["orientation"] = "horizontal"

    text = """
Try clicking, right clicking, and dragging
different elements of each of the trees.
The top-left tree is a TreeWidget built from
a Tree. The top-right is a TreeWidget built
from a Tree, using non-default widget
constructors for the nodes & leaves (BoxWidget
and OvalWidget). The bottom-left tree is
built from tree_to_treesegment."""
    twidget = TextWidget(cf.canvas(), text.strip())
    textbox = BoxWidget(cf.canvas(), twidget, fill="white", draggable=1)
    cf.add_widget(textbox, tc3.bbox()[2] + 10, tc2.bbox()[3] + 10)

    # Bottom-right tree: horizontal orientation, click to flip.
    tree4 = Tree.fromstring("(S (NP this tree) (VP (V is) (Adj horizontal)))")
    tc4 = TreeWidget(
        cf.canvas(),
        tree4,
        draggable=1,
        line_color="brown2",
        roof_color="brown2",
        node_font=("helvetica", -12, "bold"),
        node_color="brown4",
        orientation="horizontal",
    )
    tc4.manage()
    cf.add_widget(tc4, tc3.bbox()[2] + 10, textbox.bbox()[3] + 10)
    tc4.bind_click(orientswitch)
    tc4.bind_click_trees(tc4.toggle_collapsed, 3)

    # Run mainloop
    cf.mainloop()
170,562 | import operator
from tkinter import Frame, Label, Listbox, Scrollbar, Tk
class Table:
    """
    A display widget for a table of values, based on a ``MultiListbox``
    widget. For many purposes, ``Table`` can be treated as a
    list-of-lists. E.g., table[i] is a list of the values for row i;
    and table.append(row) adds a new row with the given list of
    values. Individual cells can be accessed using table[i,j], which
    refers to the j-th column of the i-th row. This can be used to
    both read and write values from the table. E.g.:

        >>> table[i,j] = 'hello' # doctest: +SKIP

    The column (j) can be given either as an index number, or as a
    column name. E.g., the following prints the value in the 3rd row
    for the 'First Name' column:

        >>> print(table[3, 'First Name']) # doctest: +SKIP
        John

    You can configure the colors for individual rows, columns, or
    cells using ``rowconfig()``, ``columnconfig()``, and ``itemconfig()``.
    The color configuration for each row will be preserved if the
    table is modified; however, when new rows are added, any color
    configurations that have been made for *columns* will not be
    applied to the new row.

    Note: Although ``Table`` acts like a widget in some ways (e.g., it
    defines ``grid()``, ``pack()``, and ``bind()``), it is not itself a
    widget; it just contains one. This is because widgets need to
    define ``__getitem__()``, ``__setitem__()``, and ``__nonzero__()`` in
    a way that's incompatible with the fact that ``Table`` behaves as a
    list-of-lists.

    :ivar _mlb: The multi-column listbox used to display this table's data.
    :ivar _rows: A list-of-lists used to hold the cell values of this
        table. Each element of _rows is a row value, i.e., a list of
        cell values, one for each column in the row.
    """

    def __init__(
        self,
        master,
        column_names,
        rows=None,
        column_weights=None,
        scrollbar=True,
        click_to_sort=True,
        reprfunc=None,
        cnf={},
        **kw
    ):
        """
        Construct a new Table widget.

        :type master: Tkinter.Widget
        :param master: The widget that should contain the new table.
        :type column_names: list(str)
        :param column_names: A list of names for the columns; these
            names will be used to create labels for each column;
            and can be used as an index when reading or writing
            cell values from the table.
        :type rows: list(list)
        :param rows: A list of row values used to initialize the table.
            Each row value should be a tuple of cell values, one for
            each column in the row.
        :type scrollbar: bool
        :param scrollbar: If true, then create a scrollbar for the
            new table widget.
        :type click_to_sort: bool
        :param click_to_sort: If true, then create bindings that will
            sort the table's rows by a given column's values if the
            user clicks on that column's label.
        :type reprfunc: function
        :param reprfunc: If specified, then use this function to
            convert each table cell value to a string suitable for
            display. ``reprfunc`` has the following signature:
            reprfunc(row_index, col_index, cell_value) -> str
            (Note that the column is specified by index, not by name.)
        :param cnf, kw: Configuration parameters for this widget's
            contained ``MultiListbox``. See ``MultiListbox.__init__()``
            for details.
        """
        self._num_columns = len(column_names)
        self._reprfunc = reprfunc
        self._frame = Frame(master)

        # Allow cells to be addressed by column name as well as index.
        self._column_name_to_index = {c: i for (i, c) in enumerate(column_names)}

        # Make a copy of the rows & check that it's valid.
        if rows is None:
            self._rows = []
        else:
            self._rows = [[v for v in row] for row in rows]
        for row in self._rows:
            self._checkrow(row)

        # Create our multi-list box.
        self._mlb = MultiListbox(self._frame, column_names, column_weights, cnf, **kw)
        self._mlb.pack(side="left", expand=True, fill="both")

        # Optional scrollbar
        if scrollbar:
            sb = Scrollbar(self._frame, orient="vertical", command=self._mlb.yview)
            # Only the first listbox drives the scrollbar; MultiListbox keeps
            # the remaining columns in sync.
            self._mlb.listboxes[0]["yscrollcommand"] = sb.set
            # for listbox in self._mlb.listboxes:
            #     listbox['yscrollcommand'] = sb.set
            sb.pack(side="right", fill="y")
            self._scrollbar = sb

        # Set up sorting
        self._sortkey = None
        if click_to_sort:
            for i, l in enumerate(self._mlb.column_labels):
                l.bind("<Button-1>", self._sort)

        # Fill in our multi-list box.
        self._fill_table()

    # /////////////////////////////////////////////////////////////////
    # { Widget-like Methods
    # /////////////////////////////////////////////////////////////////
    # These all just delegate to either our frame or our MLB.

    def pack(self, *args, **kwargs):
        """Position this table's main frame widget in its parent
        widget. See ``Tkinter.Frame.pack()`` for more info."""
        self._frame.pack(*args, **kwargs)

    def grid(self, *args, **kwargs):
        """Position this table's main frame widget in its parent
        widget. See ``Tkinter.Frame.grid()`` for more info."""
        self._frame.grid(*args, **kwargs)

    def focus(self):
        """Direct (keyboard) input focus to this widget."""
        self._mlb.focus()

    def bind(self, sequence=None, func=None, add=None):
        """Add a binding to this table's main frame that will call
        ``func`` in response to the event sequence."""
        self._mlb.bind(sequence, func, add)

    def rowconfigure(self, row_index, cnf={}, **kw):
        """:see: ``MultiListbox.rowconfigure()``"""
        self._mlb.rowconfigure(row_index, cnf, **kw)

    def columnconfigure(self, col_index, cnf={}, **kw):
        """:see: ``MultiListbox.columnconfigure()``"""
        col_index = self.column_index(col_index)
        self._mlb.columnconfigure(col_index, cnf, **kw)

    def itemconfigure(self, row_index, col_index, cnf=None, **kw):
        """:see: ``MultiListbox.itemconfigure()``"""
        col_index = self.column_index(col_index)
        return self._mlb.itemconfigure(row_index, col_index, cnf, **kw)

    def bind_to_labels(self, sequence=None, func=None, add=None):
        """:see: ``MultiListbox.bind_to_labels()``"""
        return self._mlb.bind_to_labels(sequence, func, add)

    def bind_to_listboxes(self, sequence=None, func=None, add=None):
        """:see: ``MultiListbox.bind_to_listboxes()``"""
        return self._mlb.bind_to_listboxes(sequence, func, add)

    def bind_to_columns(self, sequence=None, func=None, add=None):
        """:see: ``MultiListbox.bind_to_columns()``"""
        return self._mlb.bind_to_columns(sequence, func, add)

    # Short aliases for the configuration methods.
    rowconfig = rowconfigure
    columnconfig = columnconfigure
    itemconfig = itemconfigure

    # /////////////////////////////////////////////////////////////////
    # { Table as list-of-lists
    # /////////////////////////////////////////////////////////////////

    def insert(self, row_index, rowvalue):
        """
        Insert a new row into the table, so that its row index will be
        ``row_index``. If the table contains any rows whose row index
        is greater than or equal to ``row_index``, then they will be
        shifted down.

        :param rowvalue: A tuple of cell values, one for each column
            in the new row.
        """
        self._checkrow(rowvalue)
        # NOTE(review): unlike __init__ and __setitem__, the row is stored
        # without copying here; a caller-held list (or an immutable tuple)
        # ends up in _rows directly -- confirm this is intended.
        self._rows.insert(row_index, rowvalue)
        if self._reprfunc is not None:
            rowvalue = [
                self._reprfunc(row_index, j, v) for (j, v) in enumerate(rowvalue)
            ]
        self._mlb.insert(row_index, rowvalue)
        if self._DEBUG:
            self._check_table_vs_mlb()

    def extend(self, rowvalues):
        """
        Add new rows at the end of the table.

        :param rowvalues: A list of row values used to initialize the
            table. Each row value should be a tuple of cell values,
            one for each column in the row.
        """
        for rowvalue in rowvalues:
            self.append(rowvalue)
        if self._DEBUG:
            self._check_table_vs_mlb()

    def append(self, rowvalue):
        """
        Add a new row to the end of the table.

        :param rowvalue: A tuple of cell values, one for each column
            in the new row.
        """
        self.insert(len(self._rows), rowvalue)
        if self._DEBUG:
            self._check_table_vs_mlb()

    def clear(self):
        """
        Delete all rows in this table.
        """
        self._rows = []
        self._mlb.delete(0, "end")
        if self._DEBUG:
            self._check_table_vs_mlb()

    def __getitem__(self, index):
        """
        Return the value of a row or a cell in this table. If
        ``index`` is an integer, then the row value for the ``index``th
        row. This row value consists of a tuple of cell values, one
        for each column in the row. If ``index`` is a tuple of two
        integers, ``(i,j)``, then return the value of the cell in the
        ``i``th row and the ``j``th column.
        """
        if isinstance(index, slice):
            raise ValueError("Slicing not supported")
        elif isinstance(index, tuple) and len(index) == 2:
            return self._rows[index[0]][self.column_index(index[1])]
        else:
            return tuple(self._rows[index])

    def __setitem__(self, index, val):
        """
        Replace the value of a row or a cell in this table with
        ``val``.

        If ``index`` is an integer, then ``val`` should be a row value
        (i.e., a tuple of cell values, one for each column). In this
        case, the values of the ``index``th row of the table will be
        replaced with the values in ``val``.

        If ``index`` is a tuple of integers, ``(i,j)``, then replace the
        value of the cell in the ``i``th row and ``j``th column with
        ``val``.
        """
        if isinstance(index, slice):
            raise ValueError("Slicing not supported")

        # table[i,j] = val
        elif isinstance(index, tuple) and len(index) == 2:
            i, j = index[0], self.column_index(index[1])
            config_cookie = self._save_config_info([i])
            self._rows[i][j] = val
            if self._reprfunc is not None:
                val = self._reprfunc(i, j, val)
            # Replace the displayed cell: insert the new text, then delete
            # the old entry that was shifted down by one.
            self._mlb.listboxes[j].insert(i, val)
            self._mlb.listboxes[j].delete(i + 1)
            self._restore_config_info(config_cookie)

        # table[i] = val
        else:
            config_cookie = self._save_config_info([index])
            self._checkrow(val)
            self._rows[index] = list(val)
            if self._reprfunc is not None:
                val = [self._reprfunc(index, j, v) for (j, v) in enumerate(val)]
            self._mlb.insert(index, val)
            self._mlb.delete(index + 1)
            self._restore_config_info(config_cookie)

    def __delitem__(self, row_index):
        """
        Delete the ``row_index``th row from this table.
        """
        if isinstance(row_index, slice):
            raise ValueError("Slicing not supported")
        if isinstance(row_index, tuple) and len(row_index) == 2:
            raise ValueError("Cannot delete a single cell!")
        del self._rows[row_index]
        self._mlb.delete(row_index)
        if self._DEBUG:
            self._check_table_vs_mlb()

    def __len__(self):
        """
        :return: the number of rows in this table.
        """
        return len(self._rows)

    def _checkrow(self, rowvalue):
        """
        Helper function: check that a given row value has the correct
        number of elements; and if not, raise an exception.
        """
        if len(rowvalue) != self._num_columns:
            raise ValueError(
                "Row %r has %d columns; expected %d"
                % (rowvalue, len(rowvalue), self._num_columns)
            )

    # /////////////////////////////////////////////////////////////////
    # Columns
    # /////////////////////////////////////////////////////////////////

    def column_names(self):
        """A list of the names of the columns in this table."""
        return self._mlb.column_names

    def column_index(self, i):
        """
        If ``i`` is a valid column index integer, then return it as is.
        Otherwise, check if ``i`` is used as the name for any column;
        if so, return that column's index. Otherwise, raise a
        ``KeyError`` exception.
        """
        if isinstance(i, int) and 0 <= i < self._num_columns:
            return i
        else:
            # This raises a key error if the column is not found.
            return self._column_name_to_index[i]

    def hide_column(self, column_index):
        """:see: ``MultiListbox.hide_column()``"""
        self._mlb.hide_column(self.column_index(column_index))

    def show_column(self, column_index):
        """:see: ``MultiListbox.show_column()``"""
        self._mlb.show_column(self.column_index(column_index))

    # /////////////////////////////////////////////////////////////////
    # Selection
    # /////////////////////////////////////////////////////////////////

    def selected_row(self):
        """
        Return the index of the currently selected row, or None if
        no row is selected. To get the row value itself, use
        ``table[table.selected_row()]``.
        """
        sel = self._mlb.curselection()
        if sel:
            return int(sel[0])
        else:
            return None

    def select(self, index=None, delta=None, see=True):
        """:see: ``MultiListbox.select()``"""
        self._mlb.select(index, delta, see)

    # /////////////////////////////////////////////////////////////////
    # Sorting
    # /////////////////////////////////////////////////////////////////

    def sort_by(self, column_index, order="toggle"):
        """
        Sort the rows in this table, using the specified column's
        values as a sort key.

        :param column_index: Specifies which column to sort, using
            either a column index (int) or a column's label name
            (str).

        :param order: Specifies whether to sort the values in
            ascending or descending order:

            - ``'ascending'``: Sort from least to greatest.
            - ``'descending'``: Sort from greatest to least.
            - ``'toggle'``: If the most recent call to ``sort_by()``
              sorted the table by the same column (``column_index``),
              then reverse the rows; otherwise sort in ascending
              order.
        """
        if order not in ("ascending", "descending", "toggle"):
            raise ValueError(
                'sort_by(): order should be "ascending", ' '"descending", or "toggle".'
            )
        column_index = self.column_index(column_index)
        # Row identity (not position) must survive the reordering, so the
        # config cookie is keyed by id().
        config_cookie = self._save_config_info(index_by_id=True)

        # Sort the rows.
        if order == "toggle" and column_index == self._sortkey:
            self._rows.reverse()
        else:
            self._rows.sort(
                key=operator.itemgetter(column_index), reverse=(order == "descending")
            )
            self._sortkey = column_index

        # Redraw the table.
        self._fill_table()
        self._restore_config_info(config_cookie, index_by_id=True, see=True)
        if self._DEBUG:
            self._check_table_vs_mlb()

    def _sort(self, event):
        """Event handler for clicking on a column label -- sort by
        that column."""
        column_index = event.widget.column_index

        # If they click on the far-left of far-right of a column's
        # label, then resize rather than sorting.
        if self._mlb._resize_column(event):
            return "continue"

        # Otherwise, sort.
        else:
            self.sort_by(column_index)
            return "continue"

    # /////////////////////////////////////////////////////////////////
    # { Table Drawing Helpers
    # /////////////////////////////////////////////////////////////////

    def _fill_table(self, save_config=True):
        """
        Re-draw the table from scratch, by clearing out the table's
        multi-column listbox; and then filling it in with values from
        ``self._rows``. Note that any cell-, row-, or column-specific
        color configuration that has been done will be lost. The
        selection will also be lost -- i.e., no row will be selected
        after this call completes.
        """
        self._mlb.delete(0, "end")
        for i, row in enumerate(self._rows):
            if self._reprfunc is not None:
                row = [self._reprfunc(i, j, v) for (j, v) in enumerate(row)]
            self._mlb.insert("end", row)

    def _get_itemconfig(self, r, c):
        # Snapshot the four color options for one cell; the [-1] element of
        # a Tk option tuple is its current value.
        return {
            k: self._mlb.itemconfig(r, c, k)[-1]
            for k in (
                "foreground",
                "selectforeground",
                "background",
                "selectbackground",
            )
        }

    def _save_config_info(self, row_indices=None, index_by_id=False):
        """
        Return a 'cookie' containing information about which row is
        selected, and what color configurations have been applied.
        This information can then be re-applied to the table (after
        making modifications) using ``_restore_config_info()``. Color
        configuration information will be saved for any rows in
        ``row_indices``, or in the entire table, if
        ``row_indices=None``. If ``index_by_id=True``, then the cookie
        will associate rows with their configuration information based
        on the rows' python id. This is useful when performing
        operations that re-arrange the rows (e.g. ``sort``). If
        ``index_by_id=False``, then it is assumed that all rows will be
        in the same order when ``_restore_config_info()`` is called.
        """
        # Default value for row_indices is all rows.
        if row_indices is None:
            row_indices = list(range(len(self._rows)))

        # Look up our current selection.
        selection = self.selected_row()
        if index_by_id and selection is not None:
            selection = id(self._rows[selection])

        # Look up the color configuration info for each row.
        if index_by_id:
            config = {
                id(self._rows[r]): [
                    self._get_itemconfig(r, c) for c in range(self._num_columns)
                ]
                for r in row_indices
            }
        else:
            config = {
                r: [self._get_itemconfig(r, c) for c in range(self._num_columns)]
                for r in row_indices
            }

        return selection, config

    def _restore_config_info(self, cookie, index_by_id=False, see=False):
        """
        Restore selection & color configuration information that was
        saved using ``_save_config_info``.
        """
        selection, config = cookie

        # Clear the selection.
        if selection is None:
            self._mlb.selection_clear(0, "end")

        # Restore selection & color config
        if index_by_id:
            for r, row in enumerate(self._rows):
                if id(row) in config:
                    for c in range(self._num_columns):
                        self._mlb.itemconfigure(r, c, config[id(row)][c])
                if id(row) == selection:
                    self._mlb.select(r, see=see)
        else:
            if selection is not None:
                self._mlb.select(selection, see=see)
            for r in config:
                for c in range(self._num_columns):
                    self._mlb.itemconfigure(r, c, config[r][c])

    # /////////////////////////////////////////////////////////////////
    # Debugging (Invariant Checker)
    # /////////////////////////////////////////////////////////////////

    # If true, then run _check_table_vs_mlb() after any operation
    # that modifies the table.
    _DEBUG = False
    """If true, then run ``_check_table_vs_mlb()`` after any operation
    that modifies the table."""

    def _check_table_vs_mlb(self):
        """
        Verify that the contents of the table's ``_rows`` variable match
        the contents of its multi-listbox (``_mlb``). This is just
        included for debugging purposes, to make sure that the
        list-modifying operations are working correctly.
        """
        for col in self._mlb.listboxes:
            assert len(self) == col.size()
        for row in self:
            assert len(row) == self._num_columns
        assert self._num_columns == len(self._mlb.column_names)
        # assert self._column_names == self._mlb.column_names
        for i, row in enumerate(self):
            for j, cell in enumerate(row):
                if self._reprfunc is not None:
                    cell = self._reprfunc(i, j, cell)
                assert self._mlb.get(i)[j] == cell
# Module-level corpus readers used by demo() below (via nltk.corpus).
# Brown corpus: category-tagged files named e.g. "ca01" .. "cr09".
brown: CategorizedTaggedCorpusReader = LazyCorpusLoader(
    "brown",
    CategorizedTaggedCorpusReader,
    r"c[a-z]\d\d",
    cat_file="cats.txt",
    tagset="brown",
    encoding="ascii",
)
# WordNet: the nested loader supplies the Open Multilingual Wordnet
# (omw-1.4) tab-separated data files to the WordNet reader.
wordnet: WordNetCorpusReader = LazyCorpusLoader(
    "wordnet",
    WordNetCorpusReader,
    LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
)
def demo():
    """
    Demonstrate the ``Table`` widget: display a table of WordNet data
    (synset definition, first hypernym definition, first hyponym
    definition) for nouns found in a sample of the Brown corpus.

    Opens a Tk window; press Control-q to close it.
    """
    root = Tk()
    root.bind("<Control-q>", lambda e: root.destroy())

    table = Table(
        root,
        "Word Synset Hypernym Hyponym".split(),
        column_weights=[0, 1, 1, 1],
        reprfunc=(lambda i, j, s: " %s" % s),
    )
    table.pack(expand=True, fill="both")

    from nltk.corpus import brown, wordnet

    for word, pos in sorted(set(brown.tagged_words()[:500])):
        if pos[0] != "N":
            continue
        word = word.lower()
        for synset in wordnet.synsets(word):
            # A synset may have no hypernyms/hyponyms; [0] then raises
            # IndexError, so fall back to a placeholder.
            try:
                hyper_def = synset.hypernyms()[0].definition()
            except IndexError:
                hyper_def = "*none*"
            try:
                # Fixed: this previously read synset.hypernyms() (copy-paste
                # bug), so the Hyponym column duplicated the Hypernym column.
                hypo_def = synset.hyponyms()[0].definition()
            except IndexError:
                hypo_def = "*none*"
            table.append([word, synset.definition(), hyper_def, hypo_def])

    table.columnconfig("Word", background="#afa")
    table.columnconfig("Synset", background="#efe")
    table.columnconfig("Hypernym", background="#fee")
    table.columnconfig("Hyponym", background="#ffe")
    for row in range(len(table)):
        for column in ("Hypernym", "Hyponym"):
            if table[row, column] == "*none*":
                table.itemconfig(
                    row, column, foreground="#666", selectforeground="#666"
                )
    root.mainloop()
170,563 |
The provided code snippet includes necessary dependencies for implementing the `dispersion_plot` function. Write a Python function `def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot")` to solve the following problem:
Generate a lexical dispersion plot. Parameters: ``text`` (list(str) or iter(str)) -- the source text; ``words`` (list of str) -- the target words; ``ignore_case`` (bool) -- set this flag to ignore case when searching the text. Returns a matplotlib ``Axes`` object that may still be modified before plotting.
Here is the function:
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
    """
    Generate a lexical dispersion plot, marking each offset in ``text``
    at which one of the target ``words`` occurs.

    :param text: The source text
    :type text: list(str) or iter(str)
    :param words: The target words
    :type words: list of str
    :param ignore_case: flag to set if case should be ignored when searching text
    :type ignore_case: bool
    :return: a matplotlib Axes object that may still be modified before plotting
    :rtype: Axes
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError as e:
        raise ImportError(
            "The plot function requires matplotlib to be installed. "
            "See https://matplotlib.org/"
        ) from e

    normalize = (lambda w: w.casefold()) if ignore_case else (lambda w: w)

    # Map each target word to its y-axis row; reversing puts the first
    # word at the top of the plot.
    word2y = {normalize(word): y for y, word in enumerate(reversed(words))}

    xs = []
    ys = []
    for offset, token in enumerate(text):
        row = word2y.get(normalize(token))
        if row is not None:
            xs.append(offset)
            ys.append(row)

    _, ax = plt.subplots()
    ax.plot(xs, ys, "|")
    ax.set_yticks(list(range(len(words))), words, color="C0")
    ax.set_ylim(-1, len(words))
    ax.set_title(title)
    ax.set_xlabel("Word Offset")
    return ax
170,564 | import re
from tkinter import (
Button,
Canvas,
Entry,
Frame,
IntVar,
Label,
Scrollbar,
Text,
Tk,
Toplevel,
)
from nltk.draw.tree import TreeSegmentWidget, tree_to_treesegment
from nltk.draw.util import (
CanvasFrame,
ColorizedList,
ShowText,
SymbolWidget,
TextWidget,
)
from nltk.grammar import CFG, Nonterminal, _read_cfg_production, nonterminals
from nltk.tree import Tree
class CFGDemo:
    """
    A Tk-based demo for exploring a context-free grammar.  The window
    shows the grammar's productions (left), a "treelet" preview of the
    currently selected production (bottom-left), and a workspace canvas
    holding the start symbol and the words of ``text`` (right).

    NOTE(review): several parts are unfinished stubs (menus, buttons,
    ``workspace_markprod``), so tree construction in the workspace is
    not fully wired up.
    """

    def __init__(self, grammar, text):
        # :param grammar: the CFG whose productions are displayed.
        # :param text: sequence of words shown as leaves in the workspace.
        self._grammar = grammar
        self._text = text

        # Set up the main window.
        self._top = Tk()
        self._top.title("Context Free Grammar Demo")

        # Base font size
        self._size = IntVar(self._top)
        self._size.set(12)  # = medium

        # Set up the key bindings
        self._init_bindings(self._top)

        # Create the basic frames
        frame1 = Frame(self._top)
        frame1.pack(side="left", fill="y", expand=0)
        self._init_menubar(self._top)
        self._init_buttons(self._top)
        self._init_grammar(frame1)
        self._init_treelet(frame1)
        self._init_workspace(self._top)

    # //////////////////////////////////////////////////
    # Initialization
    # //////////////////////////////////////////////////

    def _init_bindings(self, top):
        # Control-q closes the window.
        top.bind("<Control-q>", self.destroy)

    def _init_menubar(self, parent):
        # Not yet implemented.
        pass

    def _init_buttons(self, parent):
        # Not yet implemented.
        pass

    def _init_grammar(self, parent):
        # Scrollable, colorized list of the grammar's productions.
        self._prodlist = ProductionList(parent, self._grammar, width=20)
        self._prodlist.pack(side="top", fill="both", expand=1)
        self._prodlist.focus()
        self._prodlist.add_callback("select", self._selectprod_cb)
        self._prodlist.add_callback("move", self._selectprod_cb)

    def _init_treelet(self, parent):
        # Canvas showing a small rendering of the selected production.
        self._treelet_canvas = Canvas(parent, background="white")
        self._treelet_canvas.pack(side="bottom", fill="x")
        self._treelet = None

    def _init_workspace(self, parent):
        # Main canvas where the parse tree is assembled.
        self._workspace = CanvasFrame(parent, background="white")
        self._workspace.pack(side="right", fill="both", expand=1)
        self._tree = None
        self.reset_workspace()

    # //////////////////////////////////////////////////
    # Workspace
    # //////////////////////////////////////////////////

    def reset_workspace(self):
        """Clear the workspace and redraw the start symbol (as the tree
        root) plus one draggable leaf widget per word of the text."""
        c = self._workspace.canvas()
        fontsize = int(self._size.get())
        node_font = ("helvetica", -(fontsize + 4), "bold")
        leaf_font = ("helvetica", -(fontsize + 2))

        # Remove the old tree
        if self._tree is not None:
            self._workspace.remove_widget(self._tree)

        # The root of the tree.
        start = self._grammar.start().symbol()
        rootnode = TextWidget(c, start, font=node_font, draggable=1)

        # The leaves of the tree.
        leaves = []
        for word in self._text:
            leaves.append(TextWidget(c, word, font=leaf_font, draggable=1))

        # Put it all together into one tree
        self._tree = TreeSegmentWidget(c, rootnode, leaves, color="white")

        # Add it to the workspace.
        self._workspace.add_widget(self._tree)

        # Move the leaves to the bottom of the workspace.
        for leaf in leaves:
            leaf.move(0, 100)

        # self._nodes = {start:1}
        # self._leaves = dict([(l,1) for l in leaves])

    def workspace_markprod(self, production):
        # Not yet implemented.
        pass

    def _markproduction(self, prod, tree=None):
        """Scan the subtrees of ``tree`` (default: the workspace tree) for
        positions where the right-hand side of ``prod`` matches a run of
        adjacent subtrees, and report each match."""
        if tree is None:
            tree = self._tree
        for i in range(len(tree.subtrees()) - len(prod.rhs())):
            if tree["color", i] == "white":
                self._markproduction  # FIXME: Is this necessary at all?

            for j, node in enumerate(prod.rhs()):
                widget = tree.subtrees()[i + j]
                if (
                    isinstance(node, Nonterminal)
                    and isinstance(widget, TreeSegmentWidget)
                    # NOTE(review): ``node.symbol`` is a bound method, so
                    # comparing it to a string is always False; this looks
                    # like it should be ``node.symbol()`` -- confirm.
                    and node.symbol == widget.label().text()
                ):
                    pass  # matching nonterminal
                elif (
                    isinstance(node, str)
                    and isinstance(widget, TextWidget)
                    and node == widget.text()
                ):
                    pass  # matching terminal
                else:
                    break
            else:
                # Everything matched!
                print("MATCH AT", i)

    # //////////////////////////////////////////////////
    # Grammar
    # //////////////////////////////////////////////////

    def _selectprod_cb(self, production):
        """Callback: a production was selected in the production list.
        Highlight it, render it as a treelet, and mark where it could be
        applied in the workspace."""
        canvas = self._treelet_canvas

        self._prodlist.highlight(production)
        if self._treelet is not None:
            self._treelet.destroy()

        # Convert the production to a tree.
        rhs = production.rhs()
        # NOTE(review): the converted ``Tree(elt)`` value is discarded --
        # ``elt`` is rebound but ``rhs`` is never updated, so this loop has
        # no effect.  Confirm whether the intent was to wrap nonterminals.
        for (i, elt) in enumerate(rhs):
            if isinstance(elt, Nonterminal):
                elt = Tree(elt)
        tree = Tree(production.lhs().symbol(), *rhs)

        # Draw the tree in the treelet area.
        fontsize = int(self._size.get())
        node_font = ("helvetica", -(fontsize + 4), "bold")
        leaf_font = ("helvetica", -(fontsize + 2))
        self._treelet = tree_to_treesegment(
            canvas, tree, node_font=node_font, leaf_font=leaf_font
        )
        self._treelet["draggable"] = 1

        # Center the treelet.
        (x1, y1, x2, y2) = self._treelet.bbox()
        w, h = int(canvas["width"]), int(canvas["height"])
        self._treelet.move((w - x1 - x2) / 2, (h - y1 - y2) / 2)

        # Mark the places where we can add it to the workspace.
        self._markproduction(production)

    def destroy(self, *args):
        # Close the demo window.
        self._top.destroy()

    def mainloop(self, *args, **kwargs):
        # Enter the Tk event loop.
        self._top.mainloop(*args, **kwargs)
class Nonterminal:
    """
    A non-terminal symbol for a context free grammar.  ``Nonterminal``
    is a wrapper class for node values; it is used by ``Production``
    objects to distinguish node values from leaf values.
    The node value wrapped by a ``Nonterminal`` is known as its
    "symbol".  Symbols are typically strings representing phrasal
    categories (such as ``"NP"`` or ``"VP"``), although more complex
    symbol types are sometimes used (e.g., for lexicalized grammars).
    Since symbols are node values, they must be immutable and
    hashable.  Two ``Nonterminals`` are considered equal if their
    symbols are equal.

    :see: ``CFG``, ``Production``
    :type _symbol: any
    :ivar _symbol: The node value corresponding to this
        ``Nonterminal``.  This value must be immutable and hashable.
    """

    def __init__(self, symbol):
        """
        Construct a new non-terminal wrapping the given symbol.

        :type symbol: any
        :param symbol: The node value corresponding to this
            ``Nonterminal``.  This value must be immutable and
            hashable.
        """
        self._symbol = symbol

    def symbol(self):
        """
        Return the node value corresponding to this ``Nonterminal``.

        :rtype: (any)
        """
        return self._symbol

    def __eq__(self, other):
        """
        Two non-terminals are equal iff they have exactly the same
        type and their symbols compare equal.

        :rtype: bool
        """
        if type(self) is not type(other):
            return False
        return self._symbol == other._symbol

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        # Ordering is defined only between Nonterminals; anything else
        # is reported as unorderable.
        if not isinstance(other, Nonterminal):
            raise_unorderable_types("<", self, other)
        return self._symbol < other._symbol

    def __hash__(self):
        # Hash by symbol, consistent with __eq__.
        return hash(self._symbol)

    def __repr__(self):
        """
        Return a string representation for this ``Nonterminal``.

        :rtype: str
        """
        sym = self._symbol
        return sym if isinstance(sym, str) else repr(sym)

    def __str__(self):
        """
        Return a string representation for this ``Nonterminal``.

        :rtype: str
        """
        sym = self._symbol
        return sym if isinstance(sym, str) else repr(sym)

    def __div__(self, rhs):
        """
        Return a new nonterminal whose symbol is ``A/B``, where ``A`` is
        the symbol for this nonterminal, and ``B`` is the symbol for rhs.

        :param rhs: The nonterminal used to form the right hand side
            of the new nonterminal.
        :type rhs: Nonterminal
        :rtype: Nonterminal
        """
        return Nonterminal(f"{self._symbol}/{rhs._symbol}")

    def __truediv__(self, rhs):
        """
        Return a new nonterminal whose symbol is ``A/B``; the ``/``
        operator delegates to ``__div__``.

        :param rhs: The nonterminal used to form the right hand side
            of the new nonterminal.
        :type rhs: Nonterminal
        :rtype: Nonterminal
        """
        return self.__div__(rhs)
def nonterminals(symbols):
    """
    Given a string containing a list of symbol names, return a list of
    ``Nonterminals`` constructed from those symbols.

    :param symbols: The symbol name string. This string can be
        delimited by either spaces or commas.
    :type symbols: str
    :return: A list of ``Nonterminals`` constructed from the symbol
        names given in ``symbols``. The ``Nonterminals`` are sorted
        in the same order as the symbols names.
    :rtype: list(Nonterminal)
    """
    # If any comma appears, treat commas as the delimiter; otherwise
    # split on arbitrary whitespace.
    names = symbols.split(",") if "," in symbols else symbols.split()
    return [Nonterminal(name.strip()) for name in names]
class CFG:
"""
A context-free grammar. A grammar consists of a start state and
a set of productions. The set of terminals and nonterminals is
implicitly specified by the productions.
If you need efficient key-based access to productions, you
can use a subclass to implement it.
"""
def __init__(self, start, productions, calculate_leftcorners=True):
"""
Create a new context-free grammar, from the given start state
and set of ``Production`` instances.
:param start: The start symbol
:type start: Nonterminal
:param productions: The list of productions that defines the grammar
:type productions: list(Production)
:param calculate_leftcorners: False if we don't want to calculate the
leftcorner relation. In that case, some optimized chart parsers won't work.
:type calculate_leftcorners: bool
"""
if not is_nonterminal(start):
raise TypeError(
"start should be a Nonterminal object,"
" not a %s" % type(start).__name__
)
self._start = start
self._productions = productions
self._categories = {prod.lhs() for prod in productions}
self._calculate_indexes()
self._calculate_grammar_forms()
if calculate_leftcorners:
self._calculate_leftcorners()
def _calculate_indexes(self):
self._lhs_index = {}
self._rhs_index = {}
self._empty_index = {}
self._lexical_index = {}
for prod in self._productions:
# Left hand side.
lhs = prod._lhs
if lhs not in self._lhs_index:
self._lhs_index[lhs] = []
self._lhs_index[lhs].append(prod)
if prod._rhs:
# First item in right hand side.
rhs0 = prod._rhs[0]
if rhs0 not in self._rhs_index:
self._rhs_index[rhs0] = []
self._rhs_index[rhs0].append(prod)
else:
# The right hand side is empty.
self._empty_index[prod.lhs()] = prod
# Lexical tokens in the right hand side.
for token in prod._rhs:
if is_terminal(token):
self._lexical_index.setdefault(token, set()).add(prod)
def _calculate_leftcorners(self):
# Calculate leftcorner relations, for use in optimized parsing.
self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories}
self._immediate_leftcorner_words = {cat: set() for cat in self._categories}
for prod in self.productions():
if len(prod) > 0:
cat, left = prod.lhs(), prod.rhs()[0]
if is_nonterminal(left):
self._immediate_leftcorner_categories[cat].add(left)
else:
self._immediate_leftcorner_words[cat].add(left)
lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True)
self._leftcorners = lc
self._leftcorner_parents = invert_graph(lc)
nr_leftcorner_categories = sum(
map(len, self._immediate_leftcorner_categories.values())
)
nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values()))
if nr_leftcorner_words > nr_leftcorner_categories > 10000:
# If the grammar is big, the leftcorner-word dictionary will be too large.
# In that case it is better to calculate the relation on demand.
self._leftcorner_words = None
return
self._leftcorner_words = {}
for cat in self._leftcorners:
lefts = self._leftcorners[cat]
lc = self._leftcorner_words[cat] = set()
for left in lefts:
lc.update(self._immediate_leftcorner_words.get(left, set()))
def fromstring(cls, input, encoding=None):
"""
Return the grammar instance corresponding to the input string(s).
:param input: a grammar, either in the form of a string or as a list of strings.
"""
start, productions = read_grammar(
input, standard_nonterm_parser, encoding=encoding
)
return cls(start, productions)
def start(self):
"""
Return the start symbol of the grammar
:rtype: Nonterminal
"""
return self._start
# tricky to balance readability and efficiency here!
# can't use set operations as they don't preserve ordering
def productions(self, lhs=None, rhs=None, empty=False):
"""
Return the grammar productions, filtered by the left-hand side
or the first item in the right-hand side.
:param lhs: Only return productions with the given left-hand side.
:param rhs: Only return productions with the given first item
in the right-hand side.
:param empty: Only return productions with an empty right-hand side.
:return: A list of productions matching the given constraints.
:rtype: list(Production)
"""
if rhs and empty:
raise ValueError(
"You cannot select empty and non-empty " "productions at the same time."
)
# no constraints so return everything
if not lhs and not rhs:
if not empty:
return self._productions
else:
return self._empty_index.values()
# only lhs specified so look up its index
elif lhs and not rhs:
if not empty:
return self._lhs_index.get(lhs, [])
elif lhs in self._empty_index:
return [self._empty_index[lhs]]
else:
return []
# only rhs specified so look up its index
elif rhs and not lhs:
return self._rhs_index.get(rhs, [])
# intersect
else:
return [
prod
for prod in self._lhs_index.get(lhs, [])
if prod in self._rhs_index.get(rhs, [])
]
def leftcorners(self, cat):
"""
Return the set of all nonterminals that the given nonterminal
can start with, including itself.
This is the reflexive, transitive closure of the immediate
leftcorner relation: (A > B) iff (A -> B beta)
:param cat: the parent of the leftcorners
:type cat: Nonterminal
:return: the set of all leftcorners
:rtype: set(Nonterminal)
"""
return self._leftcorners.get(cat, {cat})
def is_leftcorner(self, cat, left):
"""
True if left is a leftcorner of cat, where left can be a
terminal or a nonterminal.
:param cat: the parent of the leftcorner
:type cat: Nonterminal
:param left: the suggested leftcorner
:type left: Terminal or Nonterminal
:rtype: bool
"""
if is_nonterminal(left):
return left in self.leftcorners(cat)
elif self._leftcorner_words:
return left in self._leftcorner_words.get(cat, set())
else:
return any(
left in self._immediate_leftcorner_words.get(parent, set())
for parent in self.leftcorners(cat)
)
def leftcorner_parents(self, cat):
"""
Return the set of all nonterminals for which the given category
is a left corner. This is the inverse of the leftcorner relation.
:param cat: the suggested leftcorner
:type cat: Nonterminal
:return: the set of all parents to the leftcorner
:rtype: set(Nonterminal)
"""
return self._leftcorner_parents.get(cat, {cat})
def check_coverage(self, tokens):
"""
Check whether the grammar rules cover the given list of tokens.
If not, then raise an exception.
:type tokens: list(str)
"""
missing = [tok for tok in tokens if not self._lexical_index.get(tok)]
if missing:
missing = ", ".join(f"{w!r}" for w in missing)
raise ValueError(
"Grammar does not cover some of the " "input words: %r." % missing
)
def _calculate_grammar_forms(self):
"""
Pre-calculate of which form(s) the grammar is.
"""
prods = self._productions
self._is_lexical = all(p.is_lexical() for p in prods)
self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1)
self._min_len = min(len(p) for p in prods)
self._max_len = max(len(p) for p in prods)
self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1)
def is_lexical(self):
"""
Return True if all productions are lexicalised.
"""
return self._is_lexical
def is_nonlexical(self):
"""
Return True if all lexical rules are "preterminals", that is,
unary rules which can be separated in a preprocessing step.
This means that all productions are of the forms
A -> B1 ... Bn (n>=0), or A -> "s".
Note: is_lexical() and is_nonlexical() are not opposites.
There are grammars which are neither, and grammars which are both.
"""
return self._is_nonlexical
def min_len(self):
"""
Return the right-hand side length of the shortest grammar production.
"""
return self._min_len
def max_len(self):
"""
Return the right-hand side length of the longest grammar production.
"""
return self._max_len
def is_nonempty(self):
"""
Return True if there are no empty productions.
"""
return self._min_len > 0
def is_binarised(self):
"""
Return True if all productions are at most binary.
Note that there can still be empty and unary productions.
"""
return self._max_len <= 2
def is_flexible_chomsky_normal_form(self):
"""
Return True if all productions are of the forms
A -> B C, A -> B, or A -> "s".
"""
return self.is_nonempty() and self.is_nonlexical() and self.is_binarised()
def is_chomsky_normal_form(self):
"""
Return True if the grammar is of Chomsky Normal Form, i.e. all productions
are of the form A -> B C, or A -> "s".
"""
return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical
    def chomsky_normal_form(self, new_token_padding="@$@", flexible=False):
        """Return an equivalent grammar in Chomsky Normal Form.

        :param new_token_padding: separator used when naming the fresh
            nonterminals introduced during binarisation.
        :param flexible: if True, stop after binarisation and keep unary
            rules ("flexible" CNF); if False, also remove nonlexical unary
            rules and deduplicate the resulting productions.
        :raises ValueError: if the grammar contains empty productions, or
            mixed (lexical rules with more than one RHS item) productions.
        """
        if self.is_chomsky_normal_form():
            return self
        if self.productions(empty=True):
            raise ValueError(
                "Grammar has Empty rules. " "Cannot deal with them at the moment"
            )

        # check for mixed rules
        for rule in self.productions():
            if rule.is_lexical() and len(rule.rhs()) > 1:
                raise ValueError(
                    f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}"
                )

        # Pipeline: first ensure the start symbol never appears on a RHS,
        # then binarise, then (unless flexible) drop nonlexical unary rules.
        step1 = CFG.eliminate_start(self)
        step2 = CFG.binarize(step1, new_token_padding)
        if flexible:
            return step2
        step3 = CFG.remove_unitary_rules(step2)
        # Deduplicate productions introduced by unary-rule elimination.
        step4 = CFG(step3.start(), list(set(step3.productions())))
        return step4
def remove_unitary_rules(cls, grammar):
"""
Remove nonlexical unitary rules and convert them to
lexical
"""
result = []
unitary = []
for rule in grammar.productions():
if len(rule) == 1 and rule.is_nonlexical():
unitary.append(rule)
else:
result.append(rule)
while unitary:
rule = unitary.pop(0)
for item in grammar.productions(lhs=rule.rhs()[0]):
new_rule = Production(rule.lhs(), item.rhs())
if len(new_rule) != 1 or new_rule.is_lexical():
result.append(new_rule)
else:
unitary.append(new_rule)
n_grammar = CFG(grammar.start(), result)
return n_grammar
    def binarize(cls, grammar, padding="@$@"):
        """
        Convert all non-binary rules into binary by introducing
        new tokens.
        Example::

            Original:
                A => B C D
            After Conversion:
                A => B A@$@B
                A@$@B => C D

        :param grammar: the grammar to binarise.
        :param padding: separator inserted in the names of the fresh
            nonterminals (e.g. ``A@$@B``).
        """
        result = []

        for rule in grammar.productions():
            if len(rule.rhs()) > 2:
                # this rule needs to be broken down
                # Peel RHS items off the front one at a time, chaining each to
                # a freshly named nonterminal that carries the remainder.
                left_side = rule.lhs()
                for k in range(0, len(rule.rhs()) - 2):
                    tsym = rule.rhs()[k]
                    # NOTE(review): tsym.symbol() assumes tsym is a
                    # Nonterminal; chomsky_normal_form() rejects mixed rules
                    # upstream -- confirm for direct callers of binarize().
                    new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol())
                    new_production = Production(left_side, (tsym, new_sym))
                    left_side = new_sym
                    result.append(new_production)
                last_prd = Production(left_side, rule.rhs()[-2:])
                result.append(last_prd)
            else:
                # Already binary or shorter: keep unchanged.
                result.append(rule)

        n_grammar = CFG(grammar.start(), result)
        return n_grammar
    def eliminate_start(cls, grammar):
        """
        Eliminate start rule in case it appears on RHS
        Example: S -> S0 S1 and S0 -> S1 S
        Then another rule S0_Sigma -> S is added

        :param grammar: the grammar to transform.
        :return: ``grammar`` itself if its start symbol never occurs on a
            right-hand side; otherwise a new grammar whose fresh start
            symbol ``S0_SIGMA`` derives the old one.
        """
        start = grammar.start()
        result = []
        need_to_add = None
        for rule in grammar.productions():
            if start in rule.rhs():
                need_to_add = True
            result.append(rule)
        if need_to_add:
            # The old start symbol occurs on some RHS: introduce a fresh
            # start symbol deriving it and rebuild the grammar.
            start = Nonterminal("S0_SIGMA")
            result.append(Production(start, [grammar.start()]))
            n_grammar = CFG(start, result)
            return n_grammar
        # Start symbol never appears on a RHS: the grammar is fine as-is.
        return grammar
def __repr__(self):
return "<Grammar with %d productions>" % len(self._productions)
def __str__(self):
result = "Grammar with %d productions" % len(self._productions)
result += " (start state = %r)" % self._start
for production in self._productions:
result += "\n %s" % production
return result
def demo2():
    """Run the CFG chart demo on a small hand-built toy grammar.

    Builds a miniature English grammar (including one empty PP production
    and one multi-word production), then parses "I saw a man in the park".

    NOTE(review): relies on ``CFGDemo``, which is not defined in this view
    of the module -- confirm it is in scope where this demo runs.
    """
    from nltk import CFG, Nonterminal, Production

    nonterminals = "S VP NP PP P N Name V Det"
    (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split())
    productions = (
        # Syntactic Productions
        Production(S, [NP, VP]),
        Production(NP, [Det, N]),
        Production(NP, [NP, PP]),
        Production(VP, [VP, PP]),
        Production(VP, [V, NP, PP]),
        Production(VP, [V, NP]),
        Production(PP, [P, NP]),
        Production(PP, []),
        Production(PP, ["up", "over", NP]),
        # Lexical Productions
        Production(NP, ["I"]),
        Production(Det, ["the"]),
        Production(Det, ["a"]),
        Production(N, ["man"]),
        Production(V, ["saw"]),
        Production(P, ["in"]),
        Production(P, ["with"]),
        Production(N, ["park"]),
        Production(N, ["dog"]),
        Production(N, ["statue"]),
        Production(Det, ["my"]),
    )

    grammar = CFG(S, productions)

    text = "I saw a man in the park".split()
    d = CFGDemo(grammar, text)
    d.mainloop()
170,565 | import re
from tkinter import (
Button,
Canvas,
Entry,
Frame,
IntVar,
Label,
Scrollbar,
Text,
Tk,
Toplevel,
)
from nltk.draw.tree import TreeSegmentWidget, tree_to_treesegment
from nltk.draw.util import (
CanvasFrame,
ColorizedList,
ShowText,
SymbolWidget,
TextWidget,
)
from nltk.grammar import CFG, Nonterminal, _read_cfg_production, nonterminals
from nltk.tree import Tree
class CFGEditor:
    """
    A dialog window for creating and editing context free grammars.
    ``CFGEditor`` imposes the following restrictions:

    - All nonterminals must be strings consisting of word
      characters.
    - All terminals must be strings consisting of word characters
      and space characters.
    """

    # Regular expressions used by _analyze_line.  Precompile them, so
    # we can process the text faster.
    ARROW = SymbolWidget.SYMBOLS["rightarrow"]
    _LHS_RE = re.compile(r"(^\s*\w+\s*)(->|(" + ARROW + "))")
    _ARROW_RE = re.compile(r"\s*(->|(" + ARROW + r"))\s*")
    _PRODUCTION_RE = re.compile(
        r"(^\s*\w+\s*)"  # LHS
        + "(->|("
        + ARROW
        + r"))\s*"  # arrow
        + r"((\w+|'[\w ]*'|\"[\w ]*\"|\|)\s*)*$"  # RHS
    )
    _TOKEN_RE = re.compile("\\w+|->|'[\\w ]+'|\"[\\w ]+\"|(" + ARROW + ")")
    _BOLD = ("helvetica", -12, "bold")

    def __init__(self, parent, cfg=None, set_cfg_callback=None):
        """Create the editor window.

        :param parent: the Tk parent widget for the new ``Toplevel``.
        :param cfg: the grammar to edit; defaults to an empty grammar with
            start symbol ``S``.
        :param set_cfg_callback: if given, called with the new ``CFG``
            whenever the user applies their edits.
        """
        self._parent = parent
        if cfg is not None:
            self._cfg = cfg
        else:
            self._cfg = CFG(Nonterminal("S"), [])
        self._set_cfg_callback = set_cfg_callback

        self._highlight_matching_nonterminals = 1

        # Create the top-level window.
        self._top = Toplevel(parent)
        self._init_bindings()

        self._init_startframe()
        self._startframe.pack(side="top", fill="x", expand=0)
        self._init_prodframe()
        self._prodframe.pack(side="top", fill="both", expand=1)
        self._init_buttons()
        self._buttonframe.pack(side="bottom", fill="x", expand=0)

        self._textwidget.focus()

    def _init_startframe(self):
        # Top row: editable start-symbol entry plus the "Productions:" label.
        frame = self._startframe = Frame(self._top)
        self._start = Entry(frame)
        self._start.pack(side="right")
        Label(frame, text="Start Symbol:").pack(side="right")
        Label(frame, text="Productions:").pack(side="left")
        self._start.insert(0, self._cfg.start().symbol())

    def _init_buttons(self):
        # Bottom row of action buttons: Ok/Apply/Reset/Cancel/Help.
        frame = self._buttonframe = Frame(self._top)
        Button(frame, text="Ok", command=self._ok, underline=0, takefocus=0).pack(
            side="left"
        )
        Button(frame, text="Apply", command=self._apply, underline=0, takefocus=0).pack(
            side="left"
        )
        Button(frame, text="Reset", command=self._reset, underline=0, takefocus=0).pack(
            side="left"
        )
        Button(
            frame, text="Cancel", command=self._cancel, underline=0, takefocus=0
        ).pack(side="left")
        Button(frame, text="Help", command=self._help, underline=0, takefocus=0).pack(
            side="right"
        )

    def _init_bindings(self):
        # Window title and keyboard shortcuts for the action buttons.
        self._top.title("CFG Editor")
        self._top.bind("<Control-q>", self._cancel)
        self._top.bind("<Alt-q>", self._cancel)
        self._top.bind("<Control-d>", self._cancel)
        # self._top.bind('<Control-x>', self._cancel)
        self._top.bind("<Alt-x>", self._cancel)
        self._top.bind("<Escape>", self._cancel)
        # self._top.bind('<Control-c>', self._cancel)
        self._top.bind("<Alt-c>", self._cancel)

        self._top.bind("<Control-o>", self._ok)
        self._top.bind("<Alt-o>", self._ok)
        self._top.bind("<Control-a>", self._apply)
        self._top.bind("<Alt-a>", self._apply)
        self._top.bind("<Control-r>", self._reset)
        self._top.bind("<Alt-r>", self._reset)
        self._top.bind("<Control-h>", self._help)
        self._top.bind("<Alt-h>", self._help)
        self._top.bind("<F1>", self._help)

    def _init_prodframe(self):
        """Build the production-editing area: a scrollable Text widget with
        colorization tags, keybindings, and the initial production text."""
        self._prodframe = Frame(self._top)

        # Create the basic Text widget & scrollbar.
        self._textwidget = Text(
            self._prodframe, background="#e0e0e0", exportselection=1
        )
        self._textscroll = Scrollbar(self._prodframe, takefocus=0, orient="vertical")
        self._textwidget.config(yscrollcommand=self._textscroll.set)
        self._textscroll.config(command=self._textwidget.yview)
        self._textscroll.pack(side="right", fill="y")
        self._textwidget.pack(expand=1, fill="both", side="left")

        # Initialize the colorization tags.  Each nonterminal gets its
        # own tag, so they aren't listed here.
        self._textwidget.tag_config("terminal", foreground="#006000")
        self._textwidget.tag_config("arrow", font="symbol")
        self._textwidget.tag_config("error", background="red")

        # Keep track of what line they're on.  We use that to remember
        # to re-analyze a line whenever they leave it.
        self._linenum = 0

        # Expand "->" to an arrow.
        self._top.bind(">", self._replace_arrows)

        # Re-colorize lines when appropriate.
        self._top.bind("<<Paste>>", self._analyze)
        self._top.bind("<KeyPress>", self._check_analyze)
        self._top.bind("<ButtonPress>", self._check_analyze)

        # Tab cycles focus. (why doesn't this work??)
        def cycle(e, textwidget=self._textwidget):
            textwidget.tk_focusNext().focus()

        self._textwidget.bind("<Tab>", cycle)

        # Merge consecutive productions sharing a left-hand side into one
        # "lhs -> rhs1 | rhs2 | ..." line; empty productions stay separate.
        prod_tuples = [(p.lhs(), [p.rhs()]) for p in self._cfg.productions()]
        for i in range(len(prod_tuples) - 1, 0, -1):
            if prod_tuples[i][0] == prod_tuples[i - 1][0]:
                if () in prod_tuples[i][1]:
                    continue
                if () in prod_tuples[i - 1][1]:
                    continue
                print(prod_tuples[i - 1][1])
                print(prod_tuples[i][1])
                prod_tuples[i - 1][1].extend(prod_tuples[i][1])
                del prod_tuples[i]

        for lhs, rhss in prod_tuples:
            print(lhs, rhss)
            s = "%s ->" % lhs
            for rhs in rhss:
                for elt in rhs:
                    if isinstance(elt, Nonterminal):
                        s += " %s" % elt
                    else:
                        s += " %r" % elt
                s += " |"
            s = s[:-2] + "\n"
            self._textwidget.insert("end", s)

        self._analyze()

    # # Add the producitons to the text widget, and colorize them.
    # prod_by_lhs = {}
    # for prod in self._cfg.productions():
    #     if len(prod.rhs()) > 0:
    #         prod_by_lhs.setdefault(prod.lhs(),[]).append(prod)
    # for (lhs, prods) in prod_by_lhs.items():
    #     self._textwidget.insert('end', '%s ->' % lhs)
    #     self._textwidget.insert('end', self._rhs(prods[0]))
    #     for prod in prods[1:]:
    #         print '\t|'+self._rhs(prod),
    #         self._textwidget.insert('end', '\t|'+self._rhs(prod))
    #     print
    #     self._textwidget.insert('end', '\n')
    # for prod in self._cfg.productions():
    #     if len(prod.rhs()) == 0:
    #         self._textwidget.insert('end', '%s' % prod)
    # self._analyze()

    # def _rhs(self, prod):
    #     s = ''
    #     for elt in prod.rhs():
    #         if isinstance(elt, Nonterminal): s += ' %s' % elt.symbol()
    #         else: s += ' %r' % elt
    #     return s

    def _clear_tags(self, linenum):
        """
        Remove all tags (except ``arrow`` and ``sel``) from the given
        line of the text widget used for editing the productions.
        """
        start = "%d.0" % linenum
        end = "%d.end" % linenum
        for tag in self._textwidget.tag_names():
            if tag not in ("arrow", "sel"):
                self._textwidget.tag_remove(tag, start, end)

    def _check_analyze(self, *e):
        """
        Check if we've moved to a new line.  If we have, then remove
        all colorization from the line we moved to, and re-colorize
        the line that we moved from.
        """
        linenum = int(self._textwidget.index("insert").split(".")[0])
        if linenum != self._linenum:
            self._clear_tags(linenum)
            self._analyze_line(self._linenum)
            self._linenum = linenum

    def _replace_arrows(self, *e):
        """
        Replace any ``'->'`` text strings with arrows (char \\256, in
        symbol font).  This searches the whole buffer, but is fast
        enough to be done anytime they press '>'.
        """
        arrow = "1.0"
        while True:
            arrow = self._textwidget.search("->", arrow, "end+1char")
            if arrow == "":
                break
            self._textwidget.delete(arrow, arrow + "+2char")
            self._textwidget.insert(arrow, self.ARROW, "arrow")
            self._textwidget.insert(arrow, "\t")

        arrow = "1.0"
        while True:
            arrow = self._textwidget.search(self.ARROW, arrow + "+1char", "end+1char")
            if arrow == "":
                break
            self._textwidget.tag_add("arrow", arrow, arrow + "+1char")

    def _analyze_token(self, match, linenum):
        """
        Given a line number and a regexp match for a token on that
        line, colorize the token.  Note that the regexp match gives us
        the token's text, start index (on the line), and end index (on
        the line).
        """
        # What type of token is it?
        if match.group()[0] in "'\"":
            tag = "terminal"
        elif match.group() in ("->", self.ARROW):
            tag = "arrow"
        else:
            # If it's a nonterminal, then set up new bindings, so we
            # can highlight all instances of that nonterminal when we
            # put the mouse over it.
            tag = "nonterminal_" + match.group()
            if tag not in self._textwidget.tag_names():
                self._init_nonterminal_tag(tag)

        start = "%d.%d" % (linenum, match.start())
        end = "%d.%d" % (linenum, match.end())
        self._textwidget.tag_add(tag, start, end)

    def _init_nonterminal_tag(self, tag, foreground="blue"):
        # Create a tag for one nonterminal; hovering highlights every
        # occurrence of that nonterminal (if the option is enabled).
        self._textwidget.tag_config(tag, foreground=foreground, font=CFGEditor._BOLD)
        if not self._highlight_matching_nonterminals:
            return

        def enter(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background="#80ff80")

        def leave(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background="")

        self._textwidget.tag_bind(tag, "<Enter>", enter)
        self._textwidget.tag_bind(tag, "<Leave>", leave)

    def _analyze_line(self, linenum):
        """
        Colorize a given line.
        """
        # Get rid of any tags that were previously on the line.
        self._clear_tags(linenum)

        # Get the line line's text string.
        line = self._textwidget.get(repr(linenum) + ".0", repr(linenum) + ".end")

        # If it's a valid production, then colorize each token.
        if CFGEditor._PRODUCTION_RE.match(line):
            # It's valid; Use _TOKEN_RE to tokenize the production,
            # and call analyze_token on each token.
            def analyze_token(match, self=self, linenum=linenum):
                self._analyze_token(match, linenum)
                return ""

            CFGEditor._TOKEN_RE.sub(analyze_token, line)
        elif line.strip() != "":
            # It's invalid; show the user where the error is.
            self._mark_error(linenum, line)

    def _mark_error(self, linenum, line):
        """
        Mark the location of an error in a line.
        """
        arrowmatch = CFGEditor._ARROW_RE.search(line)
        if not arrowmatch:
            # If there's no arrow at all, highlight the whole line.
            start = "%d.0" % linenum
            end = "%d.end" % linenum
        elif not CFGEditor._LHS_RE.match(line):
            # Otherwise, if the LHS is bad, highlight it.
            start = "%d.0" % linenum
            end = "%d.%d" % (linenum, arrowmatch.start())
        else:
            # Otherwise, highlight the RHS.
            start = "%d.%d" % (linenum, arrowmatch.end())
            end = "%d.end" % linenum

        # If we're highlighting 0 chars, highlight the whole line.
        if self._textwidget.compare(start, "==", end):
            start = "%d.0" % linenum
            end = "%d.end" % linenum
        self._textwidget.tag_add("error", start, end)

    def _analyze(self, *e):
        """
        Replace ``->`` with arrows, and colorize the entire buffer.
        """
        self._replace_arrows()
        numlines = int(self._textwidget.index("end").split(".")[0])
        for linenum in range(1, numlines + 1):  # line numbers start at 1.
            self._analyze_line(linenum)

    def _parse_productions(self):
        """
        Parse the current contents of the textwidget buffer, to create
        a list of productions.
        """
        productions = []

        # Get the text, normalize it, and split it into lines.
        text = self._textwidget.get("1.0", "end")
        text = re.sub(self.ARROW, "->", text)
        text = re.sub("\t", " ", text)
        lines = text.split("\n")

        # Convert each line to a CFG production
        for line in lines:
            line = line.strip()
            if line == "":
                continue
            productions += _read_cfg_production(line)
            # if line.strip() == '': continue
            # if not CFGEditor._PRODUCTION_RE.match(line):
            #     raise ValueError('Bad production string %r' % line)
            #
            # (lhs_str, rhs_str) = line.split('->')
            # lhs = Nonterminal(lhs_str.strip())
            # rhs = []
            # def parse_token(match, rhs=rhs):
            #     token = match.group()
            #     if token[0] in "'\"": rhs.append(token[1:-1])
            #     else: rhs.append(Nonterminal(token))
            #     return ''
            # CFGEditor._TOKEN_RE.sub(parse_token, rhs_str)
            #
            # productions.append(Production(lhs, *rhs))

        return productions

    def _destroy(self, *e):
        """Close the editor window (idempotent)."""
        if self._top is None:
            return
        self._top.destroy()
        self._top = None

    def _ok(self, *e):
        """Apply the edits, then close the window."""
        self._apply()
        self._destroy()

    def _apply(self, *e):
        """Parse the buffer into a CFG and hand it to the callback."""
        productions = self._parse_productions()
        start = Nonterminal(self._start.get())
        cfg = CFG(start, productions)
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(cfg)

    def _reset(self, *e):
        """Discard edits: reload the original grammar into the buffer."""
        self._textwidget.delete("1.0", "end")
        for production in self._cfg.productions():
            self._textwidget.insert("end", "%s\n" % production)
        self._analyze()
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(self._cfg)

    def _cancel(self, *e):
        """Revert to the original grammar (best effort) and close."""
        try:
            self._reset()
        except:
            # Best-effort revert: closing must succeed regardless.
            pass
        self._destroy()

    def _help(self, *e):
        """Display the help text, preferring the more legible 'fixed' font."""
        # The default font's not very legible; try using 'fixed' instead.
        try:
            ShowText(
                self._parent,
                "Help: Chart Parser Demo",
                (_CFGEditor_HELP).strip(),
                width=75,
                font="fixed",
            )
        except:
            ShowText(
                self._parent,
                "Help: Chart Parser Demo",
                (_CFGEditor_HELP).strip(),
                width=75,
            )
class Nonterminal:
    """A non-terminal symbol of a context free grammar.

    ``Nonterminal`` wraps a node value (its "symbol") so that ``Production``
    objects can tell node values apart from leaf values.  Symbols are
    typically strings naming phrasal categories (such as ``"NP"`` or
    ``"VP"``), but any immutable, hashable value works (e.g. for lexicalized
    grammars).  Two ``Nonterminal`` objects are equal iff their symbols are
    equal.

    :see: ``CFG``, ``Production``
    :type _symbol: any
    :ivar _symbol: the wrapped node value; must be immutable and hashable.
    """

    def __init__(self, symbol):
        """Wrap ``symbol`` in a new non-terminal.

        :type symbol: any
        :param symbol: the node value for this ``Nonterminal``; must be
            immutable and hashable.
        """
        self._symbol = symbol

    def symbol(self):
        """Return the wrapped node value.

        :rtype: (any)
        """
        return self._symbol

    def __eq__(self, other):
        """Equal iff ``other`` has exactly the same type and an equal symbol.

        :rtype: bool
        """
        if type(self) != type(other):
            return False
        return self._symbol == other._symbol

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        if not isinstance(other, Nonterminal):
            raise_unorderable_types("<", self, other)
        return self._symbol < other._symbol

    def __hash__(self):
        return hash(self._symbol)

    def __repr__(self):
        """Return a string representation for this ``Nonterminal``.

        :rtype: str
        """
        symbol = self._symbol
        return "%s" % (symbol if isinstance(symbol, str) else repr(symbol))

    def __str__(self):
        """Return a string representation for this ``Nonterminal``.

        :rtype: str
        """
        return self.__repr__()

    def __div__(self, rhs):
        """Return a new nonterminal whose symbol is ``A/B``, where ``A`` is
        this nonterminal's symbol and ``B`` is ``rhs``'s symbol.

        :param rhs: The nonterminal used to form the right hand side
            of the new nonterminal.
        :type rhs: Nonterminal
        :rtype: Nonterminal
        """
        return Nonterminal(f"{self._symbol}/{rhs._symbol}")

    def __truediv__(self, rhs):
        """Alias for ``__div__``, so the ``/`` operator works under true
        division.

        :param rhs: The nonterminal used to form the right hand side
            of the new nonterminal.
        :type rhs: Nonterminal
        :rtype: Nonterminal
        """
        return self.__div__(rhs)
def nonterminals(symbols):
    """Turn a string of symbol names into a list of ``Nonterminals``.

    :param symbols: The symbol name string. This string can be
        delimited by either spaces or commas.
    :type symbols: str
    :return: A list of ``Nonterminals`` constructed from the symbol
        names given in ``symbols``. The ``Nonterminals`` are sorted
        in the same order as the symbols names.
    :rtype: list(Nonterminal)
    """
    # str.split(None) splits on runs of whitespace, matching .split().
    separator = "," if "," in symbols else None
    return [Nonterminal(name.strip()) for name in symbols.split(separator)]
class CFG:
"""
A context-free grammar. A grammar consists of a start state and
a set of productions. The set of terminals and nonterminals is
implicitly specified by the productions.
If you need efficient key-based access to productions, you
can use a subclass to implement it.
"""
    def __init__(self, start, productions, calculate_leftcorners=True):
        """
        Create a new context-free grammar, from the given start state
        and set of ``Production`` instances.

        :param start: The start symbol
        :type start: Nonterminal
        :param productions: The list of productions that defines the grammar
        :type productions: list(Production)
        :param calculate_leftcorners: False if we don't want to calculate the
            leftcorner relation. In that case, some optimized chart parsers won't work.
        :type calculate_leftcorners: bool
        :raises TypeError: if ``start`` is not a ``Nonterminal``.
        """
        if not is_nonterminal(start):
            raise TypeError(
                "start should be a Nonterminal object,"
                " not a %s" % type(start).__name__
            )

        self._start = start
        self._productions = productions
        # All left-hand-side categories; used by the leftcorner computation.
        self._categories = {prod.lhs() for prod in productions}
        self._calculate_indexes()
        self._calculate_grammar_forms()
        if calculate_leftcorners:
            self._calculate_leftcorners()
def _calculate_indexes(self):
self._lhs_index = {}
self._rhs_index = {}
self._empty_index = {}
self._lexical_index = {}
for prod in self._productions:
# Left hand side.
lhs = prod._lhs
if lhs not in self._lhs_index:
self._lhs_index[lhs] = []
self._lhs_index[lhs].append(prod)
if prod._rhs:
# First item in right hand side.
rhs0 = prod._rhs[0]
if rhs0 not in self._rhs_index:
self._rhs_index[rhs0] = []
self._rhs_index[rhs0].append(prod)
else:
# The right hand side is empty.
self._empty_index[prod.lhs()] = prod
# Lexical tokens in the right hand side.
for token in prod._rhs:
if is_terminal(token):
self._lexical_index.setdefault(token, set()).add(prod)
    def _calculate_leftcorners(self):
        """Precompute the leftcorner relations used by optimized chart parsers.

        Populates:

        - ``_immediate_leftcorner_categories``: cat -> the nonterminals that
          appear first on the RHS of one of ``cat``'s productions (plus cat).
        - ``_immediate_leftcorner_words``: cat -> the terminals in that position.
        - ``_leftcorners``: the reflexive, transitive closure of the former.
        - ``_leftcorner_parents``: the inverse of ``_leftcorners``.
        - ``_leftcorner_words``: cat -> every terminal leftcorner, or ``None``
          when the grammar is too large to precompute this table.
        """
        # Calculate leftcorner relations, for use in optimized parsing.
        self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories}
        self._immediate_leftcorner_words = {cat: set() for cat in self._categories}
        for prod in self.productions():
            if len(prod) > 0:
                cat, left = prod.lhs(), prod.rhs()[0]
                if is_nonterminal(left):
                    self._immediate_leftcorner_categories[cat].add(left)
                else:
                    self._immediate_leftcorner_words[cat].add(left)

        lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True)
        self._leftcorners = lc
        self._leftcorner_parents = invert_graph(lc)

        nr_leftcorner_categories = sum(
            map(len, self._immediate_leftcorner_categories.values())
        )
        nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values()))
        if nr_leftcorner_words > nr_leftcorner_categories > 10000:
            # If the grammar is big, the leftcorner-word dictionary will be too large.
            # In that case it is better to calculate the relation on demand.
            self._leftcorner_words = None
            return

        # Flatten: for each category, collect the terminal leftcorners of all
        # of its (transitive) leftcorner categories.
        self._leftcorner_words = {}
        for cat in self._leftcorners:
            lefts = self._leftcorners[cat]
            lc = self._leftcorner_words[cat] = set()
            for left in lefts:
                lc.update(self._immediate_leftcorner_words.get(left, set()))
def fromstring(cls, input, encoding=None):
"""
Return the grammar instance corresponding to the input string(s).
:param input: a grammar, either in the form of a string or as a list of strings.
"""
start, productions = read_grammar(
input, standard_nonterm_parser, encoding=encoding
)
return cls(start, productions)
def start(self):
"""
Return the start symbol of the grammar
:rtype: Nonterminal
"""
return self._start
# tricky to balance readability and efficiency here!
# can't use set operations as they don't preserve ordering
def productions(self, lhs=None, rhs=None, empty=False):
"""
Return the grammar productions, filtered by the left-hand side
or the first item in the right-hand side.
:param lhs: Only return productions with the given left-hand side.
:param rhs: Only return productions with the given first item
in the right-hand side.
:param empty: Only return productions with an empty right-hand side.
:return: A list of productions matching the given constraints.
:rtype: list(Production)
"""
if rhs and empty:
raise ValueError(
"You cannot select empty and non-empty " "productions at the same time."
)
# no constraints so return everything
if not lhs and not rhs:
if not empty:
return self._productions
else:
return self._empty_index.values()
# only lhs specified so look up its index
elif lhs and not rhs:
if not empty:
return self._lhs_index.get(lhs, [])
elif lhs in self._empty_index:
return [self._empty_index[lhs]]
else:
return []
# only rhs specified so look up its index
elif rhs and not lhs:
return self._rhs_index.get(rhs, [])
# intersect
else:
return [
prod
for prod in self._lhs_index.get(lhs, [])
if prod in self._rhs_index.get(rhs, [])
]
def leftcorners(self, cat):
"""
Return the set of all nonterminals that the given nonterminal
can start with, including itself.
This is the reflexive, transitive closure of the immediate
leftcorner relation: (A > B) iff (A -> B beta)
:param cat: the parent of the leftcorners
:type cat: Nonterminal
:return: the set of all leftcorners
:rtype: set(Nonterminal)
"""
return self._leftcorners.get(cat, {cat})
def is_leftcorner(self, cat, left):
"""
True if left is a leftcorner of cat, where left can be a
terminal or a nonterminal.
:param cat: the parent of the leftcorner
:type cat: Nonterminal
:param left: the suggested leftcorner
:type left: Terminal or Nonterminal
:rtype: bool
"""
if is_nonterminal(left):
return left in self.leftcorners(cat)
elif self._leftcorner_words:
return left in self._leftcorner_words.get(cat, set())
else:
return any(
left in self._immediate_leftcorner_words.get(parent, set())
for parent in self.leftcorners(cat)
)
def leftcorner_parents(self, cat):
"""
Return the set of all nonterminals for which the given category
is a left corner. This is the inverse of the leftcorner relation.
:param cat: the suggested leftcorner
:type cat: Nonterminal
:return: the set of all parents to the leftcorner
:rtype: set(Nonterminal)
"""
return self._leftcorner_parents.get(cat, {cat})
def check_coverage(self, tokens):
"""
Check whether the grammar rules cover the given list of tokens.
If not, then raise an exception.
:type tokens: list(str)
"""
missing = [tok for tok in tokens if not self._lexical_index.get(tok)]
if missing:
missing = ", ".join(f"{w!r}" for w in missing)
raise ValueError(
"Grammar does not cover some of the " "input words: %r." % missing
)
def _calculate_grammar_forms(self):
"""
Pre-calculate of which form(s) the grammar is.
"""
prods = self._productions
self._is_lexical = all(p.is_lexical() for p in prods)
self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1)
self._min_len = min(len(p) for p in prods)
self._max_len = max(len(p) for p in prods)
self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1)
def is_lexical(self):
"""
Return True if all productions are lexicalised.
"""
return self._is_lexical
def is_nonlexical(self):
"""
Return True if all lexical rules are "preterminals", that is,
unary rules which can be separated in a preprocessing step.
This means that all productions are of the forms
A -> B1 ... Bn (n>=0), or A -> "s".
Note: is_lexical() and is_nonlexical() are not opposites.
There are grammars which are neither, and grammars which are both.
"""
return self._is_nonlexical
def min_len(self):
"""
Return the right-hand side length of the shortest grammar production.
"""
return self._min_len
def max_len(self):
"""
Return the right-hand side length of the longest grammar production.
"""
return self._max_len
def is_nonempty(self):
"""
Return True if there are no empty productions.
"""
return self._min_len > 0
def is_binarised(self):
"""
Return True if all productions are at most binary.
Note that there can still be empty and unary productions.
"""
return self._max_len <= 2
def is_flexible_chomsky_normal_form(self):
"""
Return True if all productions are of the forms
A -> B C, A -> B, or A -> "s".
"""
return self.is_nonempty() and self.is_nonlexical() and self.is_binarised()
def is_chomsky_normal_form(self):
    """
    Return True if the grammar is of Chomsky Normal Form, i.e. all productions
    are of the form A -> B C, or A -> "s".

    :rtype: bool
    """
    return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical
def chomsky_normal_form(self, new_token_padding="@$@", flexible=False):
    """
    Return a new grammar in Chomsky Normal Form.

    :param new_token_padding: customise new rule formation during binarisation
    :param flexible: if True, stop after binarisation (unary nonlexical
        rules are kept), yielding "flexible" CNF
    :raises ValueError: on empty productions or mixed rules, which this
        transformation cannot handle
    """
    if self.is_chomsky_normal_form():
        return self
    if self.productions(empty=True):
        raise ValueError(
            "Grammar has Empty rules. " "Cannot deal with them at the moment"
        )

    # Mixed rules (a terminal alongside other symbols on the RHS)
    # cannot be binarised by this procedure.
    for rule in self.productions():
        if rule.is_lexical() and len(rule.rhs()) > 1:
            raise ValueError(
                f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}"
            )

    # Pipeline: remove start symbol from RHSs, then binarise,
    # then (unless flexible) drop unitary rules and deduplicate.
    no_start_on_rhs = CFG.eliminate_start(self)
    binarised = CFG.binarize(no_start_on_rhs, new_token_padding)
    if flexible:
        return binarised
    no_unitary = CFG.remove_unitary_rules(binarised)
    return CFG(no_unitary.start(), list(set(no_unitary.productions())))
def remove_unitary_rules(cls, grammar):
    """
    Remove nonlexical unitary rules and convert them to
    lexical
    """
    kept = []
    pending = []
    # Separate nonlexical unary rules (to be eliminated) from the rest.
    for prod in grammar.productions():
        if len(prod) == 1 and prod.is_nonlexical():
            pending.append(prod)
        else:
            kept.append(prod)

    # Repeatedly expand A -> B through every B -> beta; chains of
    # unitary rules re-enter the queue until they bottom out.
    while pending:
        current = pending.pop(0)
        for expansion in grammar.productions(lhs=current.rhs()[0]):
            candidate = Production(current.lhs(), expansion.rhs())
            if len(candidate) != 1 or candidate.is_lexical():
                kept.append(candidate)
            else:
                pending.append(candidate)
    return CFG(grammar.start(), kept)
def binarize(cls, grammar, padding="@$@"):
    """
    Convert all non-binary rules into binary by introducing
    new tokens.
    Example::

        Original:
            A => B C D
        After Conversion:
            A => B A@$@B
            A@$@B => C D
    """
    new_rules = []
    for prod in grammar.productions():
        rhs = prod.rhs()
        if len(rhs) <= 2:
            # Already binary (or unary/lexical): keep untouched.
            new_rules.append(prod)
            continue
        # Peel off one RHS symbol at a time, chaining through
        # freshly-minted nonterminals named LHS + padding + symbol.
        lhs = prod.lhs()
        for pos in range(len(rhs) - 2):
            head = rhs[pos]
            fresh = Nonterminal(lhs.symbol() + padding + head.symbol())
            new_rules.append(Production(lhs, (head, fresh)))
            lhs = fresh
        # The last fresh nonterminal expands to the final two symbols.
        new_rules.append(Production(lhs, rhs[-2:]))
    return CFG(grammar.start(), new_rules)
def eliminate_start(cls, grammar):
    """
    Eliminate start rule in case it appears on RHS
    Example: S -> S0 S1 and S0 -> S1 S
    Then another rule S0_Sigma -> S is added
    """
    old_start = grammar.start()
    rules = []
    start_on_rhs = False
    for rule in grammar.productions():
        if old_start in rule.rhs():
            start_on_rhs = True
        rules.append(rule)
    if not start_on_rhs:
        # Start symbol never appears on any RHS: nothing to do.
        return grammar
    # Introduce a fresh start symbol that rewrites to the old one.
    fresh_start = Nonterminal("S0_SIGMA")
    rules.append(Production(fresh_start, [grammar.start()]))
    return CFG(fresh_start, rules)
def __repr__(self):
    """Return a concise summary of the grammar."""
    return "<Grammar with %d productions>" % len(self._productions)
def __str__(self):
    """Return a verbose description listing every production."""
    header = "Grammar with %d productions" % len(self._productions)
    header += " (start state = %r)" % self._start
    body = "".join("\n %s" % production for production in self._productions)
    return header + body
def demo():
    """Build a small toy CFG and open an interactive ``CFGEditor`` window
    for it; the edited grammar is printed via the callback when saved.
    Blocks until the window is closed."""
    from nltk import CFG, Nonterminal

    nonterminals = "S VP NP PP P N Name V Det"
    (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split())

    grammar = CFG.fromstring(
        """
S -> NP VP
PP -> P NP
NP -> Det N
NP -> NP PP
VP -> V NP
VP -> VP PP
Det -> 'a'
Det -> 'the'
Det -> 'my'
NP -> 'I'
N -> 'dog'
N -> 'man'
N -> 'park'
N -> 'statue'
V -> 'saw'
P -> 'in'
P -> 'up'
P -> 'over'
P -> 'with'
"""
    )

    def cb(grammar):
        # Callback invoked by the editor with the (possibly edited) grammar.
        print(grammar)

    top = Tk()
    editor = CFGEditor(top, grammar, cb)
    Label(top, text="\nTesting CFG Editor\n").pack()
    Button(top, text="Quit", command=top.destroy).pack()
    top.mainloop()
170,566 | import re
from tkinter import (
Button,
Canvas,
Entry,
Frame,
IntVar,
Label,
Scrollbar,
Text,
Tk,
Toplevel,
)
from nltk.draw.tree import TreeSegmentWidget, tree_to_treesegment
from nltk.draw.util import (
CanvasFrame,
ColorizedList,
ShowText,
SymbolWidget,
TextWidget,
)
from nltk.grammar import CFG, Nonterminal, _read_cfg_production, nonterminals
from nltk.tree import Tree
class ProductionList(ColorizedList):
    """A ``ColorizedList`` specialised for displaying CFG productions,
    colorizing nonterminals, terminals and the rewrite arrow."""

    ARROW = SymbolWidget.SYMBOLS["rightarrow"]

    def _init_colortags(self, textwidget, options):
        # Terminals in dark green, the arrow in the symbol font,
        # nonterminals in bold blue.
        textwidget.tag_config("terminal", foreground="#006000")
        textwidget.tag_config("arrow", font="symbol", underline="0")
        textwidget.tag_config(
            "nonterminal", foreground="blue", font=("helvetica", -12, "bold")
        )

    def _item_repr(self, item):
        # Render one production as (text, tag) pairs: LHS, arrow, then
        # each RHS symbol tagged by whether it is a nonterminal.
        pieces = [("%s\t" % item.lhs(), "nonterminal"), (self.ARROW, "arrow")]
        for symbol in item.rhs():
            if isinstance(symbol, Nonterminal):
                pieces.append((" %s" % symbol.symbol(), "nonterminal"))
            else:
                pieces.append((" %r" % symbol, "terminal"))
        return pieces
def nonterminals(symbols):
    """
    Given a string containing a list of symbol names, return a list of
    ``Nonterminals`` constructed from those symbols.

    :param symbols: The symbol name string.  This string can be
        delimited by either spaces or commas.
    :type symbols: str
    :return: A list of ``Nonterminals`` constructed from the symbol
        names given in ``symbols``.  The ``Nonterminals`` are sorted
        in the same order as the symbols names.
    :rtype: list(Nonterminal)
    """
    # str.split(None) splits on runs of whitespace, so one call covers
    # both the comma-delimited and the whitespace-delimited case.
    separator = "," if "," in symbols else None
    return [Nonterminal(name.strip()) for name in symbols.split(separator)]
def demo3():
    """Display a ``ProductionList`` widget over a toy grammar; productions
    2 and 8 are pre-marked, and pressing 'q' closes the window.
    Blocks until the window is closed."""
    from nltk import Production

    (S, VP, NP, PP, P, N, Name, V, Det) = nonterminals(
        "S, VP, NP, PP, P, N, Name, V, Det"
    )

    productions = (
        # Syntactic Productions
        Production(S, [NP, VP]),
        Production(NP, [Det, N]),
        Production(NP, [NP, PP]),
        Production(VP, [VP, PP]),
        Production(VP, [V, NP, PP]),
        Production(VP, [V, NP]),
        Production(PP, [P, NP]),
        Production(PP, []),
        Production(PP, ["up", "over", NP]),
        # Lexical Productions
        Production(NP, ["I"]),
        Production(Det, ["the"]),
        Production(Det, ["a"]),
        Production(N, ["man"]),
        Production(V, ["saw"]),
        Production(P, ["in"]),
        Production(P, ["with"]),
        Production(N, ["park"]),
        Production(N, ["dog"]),
        Production(N, ["statue"]),
        Production(Det, ["my"]),
    )

    t = Tk()

    def destroy(e, t=t):
        # Bound to the 'q' key: close the demo window.
        t.destroy()

    t.bind("q", destroy)
    p = ProductionList(t, productions)
    p.pack(expand=1, fill="both")
    p.add_callback("select", p.markonly)
    p.add_callback("move", p.markonly)
    p.focus()
    p.mark(productions[2])
    p.mark(productions[8])
def align(str1, str2, epsilon=0):
    """
    Compute the alignment of two phonetic strings.

    :param str str1: First string to be aligned
    :param str str2: Second string to be aligned
    :type epsilon: float (0.0 to 1.0)
    :param epsilon: Adjusts threshold similarity score for near-optimal alignments

    :rtype: list(list(tuple(str, str)))
    :return: Alignment(s) of str1 and str2

    (Kondrak 2002: 51)
    """
    if np is None:
        raise ImportError("You need numpy in order to use the align function")

    assert 0.0 <= epsilon <= 1.0, "Epsilon must be between 0.0 and 1.0."
    len1, len2 = len(str1), len(str2)

    # Similarity matrix; row 0 and column 0 start out as all zeros
    # (Kondrak's initialization).
    S = np.zeros((len1 + 1, len2 + 1), dtype=float)

    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            # Candidate moves: skip either segment, substitute, or expand
            # one segment into two.  Expansions need two preceding
            # segments, so near the edges (i <= 1 or j <= 1) they are
            # ruled out with -inf so they are never chosen.
            candidates = [
                S[i - 1, j] + sigma_skip(str1[i - 1]),
                S[i, j - 1] + sigma_skip(str2[j - 1]),
                S[i - 1, j - 1] + sigma_sub(str1[i - 1], str2[j - 1]),
                S[i - 2, j - 1] + sigma_exp(str2[j - 1], str1[i - 2 : i])
                if i > 1
                else -inf,
                S[i - 1, j - 2] + sigma_exp(str1[i - 1], str2[j - 2 : j])
                if j > 1
                else -inf,
                0,
            ]
            S[i, j] = max(candidates)

    # Threshold score for near-optimal alignments.
    threshold = (1 - epsilon) * np.amax(S)

    return [
        _retrieve(i, j, 0, S, threshold, str1, str2, [])
        for i in range(1, len1 + 1)
        for j in range(1, len2 + 1)
        if S[i, j] >= threshold
    ]
# Cognate word pairs in broad phonetic transcription, one
# comma-separated pair per line; consumed by demo() below.
# (Kondrak 2002 evaluation data.)
cognate_data = """jo,ʒə
tu,ty
nosotros,nu
kjen,ki
ke,kwa
todos,tu
una,ən
dos,dø
tres,trwa
ombre,om
arbol,arbrə
pluma,plym
kabeθa,kap
boka,buʃ
pje,pje
koraθon,kœr
ber,vwar
benir,vənir
deθir,dir
pobre,povrə
ðis,dIzes
ðæt,das
wat,vas
nat,nixt
loŋ,laŋ
mæn,man
fleʃ,flajʃ
bləd,blyt
feðər,fEdər
hær,hAr
ir,Or
aj,awgə
nowz,nAzə
mawθ,munt
təŋ,tsuŋə
fut,fys
nij,knI
hænd,hant
hart,herts
livər,lEbər
ænd,ante
æt,ad
blow,flAre
ir,awris
ijt,edere
fiʃ,piʃkis
flow,fluere
staɾ,stella
ful,plenus
græs,gramen
hart,kordis
horn,korny
aj,ego
nij,genU
məðər,mAter
mawntən,mons
nejm,nomen
njuw,nowus
wən,unus
rawnd,rotundus
sow,suere
sit,sedere
θrij,tres
tuwθ,dentis
θin,tenwis
kinwawa,kenuaʔ
nina,nenah
napewa,napɛw
wapimini,wapemen
namesa,namɛʔs
okimawa,okemaw
ʃiʃipa,seʔsep
ahkohkwa,ahkɛh
pematesiweni,pematesewen
asenja,aʔsɛn"""
The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem:
A demonstration of the result of aligning phonetic sequences used in Kondrak's (2002) dissertation.
Here is the function:
def demo():
    """
    A demonstration of the result of aligning phonetic sequences
    used in Kondrak's (2002) dissertation.
    """
    pairs = [line.split(",") for line in cognate_data.split("\n")]
    for word1, word2 in pairs:
        # Take the first (best) alignment and render it as "(a, b)" pairs.
        best = align(word1, word2)[0]
        rendered = " ".join(f"({a[0]}, {a[1]})" for a in best)
        print(f"{word1} ~ {word2} : {rendered}")
170,568 |
The provided code snippet includes necessary dependencies for implementing the `ranks_from_sequence` function. Write a Python function `def ranks_from_sequence(seq)` to solve the following problem:
Given a sequence, yields each element with an increasing rank, suitable for use as an argument to ``spearman_correlation``.
Here is the function:
def ranks_from_sequence(seq):
    """Given a sequence, yields each element with an increasing rank, suitable
    for use as an argument to ``spearman_correlation``.
    """
    for rank, key in enumerate(seq):
        yield (key, rank)
170,569 | from math import sqrt
The provided code snippet includes necessary dependencies for implementing the `get_words_from_dictionary` function. Write a Python function `def get_words_from_dictionary(lemmas)` to solve the following problem:
Get original set of words used for analysis. :param lemmas: A dictionary where keys are lemmas and values are sets or lists of words corresponding to that lemma. :type lemmas: dict(str): list(str) :return: Set of words that exist as values in the dictionary :rtype: set(str)
Here is the function:
def get_words_from_dictionary(lemmas):
    """
    Get original set of words used for analysis.

    :param lemmas: A dictionary where keys are lemmas and values are sets
        or lists of words corresponding to that lemma.
    :type lemmas: dict(str): list(str)
    :return: Set of words that exist as values in the dictionary
    :rtype: set(str)
    """
    words = set()
    for variants in lemmas.values():
        words.update(variants)
    return words
170,570 | from math import sqrt
The provided code snippet includes necessary dependencies for implementing the `_truncate` function. Write a Python function `def _truncate(words, cutlength)` to solve the following problem:
Group words by stems defined by truncating them at given length. :param words: Set of words used for analysis :param cutlength: Words are stemmed by cutting at this length. :type words: set(str) or list(str) :type cutlength: int :return: Dictionary where keys are stems and values are sets of words corresponding to that stem. :rtype: dict(str): set(str)
Here is the function:
def _truncate(words, cutlength):
"""Group words by stems defined by truncating them at given length.
:param words: Set of words used for analysis
:param cutlength: Words are stemmed by cutting at this length.
:type words: set(str) or list(str)
:type cutlength: int
:return: Dictionary where keys are stems and values are sets of words
corresponding to that stem.
:rtype: dict(str): set(str)
"""
stems = {}
for word in words:
stem = word[:cutlength]
try:
stems[stem].update([word])
except KeyError:
stems[stem] = {word}
return stems | Group words by stems defined by truncating them at given length. :param words: Set of words used for analysis :param cutlength: Words are stemmed by cutting at this length. :type words: set(str) or list(str) :type cutlength: int :return: Dictionary where keys are stems and values are sets of words corresponding to that stem. :rtype: dict(str): set(str) |
170,571 | from math import sqrt
The provided code snippet includes necessary dependencies for implementing the `_count_intersection` function. Write a Python function `def _count_intersection(l1, l2)` to solve the following problem:
Count intersection between two line segments defined by coordinate pairs. :param l1: Tuple of two coordinate pairs defining the first line segment :param l2: Tuple of two coordinate pairs defining the second line segment :type l1: tuple(float, float) :type l2: tuple(float, float) :return: Coordinates of the intersection :rtype: tuple(float, float)
Here is the function:
def _count_intersection(l1, l2):
"""Count intersection between two line segments defined by coordinate pairs.
:param l1: Tuple of two coordinate pairs defining the first line segment
:param l2: Tuple of two coordinate pairs defining the second line segment
:type l1: tuple(float, float)
:type l2: tuple(float, float)
:return: Coordinates of the intersection
:rtype: tuple(float, float)
"""
x1, y1 = l1[0]
x2, y2 = l1[1]
x3, y3 = l2[0]
x4, y4 = l2[1]
denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
if denominator == 0.0: # lines are parallel
if x1 == x2 == x3 == x4 == 0.0:
# When lines are parallel, they must be on the y-axis.
# We can ignore x-axis because we stop counting the
# truncation line when we get there.
# There are no other options as UI (x-axis) grows and
# OI (y-axis) diminishes when we go along the truncation line.
return (0.0, y4)
x = (
(x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)
) / denominator
y = (
(x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)
) / denominator
return (x, y) | Count intersection between two line segments defined by coordinate pairs. :param l1: Tuple of two coordinate pairs defining the first line segment :param l2: Tuple of two coordinate pairs defining the second line segment :type l1: tuple(float, float) :type l2: tuple(float, float) :return: Coordinates of the intersection :rtype: tuple(float, float) |
170,572 | from math import sqrt
The provided code snippet includes necessary dependencies for implementing the `_get_derivative` function. Write a Python function `def _get_derivative(coordinates)` to solve the following problem:
Get derivative of the line from (0,0) to given coordinates. :param coordinates: A coordinate pair :type coordinates: tuple(float, float) :return: Derivative; inf if x is zero :rtype: float
Here is the function:
def _get_derivative(coordinates):
"""Get derivative of the line from (0,0) to given coordinates.
:param coordinates: A coordinate pair
:type coordinates: tuple(float, float)
:return: Derivative; inf if x is zero
:rtype: float
"""
try:
return coordinates[1] / coordinates[0]
except ZeroDivisionError:
return float("inf") | Get derivative of the line from (0,0) to given coordinates. :param coordinates: A coordinate pair :type coordinates: tuple(float, float) :return: Derivative; inf if x is zero :rtype: float |
170,573 | from math import sqrt
def _calculate_cut(lemmawords, stems):
"""Count understemmed and overstemmed pairs for (lemma, stem) pair with common words.
:param lemmawords: Set or list of words corresponding to certain lemma.
:param stems: A dictionary where keys are stems and values are sets
or lists of words corresponding to that stem.
:type lemmawords: set(str) or list(str)
:type stems: dict(str): set(str)
:return: Amount of understemmed and overstemmed pairs contributed by words
existing in both lemmawords and stems.
:rtype: tuple(float, float)
"""
umt, wmt = 0.0, 0.0
for stem in stems:
cut = set(lemmawords) & set(stems[stem])
if cut:
cutcount = len(cut)
stemcount = len(stems[stem])
# Unachieved merge total
umt += cutcount * (len(lemmawords) - cutcount)
# Wrongly merged total
wmt += cutcount * (stemcount - cutcount)
return (umt, wmt)
The provided code snippet includes necessary dependencies for implementing the `_calculate` function. Write a Python function `def _calculate(lemmas, stems)` to solve the following problem:
Calculate actual and maximum possible amounts of understemmed and overstemmed word pairs. :param lemmas: A dictionary where keys are lemmas and values are sets or lists of words corresponding to that lemma. :param stems: A dictionary where keys are stems and values are sets or lists of words corresponding to that stem. :type lemmas: dict(str): list(str) :type stems: dict(str): set(str) :return: Global unachieved merge total (gumt), global desired merge total (gdmt), global wrongly merged total (gwmt) and global desired non-merge total (gdnt). :rtype: tuple(float, float, float, float)
Here is the function:
def _calculate(lemmas, stems):
    """Calculate actual and maximum possible amounts of understemmed and overstemmed word pairs.

    :param lemmas: A dictionary where keys are lemmas and values are sets
        or lists of words corresponding to that lemma.
    :param stems: A dictionary where keys are stems and values are sets
        or lists of words corresponding to that stem.
    :type lemmas: dict(str): list(str)
    :type stems: dict(str): set(str)
    :return: Global unachieved merge total (gumt),
        global desired merge total (gdmt),
        global wrongly merged total (gwmt) and
        global desired non-merge total (gdnt).
    :rtype: tuple(float, float, float, float)
    """
    total = sum(len(wordlist) for wordlist in lemmas.values())

    gdmt = gdnt = gumt = gwmt = 0.0
    for lemmawords in lemmas.values():
        count = len(lemmawords)
        # Desired merge total: ordered pairs within this lemma group.
        gdmt += count * (count - 1)
        # Desired non-merge total: ordered pairs across different lemmas.
        gdnt += count * (total - count)
        # Understemmed / overstemmed contributions of this lemma group.
        umt, wmt = _calculate_cut(lemmawords, stems)
        gumt += umt
        gwmt += wmt

    # Every pair was counted from both ends, so halve each total.
    return (gumt / 2, gdmt / 2, gwmt / 2, gdnt / 2)
170,574 | from math import sqrt
The provided code snippet includes necessary dependencies for implementing the `_indexes` function. Write a Python function `def _indexes(gumt, gdmt, gwmt, gdnt)` to solve the following problem:
Count Understemming Index (UI), Overstemming Index (OI) and Stemming Weight (SW). :param gumt, gdmt, gwmt, gdnt: Global unachieved merge total (gumt), global desired merge total (gdmt), global wrongly merged total (gwmt) and global desired non-merge total (gdnt). :type gumt, gdmt, gwmt, gdnt: float :return: Understemming Index (UI), Overstemming Index (OI) and Stemming Weight (SW). :rtype: tuple(float, float, float)
Here is the function:
def _indexes(gumt, gdmt, gwmt, gdnt):
"""Count Understemming Index (UI), Overstemming Index (OI) and Stemming Weight (SW).
:param gumt, gdmt, gwmt, gdnt: Global unachieved merge total (gumt),
global desired merge total (gdmt),
global wrongly merged total (gwmt) and
global desired non-merge total (gdnt).
:type gumt, gdmt, gwmt, gdnt: float
:return: Understemming Index (UI),
Overstemming Index (OI) and
Stemming Weight (SW).
:rtype: tuple(float, float, float)
"""
# Calculate Understemming Index (UI),
# Overstemming Index (OI) and Stemming Weight (SW)
try:
ui = gumt / gdmt
except ZeroDivisionError:
# If GDMT (max merge total) is 0, define UI as 0
ui = 0.0
try:
oi = gwmt / gdnt
except ZeroDivisionError:
# IF GDNT (max non-merge total) is 0, define OI as 0
oi = 0.0
try:
sw = oi / ui
except ZeroDivisionError:
if oi == 0.0:
# OI and UI are 0, define SW as 'not a number'
sw = float("nan")
else:
# UI is 0, define SW as infinity
sw = float("inf")
return (ui, oi, sw) | Count Understemming Index (UI), Overstemming Index (OI) and Stemming Weight (SW). :param gumt, gdmt, gwmt, gdnt: Global unachieved merge total (gumt), global desired merge total (gdmt), global wrongly merged total (gwmt) and global desired non-merge total (gdnt). :type gumt, gdmt, gwmt, gdnt: float :return: Understemming Index (UI), Overstemming Index (OI) and Stemming Weight (SW). :rtype: tuple(float, float, float) |
170,575 | from math import sqrt
class Paice:
    """Class for storing lemmas, stems and evaluation metrics.

    Implements Paice's (1994) evaluation of stemming algorithms via the
    understemming/overstemming indexes and the error rate relative to
    truncation (ERRT).
    """

    def __init__(self, lemmas, stems):
        """
        :param lemmas: A dictionary where keys are lemmas and values are sets
            or lists of words corresponding to that lemma.
        :param stems: A dictionary where keys are stems and values are sets
            or lists of words corresponding to that stem.
        :type lemmas: dict(str): list(str)
        :type stems: dict(str): set(str)
        """
        self.lemmas = lemmas
        self.stems = stems
        # Truncation-line coordinates, filled in by _errt().
        self.coords = []
        # Global merge totals, computed by update() via _calculate().
        self.gumt, self.gdmt, self.gwmt, self.gdnt = (None, None, None, None)
        # Understemming index, overstemming index and stemming weight.
        self.ui, self.oi, self.sw = (None, None, None)
        # Error rate relative to truncation.
        self.errt = None
        self.update()

    def __str__(self):
        text = ["Global Unachieved Merge Total (GUMT): %s\n" % self.gumt]
        text.append("Global Desired Merge Total (GDMT): %s\n" % self.gdmt)
        text.append("Global Wrongly-Merged Total (GWMT): %s\n" % self.gwmt)
        text.append("Global Desired Non-merge Total (GDNT): %s\n" % self.gdnt)
        text.append("Understemming Index (GUMT / GDMT): %s\n" % self.ui)
        text.append("Overstemming Index (GWMT / GDNT): %s\n" % self.oi)
        text.append("Stemming Weight (OI / UI): %s\n" % self.sw)
        text.append("Error-Rate Relative to Truncation (ERRT): %s\r\n" % self.errt)
        coordinates = " ".join(["(%s, %s)" % item for item in self.coords])
        text.append("Truncation line: %s" % coordinates)
        return "".join(text)

    def _get_truncation_indexes(self, words, cutlength):
        """Count (UI, OI) when stemming is done by truncating words at \'cutlength\'.

        :param words: Words used for the analysis
        :param cutlength: Words are stemmed by cutting them at this length
        :type words: set(str) or list(str)
        :type cutlength: int
        :return: Understemming and overstemming indexes
        :rtype: tuple(int, int)
        """
        truncated = _truncate(words, cutlength)
        gumt, gdmt, gwmt, gdnt = _calculate(self.lemmas, truncated)
        ui, oi = _indexes(gumt, gdmt, gwmt, gdnt)[:2]
        return (ui, oi)

    def _get_truncation_coordinates(self, cutlength=0):
        """Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line.

        :param cutlength: Optional parameter to start counting from (ui, oi)
            coordinates gotten by stemming at this length. Useful for speeding up
            the calculations when you know the approximate location of the
            intersection.
        :type cutlength: int
        :return: List of coordinate pairs that define the truncation line
        :rtype: list(tuple(float, float))
        """
        words = get_words_from_dictionary(self.lemmas)
        maxlength = max(len(word) for word in words)

        # Truncate words from different points until (0, 0) - (ui, oi) segment crosses the truncation line
        coords = []
        while cutlength <= maxlength:
            # Get (UI, OI) pair of current truncation point
            pair = self._get_truncation_indexes(words, cutlength)

            # Store only new coordinates so we'll have an actual
            # line segment when counting the intersection point
            if pair not in coords:
                coords.append(pair)
            if pair == (0.0, 0.0):
                # Stop counting if truncation line goes through origo;
                # length from origo to truncation line is 0
                return coords
            if len(coords) >= 2 and pair[0] > 0.0:
                derivative1 = _get_derivative(coords[-2])
                derivative2 = _get_derivative(coords[-1])
                # Derivative of the truncation line is a decreasing value;
                # when it passes Stemming Weight, we've found the segment
                # of truncation line intersecting with (0, 0) - (ui, oi) segment
                if derivative1 >= self.sw >= derivative2:
                    return coords
            cutlength += 1
        return coords

    def _errt(self):
        """Count Error-Rate Relative to Truncation (ERRT).

        :return: ERRT, length of the line from origo to (UI, OI) divided by
            the length of the line from origo to the point defined by the same
            line when extended until the truncation line.
        :rtype: float
        """
        # Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line
        self.coords = self._get_truncation_coordinates()
        if (0.0, 0.0) in self.coords:
            # Truncation line goes through origo, so ERRT cannot be counted
            if (self.ui, self.oi) != (0.0, 0.0):
                return float("inf")
            else:
                return float("nan")
        if (self.ui, self.oi) == (0.0, 0.0):
            # (ui, oi) is origo; define errt as 0.0
            return 0.0
        # Count the intersection point
        # Note that (self.ui, self.oi) cannot be (0.0, 0.0) and self.coords has different coordinates
        # so we have actual line segments instead of a line segment and a point
        intersection = _count_intersection(
            ((0, 0), (self.ui, self.oi)), self.coords[-2:]
        )
        # Count OP (length of the line from origo to (ui, oi))
        op = sqrt(self.ui**2 + self.oi**2)
        # Count OT (length of the line from origo to truncation line that goes through (ui, oi))
        ot = sqrt(intersection[0] ** 2 + intersection[1] ** 2)
        # OP / OT tells how well the stemming algorithm works compared to just truncating words
        return op / ot

    def update(self):
        """Update statistics after lemmas and stems have been set."""
        self.gumt, self.gdmt, self.gwmt, self.gdnt = _calculate(self.lemmas, self.stems)
        self.ui, self.oi, self.sw = _indexes(self.gumt, self.gdmt, self.gwmt, self.gdnt)
        self.errt = self._errt()
The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem:
Demonstration of the module.
Here is the function:
def demo():
    """Demonstration of the module."""

    def show_groups(mapping):
        # Print each key with its space-joined word group, sorted by key,
        # followed by a blank line.
        for key in sorted(mapping):
            print("{} => {}".format(key, " ".join(mapping[key])))
        print()

    # Some words with their real lemmas
    lemmas = {
        "kneel": ["kneel", "knelt"],
        "range": ["range", "ranged"],
        "ring": ["ring", "rang", "rung"],
    }
    # Same words with stems from a stemming algorithm
    stems = {
        "kneel": ["kneel"],
        "knelt": ["knelt"],
        "rang": ["rang", "range", "ranged"],
        "ring": ["ring"],
        "rung": ["rung"],
    }
    print("Words grouped by their lemmas:")
    show_groups(lemmas)
    print("Same words grouped by a stemming algorithm:")
    show_groups(stems)
    p = Paice(lemmas, stems)
    print(p)
    print()

    # Let's "change" results from a stemming algorithm
    stems = {
        "kneel": ["kneel"],
        "knelt": ["knelt"],
        "rang": ["rang"],
        "range": ["range", "ranged"],
        "ring": ["ring"],
        "rung": ["rung"],
    }
    print("Counting stats after changing stemming results:")
    show_groups(stems)
    p.stems = stems
    p.update()
    print(p)
170,576 |
The provided code snippet includes necessary dependencies for implementing the `windowdiff` function. Write a Python function `def windowdiff(seg1, seg2, k, boundary="1", weighted=False)` to solve the following problem:
Compute the windowdiff score for a pair of segmentations. A segmentation is any sequence over a vocabulary of two items (e.g. "0", "1"), where the specified boundary value is used to mark the edge of a segmentation. >>> s1 = "000100000010" >>> s2 = "000010000100" >>> s3 = "100000010000" >>> '%.2f' % windowdiff(s1, s1, 3) '0.00' >>> '%.2f' % windowdiff(s1, s2, 3) '0.30' >>> '%.2f' % windowdiff(s2, s3, 3) '0.80' :param seg1: a segmentation :type seg1: str or list :param seg2: a segmentation :type seg2: str or list :param k: window width :type k: int :param boundary: boundary value :type boundary: str or int or bool :param weighted: use the weighted variant of windowdiff :type weighted: boolean :rtype: float
Here is the function:
def windowdiff(seg1, seg2, k, boundary="1", weighted=False):
    """
    Compute the windowdiff score for a pair of segmentations.  A
    segmentation is any sequence over a vocabulary of two items
    (e.g. "0", "1"), where the specified boundary value is used to
    mark the edge of a segmentation.

        >>> s1 = "000100000010"
        >>> s2 = "000010000100"
        >>> s3 = "100000010000"
        >>> '%.2f' % windowdiff(s1, s1, 3)
        '0.00'
        >>> '%.2f' % windowdiff(s1, s2, 3)
        '0.30'
        >>> '%.2f' % windowdiff(s2, s3, 3)
        '0.80'

    :param seg1: a segmentation
    :type seg1: str or list
    :param seg2: a segmentation
    :type seg2: str or list
    :param k: window width
    :type k: int
    :param boundary: boundary value
    :type boundary: str or int or bool
    :param weighted: use the weighted variant of windowdiff
    :type weighted: boolean
    :rtype: float
    """
    if len(seg1) != len(seg2):
        raise ValueError("Segmentations have unequal length")
    if k > len(seg1):
        raise ValueError(
            "Window width k should be smaller or equal than segmentation lengths"
        )

    # Slide a width-k window over both segmentations and compare the
    # number of boundaries seen in each position.
    total = 0
    for start in range(len(seg1) - k + 1):
        window1 = seg1[start : start + k]
        window2 = seg2[start : start + k]
        diff = abs(window1.count(boundary) - window2.count(boundary))
        total += diff if weighted else min(1, diff)
    return total / (len(seg1) - k + 1.0)
170,577 | def _init_mat(nrows, ncols, ins_cost, del_cost):
mat = np.empty((nrows, ncols))
mat[0, :] = ins_cost * np.arange(ncols)
mat[:, 0] = del_cost * np.arange(nrows)
return mat
def _ghd_aux(mat, rowv, colv, ins_cost, del_cost, shift_cost_coeff):
    # Fill the (len(rowv)+1) x (len(colv)+1) GHD cost matrix in place.
    # rowv/colv hold the boundary positions of the two segmentations;
    # mat's first row/column must already be initialised (see _init_mat).
    # Each cell is the cheaper of a shift (move one boundary to the
    # other's position) or an insert/delete transformation.
    for i, rowi in enumerate(rowv):
        for j, colj in enumerate(colv):
            shift_cost = shift_cost_coeff * abs(rowi - colj) + mat[i, j]
            if rowi == colj:
                # boundaries are at the same location, no transformation required
                tcost = mat[i, j]
            elif rowi > colj:
                # boundary match through a deletion
                tcost = del_cost + mat[i, j + 1]
            else:
                # boundary match through an insertion
                tcost = ins_cost + mat[i + 1, j]
            mat[i + 1, j + 1] = min(tcost, shift_cost)
The provided code snippet includes necessary dependencies for implementing the `ghd` function. Write a Python function `def ghd(ref, hyp, ins_cost=2.0, del_cost=2.0, shift_cost_coeff=1.0, boundary="1")` to solve the following problem:
Compute the Generalized Hamming Distance for a reference and a hypothetical segmentation, corresponding to the cost related to the transformation of the hypothetical segmentation into the reference segmentation through boundary insertion, deletion and shift operations. A segmentation is any sequence over a vocabulary of two items (e.g. "0", "1"), where the specified boundary value is used to mark the edge of a segmentation. Recommended parameter values are a shift_cost_coeff of 2. Associated with a ins_cost, and del_cost equal to the mean segment length in the reference segmentation. >>> # Same examples as Kulyukin C++ implementation >>> ghd('1100100000', '1100010000', 1.0, 1.0, 0.5) 0.5 >>> ghd('1100100000', '1100000001', 1.0, 1.0, 0.5) 2.0 >>> ghd('011', '110', 1.0, 1.0, 0.5) 1.0 >>> ghd('1', '0', 1.0, 1.0, 0.5) 1.0 >>> ghd('111', '000', 1.0, 1.0, 0.5) 3.0 >>> ghd('000', '111', 1.0, 2.0, 0.5) 6.0 :param ref: the reference segmentation :type ref: str or list :param hyp: the hypothetical segmentation :type hyp: str or list :param ins_cost: insertion cost :type ins_cost: float :param del_cost: deletion cost :type del_cost: float :param shift_cost_coeff: constant used to compute the cost of a shift. ``shift cost = shift_cost_coeff * |i - j|`` where ``i`` and ``j`` are the positions indicating the shift :type shift_cost_coeff: float :param boundary: boundary value :type boundary: str or int or bool :rtype: float
Here is the function:
def ghd(ref, hyp, ins_cost=2.0, del_cost=2.0, shift_cost_coeff=1.0, boundary="1"):
    """
    Compute the Generalized Hamming Distance between a reference and a
    hypothetical segmentation.

    The distance is the cost of transforming the hypothetical
    segmentation into the reference one through boundary insertion,
    deletion and shift operations. A segmentation is any sequence over
    a two-item vocabulary (e.g. "0", "1"), where the given boundary
    value marks a segment edge.

    Recommended parameter values are a shift_cost_coeff of 2, with
    ins_cost and del_cost equal to the mean segment length in the
    reference segmentation.

    >>> # Same examples as Kulyukin C++ implementation
    >>> ghd('1100100000', '1100010000', 1.0, 1.0, 0.5)
    0.5
    >>> ghd('1100100000', '1100000001', 1.0, 1.0, 0.5)
    2.0
    >>> ghd('011', '110', 1.0, 1.0, 0.5)
    1.0
    >>> ghd('1', '0', 1.0, 1.0, 0.5)
    1.0
    >>> ghd('111', '000', 1.0, 1.0, 0.5)
    3.0
    >>> ghd('000', '111', 1.0, 2.0, 0.5)
    6.0

    :param ref: the reference segmentation
    :type ref: str or list
    :param hyp: the hypothetical segmentation
    :type hyp: str or list
    :param ins_cost: insertion cost
    :type ins_cost: float
    :param del_cost: deletion cost
    :type del_cost: float
    :param shift_cost_coeff: constant used to compute the cost of a shift.
        ``shift cost = shift_cost_coeff * |i - j|`` where ``i`` and ``j``
        are the positions indicating the shift
    :type shift_cost_coeff: float
    :param boundary: boundary value
    :type boundary: str or int or bool
    :rtype: float
    """
    ref_bounds = [pos for pos, tok in enumerate(ref) if tok == boundary]
    hyp_bounds = [pos for pos, tok in enumerate(hyp) if tok == boundary]
    n_ref = len(ref_bounds)
    n_hyp = len(hyp_bounds)
    # Degenerate cases: one (or both) segmentations carry no boundary,
    # so the cost is pure insertion or pure deletion.
    if n_hyp == 0:
        return n_ref * ins_cost  # 0.0 when n_ref is also zero
    if n_ref == 0:
        return n_hyp * del_cost
    mat = _init_mat(n_hyp + 1, n_ref + 1, ins_cost, del_cost)
    _ghd_aux(mat, hyp_bounds, ref_bounds, ins_cost, del_cost, shift_cost_coeff)
    return mat[-1, -1]
170,578 |
The provided code snippet includes necessary dependencies for implementing the `pk` function. Write a Python function `def pk(ref, hyp, k=None, boundary="1")` to solve the following problem:
Compute the Pk metric for a pair of segmentations. A segmentation is any sequence over a vocabulary of two items (e.g. "0", "1"), where the specified boundary value is used to mark the edge of a segmentation. >>> '%.2f' % pk('0100'*100, '1'*400, 2) '0.50' >>> '%.2f' % pk('0100'*100, '0'*400, 2) '0.50' >>> '%.2f' % pk('0100'*100, '0100'*100, 2) '0.00' :param ref: the reference segmentation :type ref: str or list :param hyp: the segmentation to evaluate :type hyp: str or list :param k: window size, if None, set to half of the average reference segment length :type k: int :param boundary: boundary value :type boundary: str or int or bool :rtype: float
Here is the function:
def pk(ref, hyp, k=None, boundary="1"):
    """
    Compute the Pk metric for a pair of segmentations.

    A segmentation is any sequence over a vocabulary of two items
    (e.g. "0", "1"), where the specified boundary value is used to
    mark the edge of a segmentation.

    >>> '%.2f' % pk('0100'*100, '1'*400, 2)
    '0.50'
    >>> '%.2f' % pk('0100'*100, '0'*400, 2)
    '0.50'
    >>> '%.2f' % pk('0100'*100, '0100'*100, 2)
    '0.00'

    :param ref: the reference segmentation
    :type ref: str or list
    :param hyp: the segmentation to evaluate
    :type hyp: str or list
    :param k: window size, if None, set to half of the average reference
        segment length
    :type k: int
    :param boundary: boundary value
    :type boundary: str or int or bool
    :rtype: float
    """
    if k is None:
        # Half of the average reference segment length.
        k = int(round(len(ref) / (ref.count(boundary) * 2.0)))
    n_windows = len(ref) - k + 1
    disagreements = 0
    for start in range(n_windows):
        ref_has_bound = ref[start : start + k].count(boundary) > 0
        hyp_has_bound = hyp[start : start + k].count(boundary) > 0
        if ref_has_bound != hyp_has_bound:
            disagreements += 1
    return disagreements / (n_windows + 0.0)
170,579 | from nltk.probability import FreqDist
class ConfusionMatrix:
    """
    The confusion matrix between a list of reference values and a
    corresponding list of test values. Entry *[r,t]* of this
    matrix is a count of the number of times that the reference value
    *r* corresponds to the test value *t*. E.g.:
    >>> from nltk.metrics import ConfusionMatrix
    >>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split()
    >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
    >>> cm = ConfusionMatrix(ref, test)
    >>> print(cm['NN', 'NN'])
    3
    Note that the diagonal entries *Ri=Tj* of this matrix
    corresponds to correct values; and the off-diagonal entries
    correspond to incorrect values.
    """
    def __init__(self, reference, test, sort_by_count=False):
        """
        Construct a new confusion matrix from a list of reference
        values and a corresponding list of test values.
        :type reference: list
        :param reference: An ordered list of reference values.
        :type test: list
        :param test: A list of values to compare against the
            corresponding reference values.
        :raise ValueError: If ``reference`` and ``test`` do not have
            the same length.
        """
        if len(reference) != len(test):
            raise ValueError("Lists must have the same length.")
        # Get a list of all values.
        if sort_by_count:
            ref_fdist = FreqDist(reference)
            test_fdist = FreqDist(test)
            # Most frequent labels first (negated combined frequency).
            def key(v):
                return -(ref_fdist[v] + test_fdist[v])
            values = sorted(set(reference + test), key=key)
        else:
            values = sorted(set(reference + test))
        # Construct a value->index dictionary
        indices = {val: i for (i, val) in enumerate(values)}
        # Make a confusion matrix table.
        confusion = [[0 for _ in values] for _ in values]
        max_conf = 0  # Maximum confusion
        for w, g in zip(reference, test):
            confusion[indices[w]][indices[g]] += 1
            max_conf = max(max_conf, confusion[indices[w]][indices[g]])
        #: A list of all values in ``reference`` or ``test``.
        self._values = values
        #: A dictionary mapping values in ``self._values`` to their indices.
        self._indices = indices
        #: The confusion matrix itself (as a list of lists of counts).
        self._confusion = confusion
        #: The greatest count in ``self._confusion`` (used for printing).
        self._max_conf = max_conf
        #: The total number of values in the confusion matrix.
        self._total = len(reference)
        #: The number of correct (on-diagonal) values in the matrix.
        self._correct = sum(confusion[i][i] for i in range(len(values)))
    def __getitem__(self, li_lj_tuple):
        """
        :return: The number of times that value ``li`` was expected and
            value ``lj`` was given.
        :rtype: int
        """
        (li, lj) = li_lj_tuple
        i = self._indices[li]
        j = self._indices[lj]
        return self._confusion[i][j]
    def __repr__(self):
        return f"<ConfusionMatrix: {self._correct}/{self._total} correct>"
    def __str__(self):
        return self.pretty_format()
    def pretty_format(
        self,
        show_percents=False,
        values_in_chart=True,
        truncate=None,
        sort_by_count=False,
    ):
        """
        :return: A multi-line string representation of this confusion matrix.
        :param show_percents: If true, show cell values as percentages
            of the total instead of raw counts.
        :param values_in_chart: If true, label rows/columns with the
            values themselves; otherwise use numeric indices plus a key.
        :type truncate: int
        :param truncate: If specified, then only show the specified
            number of values. Any sorting (e.g., sort_by_count)
            will be performed before truncation.
        :param sort_by_count: If true, then sort by the count of each
            label in the reference data. I.e., labels that occur more
            frequently in the reference label will be towards the left
            edge of the matrix, and labels that occur less frequently
            will be towards the right edge.
        """
        confusion = self._confusion
        values = self._values
        if sort_by_count:
            values = sorted(
                values, key=lambda v: -sum(self._confusion[self._indices[v]])
            )
        if truncate:
            values = values[:truncate]
        if values_in_chart:
            value_strings = ["%s" % val for val in values]
        else:
            value_strings = [str(n + 1) for n in range(len(values))]
        # Construct a format string for row values
        valuelen = max(len(val) for val in value_strings)
        value_format = "%" + repr(valuelen) + "s | "
        # Construct a format string for matrix entries
        if show_percents:
            entrylen = 6
            entry_format = "%5.1f%%"
            zerostr = "     ."
        else:
            entrylen = len(repr(self._max_conf))
            entry_format = "%" + repr(entrylen) + "d"
            zerostr = " " * (entrylen - 1) + "."
        # Write the column values.
        s = ""
        for i in range(valuelen):
            s += (" " * valuelen) + " |"
            for val in value_strings:
                if i >= valuelen - len(val):
                    s += val[i - valuelen + len(val)].rjust(entrylen + 1)
                else:
                    s += " " * (entrylen + 1)
            s += " |\n"
        # Write a dividing line
        s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values)))
        # Write the entries.
        for val, li in zip(value_strings, values):
            i = self._indices[li]
            s += value_format % val
            for lj in values:
                j = self._indices[lj]
                if confusion[i][j] == 0:
                    s += zerostr
                elif show_percents:
                    s += entry_format % (100.0 * confusion[i][j] / self._total)
                else:
                    s += entry_format % confusion[i][j]
                if i == j:
                    # Angle-bracket the diagonal (correct) entry.
                    prevspace = s.rfind(" ")
                    s = s[:prevspace] + "<" + s[prevspace + 1 :] + ">"
                else:
                    s += " "
            s += "|\n"
        # Write a dividing line
        s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values)))
        # Write a key
        s += "(row = reference; col = test)\n"
        if not values_in_chart:
            s += "Value key:\n"
            for i, value in enumerate(values):
                s += "%6d: %s\n" % (i + 1, value)
        return s
    def key(self):
        # Return a legend mapping numeric indices to values.
        # NOTE: the local name ``str`` shadows the builtin inside this method.
        values = self._values
        str = "Value key:\n"
        indexlen = len(repr(len(values) - 1))
        key_format = "  %" + repr(indexlen) + "d: %s\n"
        for i in range(len(values)):
            str += key_format % (i, values[i])
        return str
    def recall(self, value):
        """Given a value in the confusion matrix, return the recall
        that corresponds to this value. The recall is defined as:
        - *r* = true positive / (true positive + false negative)
        and can loosely be considered the ratio of how often ``value``
        was predicted correctly relative to how often ``value`` was
        the true result.
        :param value: value used in the ConfusionMatrix
        :return: the recall corresponding to ``value``.
        :rtype: float
        """
        # Number of times `value` was correct, and also predicted
        TP = self[value, value]
        # Number of times `value` was correct
        TP_FN = sum(self[value, pred_value] for pred_value in self._values)
        if TP_FN == 0:
            return 0.0
        return TP / TP_FN
    def precision(self, value):
        """Given a value in the confusion matrix, return the precision
        that corresponds to this value. The precision is defined as:
        - *p* = true positive / (true positive + false positive)
        and can loosely be considered the ratio of how often ``value``
        was predicted correctly relative to the number of predictions
        for ``value``.
        :param value: value used in the ConfusionMatrix
        :return: the precision corresponding to ``value``.
        :rtype: float
        """
        # Number of times `value` was correct, and also predicted
        TP = self[value, value]
        # Number of times `value` was predicted
        TP_FP = sum(self[real_value, value] for real_value in self._values)
        if TP_FP == 0:
            return 0.0
        return TP / TP_FP
    def f_measure(self, value, alpha=0.5):
        """
        Given a value used in the confusion matrix, return the f-measure
        that corresponds to this value. The f-measure is the harmonic mean
        of the ``precision`` and ``recall``, weighted by ``alpha``.
        In particular, given the precision *p* and recall *r* defined by:
        - *p* = true positive / (true positive + false positive)
        - *r* = true positive / (true positive + false negative)
        The f-measure is:
        - *1/(alpha/p + (1-alpha)/r)*
        With ``alpha = 0.5``, this reduces to:
        - *2pr / (p + r)*
        :param value: value used in the ConfusionMatrix
        :param alpha: Ratio of the cost of false negative compared to false
            positives. Defaults to 0.5, where the costs are equal.
        :type alpha: float
        :return: the F-measure corresponding to ``value``.
        :rtype: float
        """
        p = self.precision(value)
        r = self.recall(value)
        if p == 0.0 or r == 0.0:
            return 0.0
        return 1.0 / (alpha / p + (1 - alpha) / r)
    def evaluate(self, alpha=0.5, truncate=None, sort_by_count=False):
        """
        Tabulate the **recall**, **precision** and **f-measure**
        for each value in this confusion matrix.
        >>> reference = "DET NN VB DET JJ NN NN IN DET NN".split()
        >>> test = "DET VB VB DET NN NN NN IN DET NN".split()
        >>> cm = ConfusionMatrix(reference, test)
        >>> print(cm.evaluate())
        Tag | Prec.  | Recall | F-measure
        ----+--------+--------+-----------
        DET | 1.0000 | 1.0000 | 1.0000
         IN | 1.0000 | 1.0000 | 1.0000
         JJ | 0.0000 | 0.0000 | 0.0000
         NN | 0.7500 | 0.7500 | 0.7500
         VB | 0.5000 | 1.0000 | 0.6667
        <BLANKLINE>
        :param alpha: Ratio of the cost of false negative compared to false
            positives, as used in the f-measure computation. Defaults to 0.5,
            where the costs are equal.
        :type alpha: float
        :param truncate: If specified, then only show the specified
            number of values. Any sorting (e.g., sort_by_count)
            will be performed before truncation. Defaults to None
        :type truncate: int, optional
        :param sort_by_count: Whether to sort the outputs on frequency
            in the reference label. Defaults to False.
        :type sort_by_count: bool, optional
        :return: A tabulated recall, precision and f-measure string
        :rtype: str
        """
        tags = self._values
        # Apply keyword parameters
        if sort_by_count:
            tags = sorted(tags, key=lambda v: -sum(self._confusion[self._indices[v]]))
        if truncate:
            tags = tags[:truncate]
        tag_column_len = max(max(len(tag) for tag in tags), 3)
        # Construct the header
        s = (
            f"{' ' * (tag_column_len - 3)}Tag | Prec.  | Recall | F-measure\n"
            f"{'-' * tag_column_len}-+--------+--------+-----------\n"
        )
        # Construct the body
        for tag in tags:
            s += (
                f"{tag:>{tag_column_len}} | "
                f"{self.precision(tag):<6.4f} | "
                f"{self.recall(tag):<6.4f} | "
                f"{self.f_measure(tag, alpha=alpha):.4f}\n"
            )
        return s
def demo():
    """Run ConfusionMatrix on a tiny POS-tagging example and print it."""
    gold = "DET NN VB DET JJ NN NN IN DET NN".split()
    pred = "DET VB VB DET NN NN NN IN DET NN".split()
    print("Reference =", gold)
    print("Test    =", pred)
    print("Confusion matrix:")
    cm = ConfusionMatrix(gold, pred)
    print(cm)
    print(cm.pretty_format(sort_by_count=True))
    print(cm.recall("VB"))
170,580 | import operator
import warnings
def _edit_dist_init(len1, len2):
lev = []
for i in range(len1):
lev.append([0] * len2) # initialize 2D array to zero
for i in range(len1):
lev[i][0] = i # column 0: 0,1,2,3,4,...
for j in range(len2):
lev[0][j] = j # row 0: 0,1,2,3,4,...
return lev
def _edit_dist_step(
lev, i, j, s1, s2, last_left, last_right, substitution_cost=1, transpositions=False
):
c1 = s1[i - 1]
c2 = s2[j - 1]
# skipping a character in s1
a = lev[i - 1][j] + 1
# skipping a character in s2
b = lev[i][j - 1] + 1
# substitution
c = lev[i - 1][j - 1] + (substitution_cost if c1 != c2 else 0)
# transposition
d = c + 1 # never picked by default
if transpositions and last_left > 0 and last_right > 0:
d = lev[last_left - 1][last_right - 1] + i - last_left + j - last_right - 1
# pick the cheapest
lev[i][j] = min(a, b, c, d)
def _edit_dist_backtrace(lev):
i, j = len(lev) - 1, len(lev[0]) - 1
alignment = [(i, j)]
while (i, j) != (0, 0):
directions = [
(i - 1, j - 1), # substitution
(i - 1, j), # skip s1
(i, j - 1), # skip s2
]
direction_costs = (
(lev[i][j] if (i >= 0 and j >= 0) else float("inf"), (i, j))
for i, j in directions
)
_, (i, j) = min(direction_costs, key=operator.itemgetter(0))
alignment.append((i, j))
return list(reversed(alignment))
The provided code snippet includes necessary dependencies for implementing the `edit_distance_align` function. Write a Python function `def edit_distance_align(s1, s2, substitution_cost=1)` to solve the following problem:
Calculate the minimum Levenshtein edit-distance based alignment mapping between two strings. The alignment finds the mapping from string s1 to s2 that minimizes the edit distance cost. For example, mapping "rain" to "shine" would involve 2 substitutions, 2 matches and an insertion resulting in the following mapping: [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)] NB: (0, 0) is the start state without any letters associated See more: https://web.stanford.edu/class/cs124/lec/med.pdf In case of multiple valid minimum-distance alignments, the backtrace has the following operation precedence: 1. Substitute s1 and s2 characters 2. Skip s1 character 3. Skip s2 character The backtrace is carried out in reverse string order. This function does not support transposition. :param s1, s2: The strings to be aligned :type s1: str :type s2: str :type substitution_cost: int :rtype: List[Tuple(int, int)]
Here is the function:
def edit_distance_align(s1, s2, substitution_cost=1):
    """
    Calculate the minimum Levenshtein edit-distance based alignment
    mapping between two strings. The alignment finds the mapping
    from string s1 to s2 that minimizes the edit distance cost.
    For example, mapping "rain" to "shine" would involve 2
    substitutions, 2 matches and an insertion resulting in
    the following mapping:
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)]
    NB: (0, 0) is the start state without any letters associated
    See more: https://web.stanford.edu/class/cs124/lec/med.pdf
    In case of multiple valid minimum-distance alignments, the
    backtrace has the following operation precedence:
    1. Substitute s1 and s2 characters
    2. Skip s1 character
    3. Skip s2 character
    The backtrace is carried out in reverse string order.
    This function does not support transposition.
    :param s1, s2: The strings to be aligned
    :type s1: str
    :type s2: str
    :type substitution_cost: int
    :rtype: List[Tuple(int, int)]
    """
    rows = len(s1)
    cols = len(s2)
    # Dynamic-programming table, one extra row/column for the empty prefix.
    table = _edit_dist_init(rows + 1, cols + 1)
    for r in range(rows):
        for c in range(cols):
            _edit_dist_step(
                table,
                r + 1,
                c + 1,
                s1,
                s2,
                0,
                0,
                substitution_cost=substitution_cost,
                transpositions=False,
            )
    # Walk back through the table to extract one optimal alignment.
    return _edit_dist_backtrace(table)
170,581 | import operator
import warnings
The provided code snippet includes necessary dependencies for implementing the `interval_distance` function. Write a Python function `def interval_distance(label1, label2)` to solve the following problem:
Krippendorff's interval distance metric >>> from nltk.metrics import interval_distance >>> interval_distance(1,10) 81 Krippendorff 1980, Content Analysis: An Introduction to its Methodology
Here is the function:
def interval_distance(label1, label2):
    """Krippendorff's interval distance metric.

    >>> from nltk.metrics import interval_distance
    >>> interval_distance(1,10)
    81

    Krippendorff 1980, Content Analysis: An Introduction to its Methodology

    :param label1: a numeric label
    :param label2: a numeric label
    :return: the squared difference ``(label1 - label2) ** 2``, or None
        (after printing a warning) when the labels are not numeric
    """
    try:
        return pow(label1 - label2, 2)
    except TypeError:
        # A bare ``except`` here previously swallowed *every* exception,
        # including KeyboardInterrupt/SystemExit; only the expected
        # non-numeric-label failure (TypeError) is handled now.
        print("non-numeric labels not supported with interval distance")
170,582 | import operator
import warnings
The provided code snippet includes necessary dependencies for implementing the `presence` function. Write a Python function `def presence(label)` to solve the following problem:
Higher-order function to test presence of a given label
Here is the function:
def presence(label):
    """Higher-order function to test presence of a given label.

    Returns a distance-style function of two containers that yields
    1.0 when ``label`` is in both or in neither, 0.0 otherwise.
    """

    def _agreement(x, y):
        return 1.0 * ((label in x) == (label in y))

    return _agreement
170,583 | import operator
import warnings
def fractional_presence(label):
    # Higher-order function: returns a distance over label *containers*
    # that weights the presence of ``label`` by the reciprocal of each
    # container's size.
    # NOTE(review): the chained ``or`` relies on short-circuiting over
    # falsy 0.0 values -- e.g. when ``label`` is in both x and y but
    # 1/len(x) == 1/len(y), the first term evaluates to 0.0 and control
    # falls through to the later branches; confirm this is intended.
    return (
        lambda x, y: abs((1.0 / len(x)) - (1.0 / len(y))) * (label in x and label in y)
        or 0.0 * (label not in x and label not in y)
        or abs(1.0 / len(x)) * (label in x and label not in y)
        or (1.0 / len(y)) * (label not in x and label in y)
    )
170,584 | import operator
import warnings
def custom_distance(file):
    """Load a custom distance function from a tab-separated file of
    ``labelA<TAB>labelB<TAB>distance`` lines.

    The returned function takes two frozensets of labels and looks up
    their distance symmetrically (order of arguments does not matter).
    """
    table = {}
    with open(file) as fh:
        for line in fh:
            first, second, dist = line.strip().split("\t")
            # Key on an unordered pair of singleton frozensets.
            pair = frozenset([frozenset([first]), frozenset([second])])
            table[pair] = float(dist)
    return lambda x, y: table[frozenset([x, y])]
170,585 | import operator
import warnings
def edit_distance(s1, s2, substitution_cost=1, transpositions=False):
    """
    Calculate the Levenshtein edit-distance between two strings.
    The edit distance is the number of characters that need to be
    substituted, inserted, or deleted, to transform s1 into s2. For
    example, transforming "rain" to "shine" requires three steps,
    consisting of two substitutions and one insertion:
    "rain" -> "sain" -> "shin" -> "shine". These operations could have
    been done in other orders, but at least three steps are needed.
    Allows specifying the cost of substitution edits (e.g., "a" -> "b"),
    because sometimes it makes sense to assign greater penalties to
    substitutions.
    This also optionally allows transposition edits (e.g., "ab" -> "ba"),
    though this is disabled by default.
    :param s1, s2: The strings to be analysed
    :param transpositions: Whether to allow transposition edits
    :type s1: str
    :type s2: str
    :type substitution_cost: int
    :type transpositions: bool
    :rtype: int
    """
    # set up a 2-D array
    len1 = len(s1)
    len2 = len(s2)
    lev = _edit_dist_init(len1 + 1, len2 + 1)
    # retrieve alphabet
    sigma = set()
    sigma.update(s1)
    sigma.update(s2)
    # set up table to remember positions of last seen occurrence in s1
    last_left_t = _last_left_t_init(sigma)
    # iterate over the array
    # i and j start from 1 and not 0 to stay close to the wikipedia pseudo-code
    # see https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance
    for i in range(1, len1 + 1):
        # Column of the most recent j in *this* row where s1[i-1] == s2[j-1];
        # serves as one of the "last match" anchors for transposition cost.
        last_right_buf = 0
        for j in range(1, len2 + 1):
            # Last row (1-based) at which the current s2 character was
            # seen in s1 among previously processed rows.
            last_left = last_left_t[s2[j - 1]]
            last_right = last_right_buf
            if s1[i - 1] == s2[j - 1]:
                last_right_buf = j
            _edit_dist_step(
                lev,
                i,
                j,
                s1,
                s2,
                last_left,
                last_right,
                substitution_cost=substitution_cost,
                transpositions=transpositions,
            )
        # Record that s1[i-1] was last seen in row i.
        last_left_t[s1[i - 1]] = i
    return lev[len1][len2]
def binary_distance(label1, label2):
    """Simple equality test.

    0.0 if the labels are identical, 1.0 if they are different.

    >>> from nltk.metrics import binary_distance
    >>> binary_distance(1,1)
    0.0

    >>> binary_distance(1,3)
    1.0
    """
    if label1 == label2:
        return 0.0
    return 1.0
def jaccard_distance(label1, label2):
    """Distance metric comparing set-similarity.

    Returns ``1 - |label1 & label2| / |label1 | label2|``.

    :param label1: a set of hashable items
    :param label2: a set of hashable items
    :return: the Jaccard distance in [0, 1]; 0.0 for two empty sets
        (previously this raised ZeroDivisionError)
    :rtype: float
    """
    union = label1.union(label2)
    if not union:
        # Two empty sets are identical, hence distance 0.
        return 0.0
    return (len(union) - len(label1.intersection(label2))) / len(union)
def masi_distance(label1, label2):
    """Distance metric that takes into account partial agreement when multiple
    labels are assigned.

    >>> from nltk.metrics import masi_distance
    >>> masi_distance(set([1, 2]), set([1, 2, 3, 4]))
    0.665

    Passonneau 2006, Measuring Agreement on Set-Valued Items (MASI)
    for Semantic and Pragmatic Annotation.
    """
    overlap = len(label1.intersection(label2))
    combined = len(label1.union(label2))
    size1 = len(label1)
    size2 = len(label2)
    # Monotonicity weight: identity > subset > intersection > disjoint.
    if overlap == size1 == size2:
        weight = 1
    elif overlap == min(size1, size2):
        weight = 0.67
    elif overlap > 0:
        weight = 0.33
    else:
        weight = 0
    return 1 - overlap / combined * weight
def jaro_similarity(s1, s2):
    """
    Computes the Jaro similarity between 2 sequences from:

        Matthew A. Jaro (1989). Advances in record linkage methodology
        as applied to the 1985 census of Tampa Florida. Journal of the
        American Statistical Association. 84 (406): 414-20.

    The Jaro distance between is the min no. of single-character transpositions
    required to change one word into another. The Jaro similarity formula from
    https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance :

        ``jaro_sim = 0 if m = 0 else 1/3 * (m/|s_1| + m/s_2 + (m-t)/m)``

    where
        - `|s_i|` is the length of string `s_i`
        - `m` is the no. of matching characters
        - `t` is the half no. of possible transpositions.
    """
    # Cache the lengths; both are used repeatedly below.
    len1 = len(s1)
    len2 = len(s2)
    # Characters farther apart than this bound never count as a match.
    match_bound = max(len1, len2) // 2 - 1
    matched_1 = []  # positions in s1 matched to some character of s2
    matched_2 = []  # positions in s2 matched to some character of s1
    # Greedy left-to-right matching within the window.
    for i in range(len1):
        hi = min(i + match_bound, len2 - 1)
        lo = max(0, i - match_bound)
        for j in range(lo, hi + 1):
            if s1[i] == s2[j] and j not in matched_2:
                matched_1.append(i)
                matched_2.append(j)
                break
    matched_2.sort()
    matches = len(matched_1)
    if matches == 0:
        return 0
    # Count positions whose matched characters disagree after sorting.
    transpositions = sum(1 for i, j in zip(matched_1, matched_2) if s1[i] != s2[j])
    return (
        1
        / 3
        * (
            matches / len1
            + matches / len2
            + (matches - transpositions // 2) / matches
        )
    )
def jaro_winkler_similarity(s1, s2, p=0.1, max_l=4):
    """
    The Jaro Winkler distance is an extension of the Jaro similarity in:

        William E. Winkler. 1990. String Comparator Metrics and Enhanced
        Decision Rules in the Fellegi-Sunter Model of Record Linkage.
        Proceedings of the Section on Survey Research Methods.
        American Statistical Association: 354-359.

    such that:

        jaro_winkler_sim = jaro_sim + ( l * p * (1 - jaro_sim) )

    where,
        - jaro_sim is the output from the Jaro Similarity,
          see jaro_similarity()
        - l is the length of common prefix at the start of the string
            - this implementation provides an upperbound for the l value
              to keep the prefixes. A common value of this upperbound is 4.
        - p is the constant scaling factor to overweigh common prefixes.
          The Jaro-Winkler similarity will fall within the [0, 1] bound,
          given that max(p)<=0.25 , default is p=0.1 in Winkler (1990)

    Test using outputs from https://www.census.gov/srd/papers/pdf/rr93-8.pdf
    from "Table 5 Comparison of String Comparators Rescaled between 0 and 1"

    >>> winkler_examples = [("billy", "billy"), ("billy", "bill"), ("billy", "blily"),
    ... ("massie", "massey"), ("yvette", "yevett"), ("billy", "bolly"), ("dwayne", "duane"),
    ... ("dixon", "dickson"), ("billy", "susan")]
    >>> winkler_scores = [1.000, 0.967, 0.947, 0.944, 0.911, 0.893, 0.858, 0.853, 0.000]
    >>> jaro_scores =    [1.000, 0.933, 0.933, 0.889, 0.889, 0.867, 0.822, 0.790, 0.000]

    One way to match the values on the Winkler's paper is to provide a different
    p scaling factor for different pairs of strings, e.g.

    >>> p_factors = [0.1, 0.125, 0.20, 0.125, 0.20, 0.20, 0.20, 0.15, 0.1]
    >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors):
    ...     assert round(jaro_similarity(s1, s2), 3) == jscore
    ...     assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore

    Test using outputs from https://www.census.gov/srd/papers/pdf/rr94-5.pdf from
    "Table 2.1. Comparison of String Comparators Using Last Names, First Names, and Street Names"

    >>> winkler_examples = [('SHACKLEFORD', 'SHACKELFORD'), ('DUNNINGHAM', 'CUNNIGHAM'),
    ... ('NICHLESON', 'NICHULSON'), ('JONES', 'JOHNSON'), ('MASSEY', 'MASSIE'),
    ... ('ABROMS', 'ABRAMS'), ('HARDIN', 'MARTINEZ'), ('ITMAN', 'SMITH'),
    ... ('JERALDINE', 'GERALDINE'), ('MARHTA', 'MARTHA'), ('MICHELLE', 'MICHAEL'),
    ... ('JULIES', 'JULIUS'), ('TANYA', 'TONYA'), ('DWAYNE', 'DUANE'), ('SEAN', 'SUSAN'),
    ... ('JON', 'JOHN'), ('JON', 'JAN'), ('BROOKHAVEN', 'BRROKHAVEN'),
    ... ('BROOK HALLOW', 'BROOK HLLW'), ('DECATUR', 'DECATIR'), ('FITZRUREITER', 'FITZENREITER'),
    ... ('HIGBEE', 'HIGHEE'), ('HIGBEE', 'HIGVEE'), ('LACURA', 'LOCURA'), ('IOWA', 'IONA'), ('1ST', 'IST')]
    >>> jaro_scores = [0.970, 0.896, 0.926, 0.790, 0.889, 0.889, 0.722, 0.467, 0.926,
    ... 0.944, 0.869, 0.889, 0.867, 0.822, 0.783, 0.917, 0.000, 0.933, 0.944, 0.905,
    ... 0.856, 0.889, 0.889, 0.889, 0.833, 0.000]
    >>> winkler_scores = [0.982, 0.896, 0.956, 0.832, 0.944, 0.922, 0.722, 0.467, 0.926,
    ... 0.961, 0.921, 0.933, 0.880, 0.858, 0.805, 0.933, 0.000, 0.947, 0.967, 0.943,
    ... 0.913, 0.922, 0.922, 0.900, 0.867, 0.000]

    One way to match the values on the Winkler's paper is to provide a different
    p scaling factor for different pairs of strings, e.g.

    >>> p_factors = [0.1, 0.1, 0.1, 0.1, 0.125, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.20,
    ... 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
    >>> for (s1, s2), jscore, wscore, p in zip(winkler_examples, jaro_scores, winkler_scores, p_factors):
    ...     if (s1, s2) in [('JON', 'JAN'), ('1ST', 'IST')]:
    ...         continue  # Skip bad examples from the paper.
    ...     assert round(jaro_similarity(s1, s2), 3) == jscore
    ...     assert round(jaro_winkler_similarity(s1, s2, p=p), 3) == wscore

    This test-case proves that the output of Jaro-Winkler similarity depends on
    the product l * p and not on the product max_l * p. Here the product max_l * p > 1
    however the product l * p <= 1

    >>> round(jaro_winkler_similarity('TANYA', 'TONYA', p=0.1, max_l=100), 3)
    0.88
    """
    # The prefix bonus l * p must itself fall in [0, 1] for the
    # Jaro-Winkler score to stay within [0, 1]; warn otherwise.
    if not 0 <= max_l * p <= 1:
        warnings.warn(
            "The product `max_l * p` might not fall between [0,1]."
            "Jaro-Winkler similarity might not be between 0 and 1."
        )
    # Base Jaro similarity.
    jaro_sim = jaro_similarity(s1, s2)
    # Length of the common prefix, capped at max_l; zip() stops at the
    # end of the shorter string automatically.
    prefix_len = 0
    for ch1, ch2 in zip(s1, s2):
        if ch1 != ch2:
            break
        prefix_len += 1
        if prefix_len == max_l:
            break
    # Boost the base similarity by the prefix bonus.
    return jaro_sim + prefix_len * p * (1 - jaro_sim)
def demo():
    """Print string-distance and set-distance examples to stdout."""
    # (string1, string2) pairs used to exercise the string metrics below.
    string_distance_examples = [
        ("rain", "shine"),
        ("abcdef", "acbdef"),
        ("language", "lnaguaeg"),
        ("language", "lnaugage"),
        ("language", "lngauage"),
    ]
    for s1, s2 in string_distance_examples:
        print(f"Edit distance btwn '{s1}' and '{s2}':", edit_distance(s1, s2))
        print(
            f"Edit dist with transpositions btwn '{s1}' and '{s2}':",
            edit_distance(s1, s2, transpositions=True),
        )
        print(f"Jaro similarity btwn '{s1}' and '{s2}':", jaro_similarity(s1, s2))
        print(
            f"Jaro-Winkler similarity btwn '{s1}' and '{s2}':",
            jaro_winkler_similarity(s1, s2),
        )
        # Jaro-Winkler distance is the complement of the similarity.
        print(
            f"Jaro-Winkler distance btwn '{s1}' and '{s2}':",
            1 - jaro_winkler_similarity(s1, s2),
        )
    # Set-based metrics on two small overlapping integer sets.
    s1 = {1, 2, 3, 4}
    s2 = {3, 4, 5}
    print("s1:", s1)
    print("s2:", s2)
    print("Binary distance:", binary_distance(s1, s2))
    print("Jaccard distance:", jaccard_distance(s1, s2))
    print("MASI distance:", masi_distance(s1, s2))
170,586 | import operator
from functools import reduce
from math import fabs
from random import shuffle
from nltk.util import LazyConcatenation, LazyMap
The provided code snippet includes necessary dependencies for implementing the `log_likelihood` function. Write a Python function `def log_likelihood(reference, test)` to solve the following problem:
Given a list of reference values and a corresponding list of test probability distributions, return the average log likelihood of the reference values, given the probability distributions. :param reference: A list of reference values :type reference: list :param test: A list of probability distributions over values to compare against the corresponding reference values. :type test: list(ProbDistI)
Here is the function:
def log_likelihood(reference, test):
    """
    Compute the mean log likelihood that a sequence of probability
    distributions assigns to the corresponding reference values.

    :param reference: An ordered list of reference values.
    :type reference: list
    :param test: Probability distributions, one per reference value,
        each providing a ``logprob`` method.
    :type test: list(ProbDistI)
    :raises ValueError: If the two lists differ in length.
    """
    if len(test) != len(reference):
        raise ValueError("Lists must have the same length.")

    # Average the log probability each distribution assigns to its value.
    scores = [dist.logprob(val) for (val, dist) in zip(reference, test)]
    return sum(scores) / len(scores)
170,587 | import operator
from functools import reduce
from math import fabs
from random import shuffle
from nltk.util import LazyConcatenation, LazyMap
def reduce(function: Callable[[_T, _S], _T], sequence: Iterable[_S], initial: _T) -> _T: ...
def reduce(function: Callable[[_T, _T], _T], sequence: Iterable[_T]) -> _T: ...
def fabs(__x: SupportsFloat) -> float: ...
def shuffle(x: MutableSequence[Any], random: Optional[Callable[[], float]] = ...) -> None: ...
The provided code snippet includes necessary dependencies for implementing the `approxrand` function. Write a Python function `def approxrand(a, b, **kwargs)` to solve the following problem:
Returns an approximate significance level between two lists of independently generated test values. Approximate randomization calculates significance by randomly drawing from a sample of the possible permutations. At the limit of the number of possible permutations, the significance level is exact. The approximate significance level is the sample mean number of times the statistic of the permutated lists varies from the actual statistic of the unpermuted argument lists. :return: a tuple containing an approximate significance level, the count of the number of times the pseudo-statistic varied from the actual statistic, and the number of shuffles :rtype: tuple :param a: a list of test values :type a: list :param b: another list of independently generated test values :type b: list
Here is the function:
def approxrand(a, b, **kwargs):
    """
    Returns an approximate significance level between two lists of
    independently generated test values.

    Approximate randomization calculates significance by randomly drawing
    from a sample of the possible permutations. At the limit of the number
    of possible permutations, the significance level is exact. The
    approximate significance level is the sample mean number of times the
    statistic of the permutated lists varies from the actual statistic of
    the unpermuted argument lists.

    :return: a tuple containing an approximate significance level, the count
        of the number of times the pseudo-statistic varied from the
        actual statistic, and the number of shuffles
    :rtype: tuple
    :param a: a list of test values
    :type a: list
    :param b: another list of independently generated test values
    :type b: list
    """
    shuffles = kwargs.get("shuffles", 999)
    # there's no point in trying to shuffle beyond all possible permutations;
    # the reduce computes (len(a) + len(b))!
    shuffles = min(shuffles, reduce(operator.mul, range(1, len(a) + len(b) + 1)))
    # Default test statistic is the sample mean.
    stat = kwargs.get("statistic", lambda lst: sum(lst) / len(lst))
    verbose = kwargs.get("verbose", False)

    if verbose:
        print("shuffles: %d" % shuffles)

    # Observed absolute difference of the statistic between the two samples.
    actual_stat = fabs(stat(a) - stat(b))

    if verbose:
        print("actual statistic: %f" % actual_stat)
        print("-" * 60)

    # Count of shuffles whose pseudo-statistic is at least as extreme as the
    # observed one; initialized to a tiny epsilon rather than 0 -- presumably
    # to keep downstream ratios strictly positive; TODO confirm intent.
    c = 1e-100
    # Lazily concatenate a and b so shuffled index lists address the pool.
    lst = LazyConcatenation([a, b])
    indices = list(range(len(a) + len(b)))

    for i in range(shuffles):
        if verbose and i % 10 == 0:
            print("shuffle: %d" % i)

        # Randomly repartition the pooled values into two pseudo-samples
        # of the original sizes.
        shuffle(indices)

        pseudo_stat_a = stat(LazyMap(lambda i: lst[i], indices[: len(a)]))
        pseudo_stat_b = stat(LazyMap(lambda i: lst[i], indices[len(a) :]))
        pseudo_stat = fabs(pseudo_stat_a - pseudo_stat_b)

        if pseudo_stat >= actual_stat:
            c += 1

        if verbose and i % 10 == 0:
            print("pseudo-statistic: %f" % pseudo_stat)
            print("significance: %f" % ((c + 1) / (i + 1)))
            print("-" * 60)

    # Add-one smoothed proportion of permutations at least as extreme.
    significance = (c + 1) / (shuffles + 1)

    if verbose:
        print("significance: %f" % significance)
        # betai appears to be a module-level incomplete-beta helper that may
        # be unset/None when unavailable -- TODO confirm at module scope.
        if betai:
            for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]:
                print(f"prob(phi<={phi:f}): {betai(c, shuffles, phi):f}")

    return (significance, c, shuffles)
170,588 | import operator
from functools import reduce
from math import fabs
from random import shuffle
from nltk.util import LazyConcatenation, LazyMap
def accuracy(reference, test):
    """
    Return the fraction of positions at which ``test`` agrees with
    ``reference``, i.e. the proportion of indices ``0<i<=len(test)``
    for which ``test[i] == reference[i]``.

    :type reference: list
    :param reference: An ordered list of reference values.
    :type test: list
    :param test: A list of values to compare against the corresponding
        reference values.
    :raise ValueError: If ``reference`` and ``test`` do not have the
        same length.
    """
    if len(test) != len(reference):
        raise ValueError("Lists must have the same length.")
    num_correct = sum(1 for ref, obs in zip(reference, test) if ref == obs)
    return num_correct / len(test)
def precision(reference, test):
    """
    Return the fraction of test values that also appear in the reference
    set, i.e. card(``reference`` intersection ``test``)/card(``test``).
    Returns None when ``test`` is empty.

    :type reference: set
    :param reference: A set of reference values.
    :type test: set
    :param test: A set of values to compare against the reference set.
    :rtype: float or None
    :raise TypeError: If either argument is not set-like.
    """
    if not hasattr(reference, "intersection") or not hasattr(test, "intersection"):
        raise TypeError("reference and test should be sets")
    if not test:
        # Precision is undefined for an empty test set.
        return None
    return len(test.intersection(reference)) / len(test)
def recall(reference, test):
    """
    Return the fraction of reference values that also appear in the test
    set, i.e. card(``reference`` intersection ``test``)/card(``reference``).
    Returns None when ``reference`` is empty.

    :type reference: set
    :param reference: A set of reference values.
    :type test: set
    :param test: A set of values to compare against the reference set.
    :rtype: float or None
    :raise TypeError: If either argument is not set-like.
    """
    if not hasattr(reference, "intersection") or not hasattr(test, "intersection"):
        raise TypeError("reference and test should be sets")
    if not reference:
        # Recall is undefined for an empty reference set.
        return None
    return len(reference.intersection(test)) / len(reference)
def f_measure(reference, test, alpha=0.5):
    """
    Return the F-measure of ``test`` against ``reference``: the harmonic
    mean of precision *p* and recall *r*, weighted by ``alpha``:

        1 / (alpha/p + (1-alpha)/r)

    Returns None when either set is empty, and 0 when either the
    precision or the recall is zero.

    :type reference: set
    :param reference: A set of reference values.
    :type test: set
    :param test: A set of values to compare against the reference set.
    :rtype: float or None
    """
    prec = precision(reference, test)
    rec = recall(reference, test)
    # Either score is None when its denominator set was empty.
    if prec is None or rec is None:
        return None
    if prec == 0 or rec == 0:
        return 0
    return 1.0 / (alpha / prec + (1 - alpha) / rec)
def demo():
    """Print accuracy, precision, recall, and F-measure examples to stdout."""
    print("-" * 75)
    # Two POS-tag sequences compared position-by-position for accuracy.
    reference = "DET NN VB DET JJ NN NN IN DET NN".split()
    test = "DET VB VB DET NN NN NN IN DET NN".split()
    print("Reference =", reference)
    print("Test =", test)
    print("Accuracy:", accuracy(reference, test))
    print("-" * 75)
    # The set-based metrics ignore position and duplicates.
    reference_set = set(reference)
    test_set = set(test)
    print("Reference =", reference_set)
    print("Test = ", test_set)
    print("Precision:", precision(reference_set, test_set))
    print(" Recall:", recall(reference_set, test_set))
    print("F-Measure:", f_measure(reference_set, test_set))
    print("-" * 75)
170,589 | import math as _math
from abc import ABCMeta, abstractmethod
from functools import reduce
def fisher_exact(*_args, **_kwargs):
    # Fallback stub: presumably stands in for scipy's fisher_exact when scipy
    # is unavailable, so callers fail loudly at call time rather than at
    # import time -- TODO confirm against the module's import guard.
    raise NotImplementedError
170,590 | import copy
import re
from functools import total_ordering
from nltk.internals import raise_unorderable_types, read_str
from nltk.sem.logic import (
Expression,
LogicalExpressionException,
LogicParser,
SubstituteBindingsI,
Variable,
)
_FROZEN_ERROR = "Frozen FeatStructs may not be modified."
_FROZEN_NOTICE = "\n%sIf self is frozen, raise ValueError."
The provided code snippet includes necessary dependencies for implementing the `_check_frozen` function. Write a Python function `def _check_frozen(method, indent="")` to solve the following problem:
Given a method function, return a new method function that first checks if ``self._frozen`` is true; and if so, raises ``ValueError`` with an appropriate message. Otherwise, call the method and return its result.
Here is the function:
def _check_frozen(method, indent=""):
    """
    Wrap ``method`` so that calling it on a frozen feature structure
    raises ValueError; unfrozen instances delegate to ``method``
    unchanged. The wrapper keeps the original method name and appends
    a frozen-notice to its docstring.
    """

    def wrapped(self, *args, **kwargs):
        if self._frozen:
            raise ValueError(_FROZEN_ERROR)
        return method(self, *args, **kwargs)

    wrapped.__name__ = method.__name__
    wrapped.__doc__ = (method.__doc__ or "") + (_FROZEN_NOTICE % indent)
    return wrapped
170,591 | import copy
import re
from functools import total_ordering
from nltk.internals import raise_unorderable_types, read_str
from nltk.sem.logic import (
Expression,
LogicalExpressionException,
LogicParser,
SubstituteBindingsI,
Variable,
)
def _retract_bindings(fstruct, inv_bindings, fs_class, visited):
    """
    Recursively replace, in place, any sub-structure of ``fstruct`` whose
    id appears in ``inv_bindings`` (a map from id(value) -> Variable) with
    the corresponding variable.
    """
    # Visit each node only once:
    if id(fstruct) in visited:
        return
    visited.add(id(fstruct))
    # Feature structures may be mapping-like or sequence-like; iterate
    # (feature-name, value) pairs either way.
    if _is_mapping(fstruct):
        items = fstruct.items()
    elif _is_sequence(fstruct):
        items = enumerate(fstruct)
    else:
        raise ValueError("Expected mapping or sequence")
    for (fname, fval) in items:
        if isinstance(fval, fs_class):
            # Lookup is by object identity: only the exact bound object
            # (equal id) is retracted to its variable.
            if id(fval) in inv_bindings:
                fstruct[fname] = inv_bindings[id(fval)]
            _retract_bindings(fval, inv_bindings, fs_class, visited)
def _default_fs_class(obj):
    """
    Infer which class(es) should be treated as feature structures for
    ``obj``: FeatStruct instances use FeatStruct, plain dicts/lists use
    (dict, list), and anything else is rejected with ValueError.
    """
    if isinstance(obj, FeatStruct):
        return FeatStruct
    if isinstance(obj, (dict, list)):
        return (dict, list)
    raise ValueError(
        "To unify objects of type %s, you must specify "
        "fs_class explicitly." % obj.__class__.__name__
    )
The provided code snippet includes necessary dependencies for implementing the `retract_bindings` function. Write a Python function `def retract_bindings(fstruct, bindings, fs_class="default")` to solve the following problem:
Return the feature structure that is obtained by replacing each feature structure value that is bound by ``bindings`` with the variable that binds it. A feature structure value must be identical to a bound value (i.e., have equal id) to be replaced. ``bindings`` is modified to point to this new feature structure, rather than the original feature structure. Feature structure values in ``bindings`` may be modified if they are contained in ``fstruct``.
Here is the function:
def retract_bindings(fstruct, bindings, fs_class="default"):
    """
    Return the feature structure that is obtained by replacing each
    feature structure value that is bound by ``bindings`` with the
    variable that binds it. A feature structure value must be
    identical to a bound value (i.e., have equal id) to be replaced.

    ``bindings`` is modified to point to this new feature structure,
    rather than the original feature structure. Feature structure
    values in ``bindings`` may be modified if they are contained in
    ``fstruct``.
    """
    if fs_class == "default":
        fs_class = _default_fs_class(fstruct)
    # Deep-copy fstruct and bindings together so shared (reentrant)
    # sub-structures keep pointing at the same copied objects.
    (fstruct, new_bindings) = copy.deepcopy((fstruct, bindings))
    bindings.update(new_bindings)
    # Invert the bindings, keyed by object id (bound values may be
    # unhashable, and retraction is identity-based anyway).
    inv_bindings = {id(val): var for (var, val) in bindings.items()}
    _retract_bindings(fstruct, inv_bindings, fs_class, set())
    return fstruct
170,592 | import copy
import re
from functools import total_ordering
from nltk.internals import raise_unorderable_types, read_str
from nltk.sem.logic import (
Expression,
LogicalExpressionException,
LogicParser,
SubstituteBindingsI,
Variable,
)
def find_variables(fstruct, fs_class="default"):
    """
    :return: The set of variables used by this feature structure.
    :rtype: set(Variable)
    """
    if fs_class == "default":
        fs_class = _default_fs_class(fstruct)
    # _variables(fstruct, result, fs_class, visited) -- presumably
    # accumulates into the first set while the last guards against
    # revisiting reentrant nodes; TODO confirm against its definition.
    return _variables(fstruct, set(), fs_class, set())
def _rename_variables(fstruct, vars, used_vars, new_vars, fs_class, visited):
    """
    Destructively rename, inside ``fstruct``, every variable in ``vars``
    to a fresh variable not in ``used_vars``; record old->new pairs in
    ``new_vars`` and return ``fstruct``.
    """
    # Visit each (possibly reentrant) node only once.
    if id(fstruct) in visited:
        return
    visited.add(id(fstruct))
    if _is_mapping(fstruct):
        items = fstruct.items()
    elif _is_sequence(fstruct):
        items = enumerate(fstruct)
    else:
        raise ValueError("Expected mapping or sequence")
    for (fname, fval) in items:
        if isinstance(fval, Variable):
            # If it's in new_vars, then rebind it.
            if fval in new_vars:
                fstruct[fname] = new_vars[fval]
            # If it's in vars, pick a new name for it.
            elif fval in vars:
                new_vars[fval] = _rename_variable(fval, used_vars)
                fstruct[fname] = new_vars[fval]
                used_vars.add(new_vars[fval])
        elif isinstance(fval, fs_class):
            # Recurse into nested feature structures.
            _rename_variables(fval, vars, used_vars, new_vars, fs_class, visited)
        elif isinstance(fval, SubstituteBindingsI):
            # Pick new names for any variables in `vars`
            for var in fval.variables():
                if var in vars and var not in new_vars:
                    new_vars[var] = _rename_variable(var, used_vars)
                    used_vars.add(new_vars[var])
            # Replace all variables in `new_vars`.
            fstruct[fname] = fval.substitute_bindings(new_vars)
    return fstruct
def _default_fs_class(obj):
    """
    Return the class (or tuple of classes) treated as feature structures
    for ``obj``: FeatStruct for FeatStruct instances, (dict, list) for
    plain Python containers; anything else raises ValueError.
    """
    if isinstance(obj, FeatStruct):
        return FeatStruct
    if isinstance(obj, (dict, list)):
        return (dict, list)
    else:
        raise ValueError(
            "To unify objects of type %s, you must specify "
            "fs_class explicitly." % obj.__class__.__name__
        )
The provided code snippet includes necessary dependencies for implementing the `rename_variables` function. Write a Python function `def rename_variables( fstruct, vars=None, used_vars=(), new_vars=None, fs_class="default" )` to solve the following problem:
Return the feature structure that is obtained by replacing any of this feature structure's variables that are in ``vars`` with new variables. The names for these new variables will be names that are not used by any variable in ``vars``, or in ``used_vars``, or in this feature structure. :type vars: set :param vars: The set of variables that should be renamed. If not specified, ``find_variables(fstruct)`` is used; i.e., all variables will be given new names. :type used_vars: set :param used_vars: A set of variables whose names should not be used by the new variables. :type new_vars: dict(Variable -> Variable) :param new_vars: A dictionary that is used to hold the mapping from old variables to new variables. For each variable *v* in this feature structure: - If ``new_vars`` maps *v* to *v'*, then *v* will be replaced by *v'*. - If ``new_vars`` does not contain *v*, but ``vars`` does contain *v*, then a new entry will be added to ``new_vars``, mapping *v* to the new variable that is used to replace it. To consistently rename the variables in a set of feature structures, simply apply rename_variables to each one, using the same dictionary: >>> from nltk.featstruct import FeatStruct >>> fstruct1 = FeatStruct('[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]') >>> fstruct2 = FeatStruct('[subj=[agr=[number=?z,gender=?y]], obj=[agr=[number=?z,gender=?y]]]') >>> new_vars = {} # Maps old vars to alpha-renamed vars >>> fstruct1.rename_variables(new_vars=new_vars) [obj=[agr=[gender=?y2]], subj=[agr=[gender=?y2]]] >>> fstruct2.rename_variables(new_vars=new_vars) [obj=[agr=[gender=?y2, number=?z2]], subj=[agr=[gender=?y2, number=?z2]]] If new_vars is not specified, then an empty dictionary is used.
Here is the function:
def rename_variables(
    fstruct, vars=None, used_vars=(), new_vars=None, fs_class="default"
):
    """
    Return the feature structure that is obtained by replacing
    any of this feature structure's variables that are in ``vars``
    with new variables. The names for these new variables will be
    names that are not used by any variable in ``vars``, or in
    ``used_vars``, or in this feature structure.

    :type vars: set
    :param vars: The set of variables that should be renamed.
        If not specified, ``find_variables(fstruct)`` is used; i.e., all
        variables will be given new names.
    :type used_vars: set
    :param used_vars: A set of variables whose names should not be
        used by the new variables.
    :type new_vars: dict(Variable -> Variable)
    :param new_vars: A dictionary that is used to hold the mapping
        from old variables to new variables. For each variable *v*
        in this feature structure:

        - If ``new_vars`` maps *v* to *v'*, then *v* will be
          replaced by *v'*.
        - If ``new_vars`` does not contain *v*, but ``vars``
          does contain *v*, then a new entry will be added to
          ``new_vars``, mapping *v* to the new variable that is used
          to replace it.

    To consistently rename the variables in a set of feature
    structures, simply apply rename_variables to each one, using
    the same dictionary:

        >>> from nltk.featstruct import FeatStruct
        >>> fstruct1 = FeatStruct('[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]')
        >>> fstruct2 = FeatStruct('[subj=[agr=[number=?z,gender=?y]], obj=[agr=[number=?z,gender=?y]]]')
        >>> new_vars = {}  # Maps old vars to alpha-renamed vars
        >>> fstruct1.rename_variables(new_vars=new_vars)
        [obj=[agr=[gender=?y2]], subj=[agr=[gender=?y2]]]
        >>> fstruct2.rename_variables(new_vars=new_vars)
        [obj=[agr=[gender=?y2, number=?z2]], subj=[agr=[gender=?y2, number=?z2]]]

    If new_vars is not specified, then an empty dictionary is used.
    """
    if fs_class == "default":
        fs_class = _default_fs_class(fstruct)

    # Default values:
    if new_vars is None:
        new_vars = {}
    if vars is None:
        vars = find_variables(fstruct, fs_class)
    else:
        vars = set(vars)

    # Add our own variables to used_vars.
    used_vars = find_variables(fstruct, fs_class).union(used_vars)

    # Copy ourselves, and rename variables in the copy.
    return _rename_variables(
        copy.deepcopy(fstruct), vars, used_vars, new_vars, fs_class, set()
    )
170,593 | import copy
import re
from functools import total_ordering
from nltk.internals import raise_unorderable_types, read_str
from nltk.sem.logic import (
Expression,
LogicalExpressionException,
LogicParser,
SubstituteBindingsI,
Variable,
)
def _remove_variables(fstruct, fs_class, visited):
    """
    Destructively delete every feature whose value is a Variable from
    ``fstruct`` (recursing into nested feature structures); return
    ``fstruct``.
    """
    # Visit each (possibly reentrant) node only once.
    if id(fstruct) in visited:
        return
    visited.add(id(fstruct))
    # Materialize the item list up front because entries are deleted
    # while looping.
    if _is_mapping(fstruct):
        items = list(fstruct.items())
    elif _is_sequence(fstruct):
        items = list(enumerate(fstruct))
    else:
        raise ValueError("Expected mapping or sequence")
    for (fname, fval) in items:
        if isinstance(fval, Variable):
            # NOTE(review): for sequences, deleting by index shifts later
            # positions -- confirm behavior when a sequence holds several
            # variables.
            del fstruct[fname]
        elif isinstance(fval, fs_class):
            _remove_variables(fval, fs_class, visited)
    return fstruct
def _default_fs_class(obj):
    """
    Pick the default feature-structure class(es) for ``obj``:
    FeatStruct instances -> FeatStruct; dicts/lists -> (dict, list);
    otherwise raise ValueError asking the caller to pass fs_class.
    """
    if isinstance(obj, FeatStruct):
        return FeatStruct
    if isinstance(obj, (dict, list)):
        return (dict, list)
    else:
        raise ValueError(
            "To unify objects of type %s, you must specify "
            "fs_class explicitly." % obj.__class__.__name__
        )
The provided code snippet includes necessary dependencies for implementing the `remove_variables` function. Write a Python function `def remove_variables(fstruct, fs_class="default")` to solve the following problem:
:rtype: FeatStruct :return: The feature structure that is obtained by deleting all features whose values are ``Variables``.
Here is the function:
def remove_variables(fstruct, fs_class="default"):
    """
    :rtype: FeatStruct
    :return: The feature structure that is obtained by deleting
        all features whose values are ``Variables``.
    """
    if fs_class == "default":
        fs_class = _default_fs_class(fstruct)
    # Work on a deep copy so the caller's structure is left untouched.
    return _remove_variables(copy.deepcopy(fstruct), fs_class, set())
170,594 | import copy
import re
from functools import total_ordering
from nltk.internals import raise_unorderable_types, read_str
from nltk.sem.logic import (
Expression,
LogicalExpressionException,
LogicParser,
SubstituteBindingsI,
Variable,
)
def unify(
    fstruct1,
    fstruct2,
    bindings=None,
    trace=False,
    fail=None,
    rename_vars=True,
    fs_class="default",
):
    """
    Unify ``fstruct1`` with ``fstruct2``, and return the resulting feature
    structure. This unified feature structure is the minimal
    feature structure that contains all feature value assignments from both
    ``fstruct1`` and ``fstruct2``, and that preserves all reentrancies.

    If no such feature structure exists (because ``fstruct1`` and
    ``fstruct2`` specify incompatible values for some feature), then
    unification fails, and ``unify`` returns None.

    Bound variables are replaced by their values. Aliased
    variables are replaced by their representative variable
    (if unbound) or the value of their representative variable
    (if bound). I.e., if variable *v* is in ``bindings``,
    then *v* is replaced by ``bindings[v]``. This will
    be repeated until the variable is replaced by an unbound
    variable or a non-variable value.

    Unbound variables are bound when they are unified with
    values; and aliased when they are unified with variables.
    I.e., if variable *v* is not in ``bindings``, and is
    unified with a variable or value *x*, then
    ``bindings[v]`` is set to *x*.

    If ``bindings`` is unspecified, then all variables are
    assumed to be unbound. I.e., ``bindings`` defaults to an
    empty dict.

        >>> from nltk.featstruct import FeatStruct
        >>> FeatStruct('[a=?x]').unify(FeatStruct('[b=?x]'))
        [a=?x, b=?x2]

    :type bindings: dict(Variable -> any)
    :param bindings: A set of variable bindings to be used and
        updated during unification.
    :type trace: bool
    :param trace: If true, generate trace output.
    :type rename_vars: bool
    :param rename_vars: If True, then rename any variables in
        ``fstruct2`` that are also used in ``fstruct1``, in order to
        avoid collisions on variable names.
    :param fail: Optional callback invoked when unification fails;
        presumably its return value becomes the result -- TODO confirm
        against _destructively_unify.
    :param fs_class: The class(es) to treat as feature structures, or
        "default" to infer them from ``fstruct1``.
    """
    # Decide which class(es) will be treated as feature structures,
    # for the purposes of unification.
    if fs_class == "default":
        fs_class = _default_fs_class(fstruct1)
        if _default_fs_class(fstruct2) != fs_class:
            raise ValueError(
                "Mixing FeatStruct objects with Python "
                "dicts and lists is not supported."
            )
    assert isinstance(fstruct1, fs_class)
    assert isinstance(fstruct2, fs_class)

    # If bindings are unspecified, use an empty set of bindings.
    user_bindings = bindings is not None
    if bindings is None:
        bindings = {}

    # Make copies of fstruct1 and fstruct2 (since the unification
    # algorithm is destructive). Do it all at once, to preserve
    # reentrance links between fstruct1 and fstruct2. Copy bindings
    # as well, in case there are any bound vars that contain parts
    # of fstruct1 or fstruct2.
    (fstruct1copy, fstruct2copy, bindings_copy) = copy.deepcopy(
        (fstruct1, fstruct2, bindings)
    )

    # Copy the bindings back to the original bindings dict.
    bindings.update(bindings_copy)

    if rename_vars:
        # Alpha-rename fstruct2's variables away from fstruct1's to
        # avoid accidental capture.
        vars1 = find_variables(fstruct1copy, fs_class)
        vars2 = find_variables(fstruct2copy, fs_class)
        _rename_variables(fstruct2copy, vars1, vars2, {}, fs_class, set())

    # Do the actual unification. If it fails, return None.
    forward = {}
    if trace:
        _trace_unify_start((), fstruct1copy, fstruct2copy)
    try:
        result = _destructively_unify(
            fstruct1copy, fstruct2copy, bindings, forward, trace, fail, fs_class, ()
        )
    except _UnificationFailureError:
        return None

    # _destructively_unify might return UnificationFailure, e.g. if we
    # tried to unify a mapping with a sequence.
    if result is UnificationFailure:
        if fail is None:
            return None
        else:
            return fail(fstruct1copy, fstruct2copy, ())

    # Replace any feature structure that has a forward pointer
    # with the target of its forward pointer.
    result = _apply_forwards(result, forward, fs_class, set())
    if user_bindings:
        _apply_forwards_to_bindings(forward, bindings)

    # Replace bound vars with values.
    _resolve_aliases(bindings)
    _substitute_bindings(result, bindings, fs_class, set())

    # Return the result.
    if trace:
        _trace_unify_succeed((), result)
    if trace:
        _trace_bindings((), bindings)
    return result
The provided code snippet includes necessary dependencies for implementing the `subsumes` function. Write a Python function `def subsumes(fstruct1, fstruct2)` to solve the following problem:
Return True if ``fstruct1`` subsumes ``fstruct2``. I.e., return true if unifying ``fstruct1`` with ``fstruct2`` would result in a feature structure equal to ``fstruct2.`` :rtype: bool
Here is the function:
def subsumes(fstruct1, fstruct2):
    """
    Check whether ``fstruct1`` subsumes ``fstruct2``: i.e. whether
    unifying the two structures would yield a feature structure equal
    to ``fstruct2``.

    :rtype: bool
    """
    unified = unify(fstruct1, fstruct2)
    return fstruct2 == unified
170,595 | import copy
import re
from functools import total_ordering
from nltk.internals import raise_unorderable_types, read_str
from nltk.sem.logic import (
Expression,
LogicalExpressionException,
LogicParser,
SubstituteBindingsI,
Variable,
)
def unify(
    fstruct1,
    fstruct2,
    bindings=None,
    trace=False,
    fail=None,
    rename_vars=True,
    fs_class="default",
):
    """
    Unify ``fstruct1`` with ``fstruct2``, and return the resulting feature
    structure. This unified feature structure is the minimal
    feature structure that contains all feature value assignments from both
    ``fstruct1`` and ``fstruct2``, and that preserves all reentrancies.

    If no such feature structure exists (because ``fstruct1`` and
    ``fstruct2`` specify incompatible values for some feature), then
    unification fails, and ``unify`` returns None.

    Bound variables are replaced by their values. Aliased
    variables are replaced by their representative variable
    (if unbound) or the value of their representative variable
    (if bound). I.e., if variable *v* is in ``bindings``,
    then *v* is replaced by ``bindings[v]``. This will
    be repeated until the variable is replaced by an unbound
    variable or a non-variable value.

    Unbound variables are bound when they are unified with
    values; and aliased when they are unified with variables.
    I.e., if variable *v* is not in ``bindings``, and is
    unified with a variable or value *x*, then
    ``bindings[v]`` is set to *x*.

    If ``bindings`` is unspecified, then all variables are
    assumed to be unbound. I.e., ``bindings`` defaults to an
    empty dict.

    >>> from nltk.featstruct import FeatStruct
    >>> FeatStruct('[a=?x]').unify(FeatStruct('[b=?x]'))
    [a=?x, b=?x2]

    :type bindings: dict(Variable -> any)
    :param bindings: A set of variable bindings to be used and
        updated during unification.  Note: this dict is mutated in
        place, and values it already holds are replaced by deep
        copies of themselves (see the ``bindings.update`` below).
    :type trace: bool
    :param trace: If true, generate trace output.
    :type rename_vars: bool
    :param rename_vars: If True, then rename any variables in
        ``fstruct2`` that are also used in ``fstruct1``, in order to
        avoid collisions on variable names.
    :param fail: Optional callback invoked when two incompatible values
        are encountered; it is called as ``fail(fval1, fval2, path)``
        and its return value is used as the unified value at ``path``.
        If None, a failed unification simply returns None.
    :param fs_class: The class to treat as a feature structure during
        unification, or the string "default", which means: infer it
        from ``fstruct1`` (FeatStruct vs. plain dict/list).
    :return: The unified feature structure, or None if unification
        failed.
    """
    # Decide which class(es) will be treated as feature structures,
    # for the purposes of unification.
    if fs_class == "default":
        fs_class = _default_fs_class(fstruct1)
        if _default_fs_class(fstruct2) != fs_class:
            raise ValueError(
                "Mixing FeatStruct objects with Python "
                "dicts and lists is not supported."
            )
    assert isinstance(fstruct1, fs_class)
    assert isinstance(fstruct2, fs_class)
    # If bindings are unspecified, use an empty set of bindings.
    user_bindings = bindings is not None
    if bindings is None:
        bindings = {}
    # Make copies of fstruct1 and fstruct2 (since the unification
    # algorithm is destructive). Do it all at once, to preserve
    # reentrance links between fstruct1 and fstruct2. Copy bindings
    # as well, in case there are any bound vars that contain parts
    # of fstruct1 or fstruct2.
    (fstruct1copy, fstruct2copy, bindings_copy) = copy.deepcopy(
        (fstruct1, fstruct2, bindings)
    )
    # Copy the bindings back to the original bindings dict.
    # NOTE: this replaces the caller's bound values with their deep
    # copies, so object identity (not equality) of bound values changes.
    bindings.update(bindings_copy)
    if rename_vars:
        # Rename fstruct2's variables that collide with fstruct1's
        # (e.g. ?x -> ?x2), so the two structures' variables stay distinct.
        vars1 = find_variables(fstruct1copy, fs_class)
        vars2 = find_variables(fstruct2copy, fs_class)
        _rename_variables(fstruct2copy, vars1, vars2, {}, fs_class, set())
    # Do the actual unification. If it fails, return None.
    forward = {}
    if trace:
        _trace_unify_start((), fstruct1copy, fstruct2copy)
    try:
        result = _destructively_unify(
            fstruct1copy, fstruct2copy, bindings, forward, trace, fail, fs_class, ()
        )
    except _UnificationFailureError:
        return None
    # _destructively_unify might return UnificationFailure, e.g. if we
    # tried to unify a mapping with a sequence.
    if result is UnificationFailure:
        if fail is None:
            return None
        else:
            return fail(fstruct1copy, fstruct2copy, ())
    # Replace any feature structure that has a forward pointer
    # with the target of its forward pointer.
    result = _apply_forwards(result, forward, fs_class, set())
    if user_bindings:
        _apply_forwards_to_bindings(forward, bindings)
    # Replace bound vars with values.
    _resolve_aliases(bindings)
    _substitute_bindings(result, bindings, fs_class, set())
    # Return the result.
    if trace:
        _trace_unify_succeed((), result)
    if trace:
        _trace_bindings((), bindings)
    return result
The provided code snippet includes necessary dependencies for implementing the `conflicts` function. Write a Python function `def conflicts(fstruct1, fstruct2, trace=0)` to solve the following problem:
Return a list of the feature paths of all features which are assigned incompatible values by ``fstruct1`` and ``fstruct2``. :rtype: list(tuple)
Here is the function:
def conflicts(fstruct1, fstruct2, trace=0):
    """
    Return a list of the feature paths of all features which are
    assigned incompatible values by ``fstruct1`` and ``fstruct2``.

    :rtype: list(tuple)
    """
    clash_paths = []

    def record_clash(fval1, fval2, path):
        # Remember where the clash occurred; return fval1 so that
        # unification can continue past this failure point.
        clash_paths.append(path)
        return fval1

    unify(fstruct1, fstruct2, fail=record_clash, trace=trace)
    return clash_paths
170,596 | import copy
import re
from functools import total_ordering
from nltk.internals import raise_unorderable_types, read_str
from nltk.sem.logic import (
Expression,
LogicalExpressionException,
LogicParser,
SubstituteBindingsI,
Variable,
)
The provided code snippet includes necessary dependencies for implementing the `_flatten` function. Write a Python function `def _flatten(lst, cls)` to solve the following problem:
Helper function -- return a copy of ``lst`` in which each element of type ``cls`` is spliced in (its items inserted individually) rather than appended as a single element.
Here is the function:
def _flatten(lst, cls):
"""
Helper function -- return a copy of list, with all elements of
type ``cls`` spliced in rather than appended in.
"""
result = []
for elt in lst:
if isinstance(elt, cls):
result.extend(elt)
else:
result.append(elt)
return result | Helper function -- return a copy of list, with all elements of type ``cls`` spliced in rather than appended in. |
170,597 | import copy
import re
from functools import total_ordering
from nltk.internals import raise_unorderable_types, read_str
from nltk.sem.logic import (
Expression,
LogicalExpressionException,
LogicParser,
SubstituteBindingsI,
Variable,
)
class FeatStruct(SubstituteBindingsI):
"""
A mapping from feature identifiers to feature values, where each
feature value is either a basic value (such as a string or an
integer), or a nested feature structure. There are two types of
feature structure:
- feature dictionaries, implemented by ``FeatDict``, act like
Python dictionaries. Feature identifiers may be strings or
instances of the ``Feature`` class.
- feature lists, implemented by ``FeatList``, act like Python
lists. Feature identifiers are integers.
Feature structures may be indexed using either simple feature
identifiers or 'feature paths.' A feature path is a sequence
of feature identifiers that stand for a corresponding sequence of
indexing operations. In particular, ``fstruct[(f1,f2,...,fn)]`` is
equivalent to ``fstruct[f1][f2]...[fn]``.
Feature structures may contain reentrant feature structures. A
"reentrant feature structure" is a single feature structure
object that can be accessed via multiple feature paths. Feature
structures may also be cyclic. A feature structure is "cyclic"
if there is any feature path from the feature structure to itself.
Two feature structures are considered equal if they assign the
same values to all features, and have the same reentrancies.
By default, feature structures are mutable. They may be made
immutable with the ``freeze()`` method. Once they have been
frozen, they may be hashed, and thus used as dictionary keys.
"""
_frozen = False
""":ivar: A flag indicating whether this feature structure is
frozen or not. Once this flag is set, it should never be
un-set; and no further modification should be made to this
feature structure."""
##////////////////////////////////////////////////////////////
# { Constructor
##////////////////////////////////////////////////////////////
def __new__(cls, features=None, **morefeatures):
"""
Construct and return a new feature structure. If this
constructor is called directly, then the returned feature
structure will be an instance of either the ``FeatDict`` class
or the ``FeatList`` class.
:param features: The initial feature values for this feature
structure:
- FeatStruct(string) -> FeatStructReader().read(string)
- FeatStruct(mapping) -> FeatDict(mapping)
- FeatStruct(sequence) -> FeatList(sequence)
- FeatStruct() -> FeatDict()
:param morefeatures: If ``features`` is a mapping or None,
then ``morefeatures`` provides additional features for the
``FeatDict`` constructor.
"""
# If the FeatStruct constructor is called directly, then decide
# whether to create a FeatDict or a FeatList, based on the
# contents of the `features` argument.
if cls is FeatStruct:
if features is None:
return FeatDict.__new__(FeatDict, **morefeatures)
elif _is_mapping(features):
return FeatDict.__new__(FeatDict, features, **morefeatures)
elif morefeatures:
raise TypeError(
"Keyword arguments may only be specified "
"if features is None or is a mapping."
)
if isinstance(features, str):
if FeatStructReader._START_FDICT_RE.match(features):
return FeatDict.__new__(FeatDict, features, **morefeatures)
else:
return FeatList.__new__(FeatList, features, **morefeatures)
elif _is_sequence(features):
return FeatList.__new__(FeatList, features)
else:
raise TypeError("Expected string or mapping or sequence")
# Otherwise, construct the object as normal.
else:
return super().__new__(cls, features, **morefeatures)
##////////////////////////////////////////////////////////////
# { Uniform Accessor Methods
##////////////////////////////////////////////////////////////
# These helper functions allow the methods defined by FeatStruct
# to treat all feature structures as mappings, even if they're
# really lists. (Lists are treated as mappings from ints to vals)
def _keys(self):
"""Return an iterable of the feature identifiers used by this
FeatStruct."""
raise NotImplementedError() # Implemented by subclasses.
def _values(self):
"""Return an iterable of the feature values directly defined
by this FeatStruct."""
raise NotImplementedError() # Implemented by subclasses.
def _items(self):
"""Return an iterable of (fid,fval) pairs, where fid is a
feature identifier and fval is the corresponding feature
value, for all features defined by this FeatStruct."""
raise NotImplementedError() # Implemented by subclasses.
##////////////////////////////////////////////////////////////
# { Equality & Hashing
##////////////////////////////////////////////////////////////
def equal_values(self, other, check_reentrance=False):
"""
Return True if ``self`` and ``other`` assign the same value to
to every feature. In particular, return true if
``self[p]==other[p]`` for every feature path *p* such
that ``self[p]`` or ``other[p]`` is a base value (i.e.,
not a nested feature structure).
:param check_reentrance: If True, then also return False if
there is any difference between the reentrances of ``self``
and ``other``.
:note: the ``==`` is equivalent to ``equal_values()`` with
``check_reentrance=True``.
"""
return self._equal(other, check_reentrance, set(), set(), set())
def __eq__(self, other):
"""
Return true if ``self`` and ``other`` are both feature structures,
assign the same values to all features, and contain the same
reentrances. I.e., return
``self.equal_values(other, check_reentrance=True)``.
:see: ``equal_values()``
"""
return self._equal(other, True, set(), set(), set())
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if not isinstance(other, FeatStruct):
# raise_unorderable_types("<", self, other)
# Sometimes feature values can be pure strings,
# so we need to be able to compare with non-featstructs:
return self.__class__.__name__ < other.__class__.__name__
else:
return len(self) < len(other)
def __hash__(self):
"""
If this feature structure is frozen, return its hash value;
otherwise, raise ``TypeError``.
"""
if not self._frozen:
raise TypeError("FeatStructs must be frozen before they " "can be hashed.")
try:
return self._hash
except AttributeError:
self._hash = self._calculate_hashvalue(set())
return self._hash
def _equal(
self, other, check_reentrance, visited_self, visited_other, visited_pairs
):
"""
Return True iff self and other have equal values.
:param visited_self: A set containing the ids of all ``self``
feature structures we've already visited.
:param visited_other: A set containing the ids of all ``other``
feature structures we've already visited.
:param visited_pairs: A set containing ``(selfid, otherid)`` pairs
for all pairs of feature structures we've already visited.
"""
# If we're the same object, then we're equal.
if self is other:
return True
# If we have different classes, we're definitely not equal.
if self.__class__ != other.__class__:
return False
# If we define different features, we're definitely not equal.
# (Perform len test first because it's faster -- we should
# do profiling to see if this actually helps)
if len(self) != len(other):
return False
if set(self._keys()) != set(other._keys()):
return False
# If we're checking reentrance, then any time we revisit a
# structure, make sure that it was paired with the same
# feature structure that it is now. Note: if check_reentrance,
# then visited_pairs will never contain two pairs whose first
# values are equal, or two pairs whose second values are equal.
if check_reentrance:
if id(self) in visited_self or id(other) in visited_other:
return (id(self), id(other)) in visited_pairs
# If we're not checking reentrance, then we still need to deal
# with cycles. If we encounter the same (self, other) pair a
# second time, then we won't learn anything more by examining
# their children a second time, so just return true.
else:
if (id(self), id(other)) in visited_pairs:
return True
# Keep track of which nodes we've visited.
visited_self.add(id(self))
visited_other.add(id(other))
visited_pairs.add((id(self), id(other)))
# Now we have to check all values. If any of them don't match,
# then return false.
for (fname, self_fval) in self._items():
other_fval = other[fname]
if isinstance(self_fval, FeatStruct):
if not self_fval._equal(
other_fval,
check_reentrance,
visited_self,
visited_other,
visited_pairs,
):
return False
else:
if self_fval != other_fval:
return False
# Everything matched up; return true.
return True
def _calculate_hashvalue(self, visited):
"""
Return a hash value for this feature structure.
:require: ``self`` must be frozen.
:param visited: A set containing the ids of all feature
structures we've already visited while hashing.
"""
if id(self) in visited:
return 1
visited.add(id(self))
hashval = 5831
for (fname, fval) in sorted(self._items()):
hashval *= 37
hashval += hash(fname)
hashval *= 37
if isinstance(fval, FeatStruct):
hashval += fval._calculate_hashvalue(visited)
else:
hashval += hash(fval)
# Convert to a 32 bit int.
hashval = int(hashval & 0x7FFFFFFF)
return hashval
##////////////////////////////////////////////////////////////
# { Freezing
##////////////////////////////////////////////////////////////
#: Error message used by mutating methods when called on a frozen
#: feature structure.
_FROZEN_ERROR = "Frozen FeatStructs may not be modified."
def freeze(self):
"""
Make this feature structure, and any feature structures it
contains, immutable. Note: this method does not attempt to
'freeze' any feature value that is not a ``FeatStruct``; it
is recommended that you use only immutable feature values.
"""
if self._frozen:
return
self._freeze(set())
def frozen(self):
"""
Return True if this feature structure is immutable. Feature
structures can be made immutable with the ``freeze()`` method.
Immutable feature structures may not be made mutable again,
but new mutable copies can be produced with the ``copy()`` method.
"""
return self._frozen
def _freeze(self, visited):
"""
Make this feature structure, and any feature structure it
contains, immutable.
:param visited: A set containing the ids of all feature
structures we've already visited while freezing.
"""
if id(self) in visited:
return
visited.add(id(self))
self._frozen = True
for (fname, fval) in sorted(self._items()):
if isinstance(fval, FeatStruct):
fval._freeze(visited)
##////////////////////////////////////////////////////////////
# { Copying
##////////////////////////////////////////////////////////////
def copy(self, deep=True):
"""
Return a new copy of ``self``. The new copy will not be frozen.
:param deep: If true, create a deep copy; if false, create
a shallow copy.
"""
if deep:
return copy.deepcopy(self)
else:
return self.__class__(self)
# Subclasses should define __deepcopy__ to ensure that the new
# copy will not be frozen.
def __deepcopy__(self, memo):
raise NotImplementedError() # Implemented by subclasses.
##////////////////////////////////////////////////////////////
# { Structural Information
##////////////////////////////////////////////////////////////
def cyclic(self):
"""
Return True if this feature structure contains itself.
"""
return self._find_reentrances({})[id(self)]
def walk(self):
"""
Return an iterator that generates this feature structure, and
each feature structure it contains. Each feature structure will
be generated exactly once.
"""
return self._walk(set())
def _walk(self, visited):
"""
Return an iterator that generates this feature structure, and
each feature structure it contains.
:param visited: A set containing the ids of all feature
structures we've already visited while freezing.
"""
raise NotImplementedError() # Implemented by subclasses.
def _walk(self, visited):
if id(self) in visited:
return
visited.add(id(self))
yield self
for fval in self._values():
if isinstance(fval, FeatStruct):
yield from fval._walk(visited)
# Walk through the feature tree. The first time we see a feature
# value, map it to False (not reentrant). If we see a feature
# value more than once, then map it to True (reentrant).
def _find_reentrances(self, reentrances):
"""
Return a dictionary that maps from the ``id`` of each feature
structure contained in ``self`` (including ``self``) to a
boolean value, indicating whether it is reentrant or not.
"""
if id(self) in reentrances:
# We've seen it more than once.
reentrances[id(self)] = True
else:
# This is the first time we've seen it.
reentrances[id(self)] = False
# Recurse to contained feature structures.
for fval in self._values():
if isinstance(fval, FeatStruct):
fval._find_reentrances(reentrances)
return reentrances
##////////////////////////////////////////////////////////////
# { Variables & Bindings
##////////////////////////////////////////////////////////////
def substitute_bindings(self, bindings):
""":see: ``nltk.featstruct.substitute_bindings()``"""
return substitute_bindings(self, bindings)
def retract_bindings(self, bindings):
""":see: ``nltk.featstruct.retract_bindings()``"""
return retract_bindings(self, bindings)
def variables(self):
""":see: ``nltk.featstruct.find_variables()``"""
return find_variables(self)
def rename_variables(self, vars=None, used_vars=(), new_vars=None):
""":see: ``nltk.featstruct.rename_variables()``"""
return rename_variables(self, vars, used_vars, new_vars)
def remove_variables(self):
"""
Return the feature structure that is obtained by deleting
any feature whose value is a ``Variable``.
:rtype: FeatStruct
"""
return remove_variables(self)
##////////////////////////////////////////////////////////////
# { Unification
##////////////////////////////////////////////////////////////
def unify(self, other, bindings=None, trace=False, fail=None, rename_vars=True):
return unify(self, other, bindings, trace, fail, rename_vars)
def subsumes(self, other):
"""
Return True if ``self`` subsumes ``other``. I.e., return true
If unifying ``self`` with ``other`` would result in a feature
structure equal to ``other``.
"""
return subsumes(self, other)
##////////////////////////////////////////////////////////////
# { String Representations
##////////////////////////////////////////////////////////////
def __repr__(self):
"""
Display a single-line representation of this feature structure,
suitable for embedding in other representations.
"""
return self._repr(self._find_reentrances({}), {})
def _repr(self, reentrances, reentrance_ids):
"""
Return a string representation of this feature structure.
:param reentrances: A dictionary that maps from the ``id`` of
each feature value in self, indicating whether that value
is reentrant or not.
:param reentrance_ids: A dictionary mapping from each ``id``
of a feature value to a unique identifier. This is modified
by ``repr``: the first time a reentrant feature value is
displayed, an identifier is added to ``reentrance_ids`` for it.
"""
raise NotImplementedError()
def unify(
    fstruct1,
    fstruct2,
    bindings=None,
    trace=False,
    fail=None,
    rename_vars=True,
    fs_class="default",
):
    """
    Unify ``fstruct1`` with ``fstruct2``, and return the resulting feature
    structure. This unified feature structure is the minimal
    feature structure that contains all feature value assignments from both
    ``fstruct1`` and ``fstruct2``, and that preserves all reentrancies.

    If no such feature structure exists (because ``fstruct1`` and
    ``fstruct2`` specify incompatible values for some feature), then
    unification fails, and ``unify`` returns None.

    Bound variables are replaced by their values. Aliased
    variables are replaced by their representative variable
    (if unbound) or the value of their representative variable
    (if bound). I.e., if variable *v* is in ``bindings``,
    then *v* is replaced by ``bindings[v]``. This will
    be repeated until the variable is replaced by an unbound
    variable or a non-variable value.

    Unbound variables are bound when they are unified with
    values; and aliased when they are unified with variables.
    I.e., if variable *v* is not in ``bindings``, and is
    unified with a variable or value *x*, then
    ``bindings[v]`` is set to *x*.

    If ``bindings`` is unspecified, then all variables are
    assumed to be unbound. I.e., ``bindings`` defaults to an
    empty dict.

    >>> from nltk.featstruct import FeatStruct
    >>> FeatStruct('[a=?x]').unify(FeatStruct('[b=?x]'))
    [a=?x, b=?x2]

    :type bindings: dict(Variable -> any)
    :param bindings: A set of variable bindings to be used and
        updated during unification.  Note: this dict is mutated in
        place, and values it already holds are replaced by deep
        copies of themselves (see the ``bindings.update`` below).
    :type trace: bool
    :param trace: If true, generate trace output.
    :type rename_vars: bool
    :param rename_vars: If True, then rename any variables in
        ``fstruct2`` that are also used in ``fstruct1``, in order to
        avoid collisions on variable names.
    :param fail: Optional callback invoked when two incompatible values
        are encountered; it is called as ``fail(fval1, fval2, path)``
        and its return value is used as the unified value at ``path``.
        If None, a failed unification simply returns None.
    :param fs_class: The class to treat as a feature structure during
        unification, or the string "default", which means: infer it
        from ``fstruct1`` (FeatStruct vs. plain dict/list).
    :return: The unified feature structure, or None if unification
        failed.
    """
    # Decide which class(es) will be treated as feature structures,
    # for the purposes of unification.
    if fs_class == "default":
        fs_class = _default_fs_class(fstruct1)
        if _default_fs_class(fstruct2) != fs_class:
            raise ValueError(
                "Mixing FeatStruct objects with Python "
                "dicts and lists is not supported."
            )
    assert isinstance(fstruct1, fs_class)
    assert isinstance(fstruct2, fs_class)
    # If bindings are unspecified, use an empty set of bindings.
    user_bindings = bindings is not None
    if bindings is None:
        bindings = {}
    # Make copies of fstruct1 and fstruct2 (since the unification
    # algorithm is destructive). Do it all at once, to preserve
    # reentrance links between fstruct1 and fstruct2. Copy bindings
    # as well, in case there are any bound vars that contain parts
    # of fstruct1 or fstruct2.
    (fstruct1copy, fstruct2copy, bindings_copy) = copy.deepcopy(
        (fstruct1, fstruct2, bindings)
    )
    # Copy the bindings back to the original bindings dict.
    # NOTE: this replaces the caller's bound values with their deep
    # copies, so object identity (not equality) of bound values changes.
    bindings.update(bindings_copy)
    if rename_vars:
        # Rename fstruct2's variables that collide with fstruct1's
        # (e.g. ?x -> ?x2), so the two structures' variables stay distinct.
        vars1 = find_variables(fstruct1copy, fs_class)
        vars2 = find_variables(fstruct2copy, fs_class)
        _rename_variables(fstruct2copy, vars1, vars2, {}, fs_class, set())
    # Do the actual unification. If it fails, return None.
    forward = {}
    if trace:
        _trace_unify_start((), fstruct1copy, fstruct2copy)
    try:
        result = _destructively_unify(
            fstruct1copy, fstruct2copy, bindings, forward, trace, fail, fs_class, ()
        )
    except _UnificationFailureError:
        return None
    # _destructively_unify might return UnificationFailure, e.g. if we
    # tried to unify a mapping with a sequence.
    if result is UnificationFailure:
        if fail is None:
            return None
        else:
            return fail(fstruct1copy, fstruct2copy, ())
    # Replace any feature structure that has a forward pointer
    # with the target of its forward pointer.
    result = _apply_forwards(result, forward, fs_class, set())
    if user_bindings:
        _apply_forwards_to_bindings(forward, bindings)
    # Replace bound vars with values.
    _resolve_aliases(bindings)
    _substitute_bindings(result, bindings, fs_class, set())
    # Return the result.
    if trace:
        _trace_unify_succeed((), result)
    if trace:
        _trace_bindings((), bindings)
    return result
def display_unification(fs1, fs2, indent=" "):
    """
    Print ``fs1`` and ``fs2`` side by side, unify them, print the result
    (or "(FAILED)"), and return the unified feature structure (None on
    failure).  Any variable bindings produced by the unification are
    printed below the result.

    :param indent: String prefixed to every printed line.
    """
    # Print the two input feature structures, side by side.
    fs1_lines = ("%s" % fs1).split("\n")
    fs2_lines = ("%s" % fs2).split("\n")
    # Pad the shorter rendering with blank "[  ]" rows so both columns
    # have the same height.  (Pad by the exact difference rather than
    # the old over-padding; zip silently discarded the excess anyway.)
    if len(fs1_lines) > len(fs2_lines):
        blankline = "[" + " " * (len(fs2_lines[0]) - 2) + "]"
        fs2_lines += [blankline] * (len(fs1_lines) - len(fs2_lines))
    else:
        blankline = "[" + " " * (len(fs1_lines[0]) - 2) + "]"
        fs1_lines += [blankline] * (len(fs2_lines) - len(fs1_lines))
    for (fs1_line, fs2_line) in zip(fs1_lines, fs2_lines):
        print(indent + fs1_line + " " + fs2_line)
    print(indent + "-" * len(fs1_lines[0]) + " " + "-" * len(fs2_lines[0]))
    linelen = len(fs1_lines[0]) * 2 + 3
    print(indent + "| |".center(linelen))
    print(indent + "+-----UNIFY-----+".center(linelen))
    print(indent + "|".center(linelen))
    print(indent + "V".center(linelen))
    bindings = {}
    result = fs1.unify(fs2, bindings)
    if result is None:
        print(indent + "(FAILED)".center(linelen))
    else:
        print(
            "\n".join(indent + l.center(linelen) for l in ("%s" % result).split("\n"))
        )
        # BUG FIX: ``bindings`` is a plain dict, which has no
        # ``bound_variables()`` method -- the old guard raised
        # AttributeError whenever unification produced any bindings.
        if bindings:
            print(repr(bindings).center(linelen))
    return result
def interactive_demo(trace=False):
    """
    Interactively demonstrate unification: repeatedly ask the user to
    pick two feature structures from a pool, unify them, and add any
    new result back to the pool.

    :param trace: If true, show the unification trace rather than the
        side-by-side display.
    """
    import random
    import sys

    HELP = """
    1-%d: Select the corresponding feature structure
    q: Quit
    t: Turn tracing on or off
    l: List all feature structures
    ?: Help
    """
    print(
        """
    This demo will repeatedly present you with a list of feature
    structures, and ask you to choose two for unification.  Whenever a
    new feature structure is generated, it is added to the list of
    choices that you can pick from.  However, since this can be a
    large number of feature structures, the demo will only print out a
    random subset for you to choose between at a given time.  If you
    want to see the complete lists, type "l".  For a list of valid
    commands, type "?".
    """
    )
    print('Press "Enter" to continue...')
    sys.stdin.readline()
    fstruct_strings = [
        "[agr=[number=sing, gender=masc]]",
        "[agr=[gender=masc, person=3]]",
        "[agr=[gender=fem, person=3]]",
        "[subj=[agr=(1)[]], agr->(1)]",
        "[obj=?x]",
        "[subj=?x]",
        "[/=None]",
        "[/=NP]",
        "[cat=NP]",
        "[cat=VP]",
        "[cat=PP]",
        "[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]",
        "[gender=masc, agr=?C]",
        "[gender=?S, agr=[gender=?S,person=3]]",
    ]
    all_fstructs = [
        (i, FeatStruct(fstruct_strings[i])) for i in range(len(fstruct_strings))
    ]

    def list_fstructs(fstructs):
        # Display each (index, fstruct) pair, 1-based, with continuation
        # lines of multi-line renderings indented under the index.
        for i, fstruct in fstructs:
            print()
            lines = ("%s" % fstruct).split("\n")
            print("%3d: %s" % (i + 1, lines[0]))
            for line in lines[1:]:
                print("     " + line)
        print()

    while True:
        # Pick 5 feature structures at random from the master list.
        MAX_CHOICES = 5
        if len(all_fstructs) > MAX_CHOICES:
            fstructs = sorted(random.sample(all_fstructs, MAX_CHOICES))
        else:
            fstructs = all_fstructs
        print("_" * 75)
        print("Choose two feature structures to unify:")
        list_fstructs(fstructs)
        selected = [None, None]
        for (nth, i) in (("First", 0), ("Second", 1)):
            while selected[i] is None:
                print(
                    (
                        "%s feature structure (1-%d,q,t,l,?): "
                        % (nth, len(all_fstructs))
                    ),
                    end=" ",
                )
                try:
                    # Don't shadow the ``input`` builtin.
                    user_input = sys.stdin.readline().strip()
                    if user_input in ("q", "Q", "x", "X"):
                        return
                    if user_input in ("t", "T"):
                        trace = not trace
                        print(" Trace = %s" % trace)
                        continue
                    if user_input in ("h", "H", "?"):
                        # Consistency fix: the prompt advertises the full
                        # range 1-len(all_fstructs), so HELP must too
                        # (the old code used len(fstructs), the subset).
                        print(HELP % len(all_fstructs))
                        continue
                    if user_input in ("l", "L"):
                        list_fstructs(all_fstructs)
                        continue
                    num = int(user_input) - 1
                    selected[i] = all_fstructs[num][1]
                    print()
                # Narrowed from a bare ``except:`` so Ctrl-C still exits;
                # int() raises ValueError, a bad index raises IndexError.
                except (ValueError, IndexError):
                    # Message fix: the choices are feature structures,
                    # not sentences.
                    print("Bad feature structure number")
                    continue
        if trace:
            result = selected[0].unify(selected[1], trace=1)
        else:
            result = display_unification(selected[0], selected[1])
        if result is not None:
            # Only add the result to the pool if it's genuinely new.
            for i, fstruct in all_fstructs:
                if repr(result) == repr(fstruct):
                    break
            else:
                all_fstructs.append((len(all_fstructs), result))
        print('\nType "Enter" to continue unifying; or "q" to quit.')
        user_input = sys.stdin.readline().strip()
        if user_input in ("q", "Q", "x", "X"):
            return
170,598 | import copy
import re
from functools import total_ordering
from nltk.internals import raise_unorderable_types, read_str
from nltk.sem.logic import (
Expression,
LogicalExpressionException,
LogicParser,
SubstituteBindingsI,
Variable,
)
class FeatStruct(SubstituteBindingsI):
"""
A mapping from feature identifiers to feature values, where each
feature value is either a basic value (such as a string or an
integer), or a nested feature structure. There are two types of
feature structure:
- feature dictionaries, implemented by ``FeatDict``, act like
Python dictionaries. Feature identifiers may be strings or
instances of the ``Feature`` class.
- feature lists, implemented by ``FeatList``, act like Python
lists. Feature identifiers are integers.
Feature structures may be indexed using either simple feature
identifiers or 'feature paths.' A feature path is a sequence
of feature identifiers that stand for a corresponding sequence of
indexing operations. In particular, ``fstruct[(f1,f2,...,fn)]`` is
equivalent to ``fstruct[f1][f2]...[fn]``.
Feature structures may contain reentrant feature structures. A
"reentrant feature structure" is a single feature structure
object that can be accessed via multiple feature paths. Feature
structures may also be cyclic. A feature structure is "cyclic"
if there is any feature path from the feature structure to itself.
Two feature structures are considered equal if they assign the
same values to all features, and have the same reentrancies.
By default, feature structures are mutable. They may be made
immutable with the ``freeze()`` method. Once they have been
frozen, they may be hashed, and thus used as dictionary keys.
"""
_frozen = False
""":ivar: A flag indicating whether this feature structure is
frozen or not. Once this flag is set, it should never be
un-set; and no further modification should be made to this
feature structure."""
##////////////////////////////////////////////////////////////
# { Constructor
##////////////////////////////////////////////////////////////
def __new__(cls, features=None, **morefeatures):
"""
Construct and return a new feature structure. If this
constructor is called directly, then the returned feature
structure will be an instance of either the ``FeatDict`` class
or the ``FeatList`` class.
:param features: The initial feature values for this feature
structure:
- FeatStruct(string) -> FeatStructReader().read(string)
- FeatStruct(mapping) -> FeatDict(mapping)
- FeatStruct(sequence) -> FeatList(sequence)
- FeatStruct() -> FeatDict()
:param morefeatures: If ``features`` is a mapping or None,
then ``morefeatures`` provides additional features for the
``FeatDict`` constructor.
"""
# If the FeatStruct constructor is called directly, then decide
# whether to create a FeatDict or a FeatList, based on the
# contents of the `features` argument.
if cls is FeatStruct:
if features is None:
return FeatDict.__new__(FeatDict, **morefeatures)
elif _is_mapping(features):
return FeatDict.__new__(FeatDict, features, **morefeatures)
elif morefeatures:
raise TypeError(
"Keyword arguments may only be specified "
"if features is None or is a mapping."
)
if isinstance(features, str):
if FeatStructReader._START_FDICT_RE.match(features):
return FeatDict.__new__(FeatDict, features, **morefeatures)
else:
return FeatList.__new__(FeatList, features, **morefeatures)
elif _is_sequence(features):
return FeatList.__new__(FeatList, features)
else:
raise TypeError("Expected string or mapping or sequence")
# Otherwise, construct the object as normal.
else:
return super().__new__(cls, features, **morefeatures)
##////////////////////////////////////////////////////////////
# { Uniform Accessor Methods
##////////////////////////////////////////////////////////////
# These helper functions allow the methods defined by FeatStruct
# to treat all feature structures as mappings, even if they're
# really lists.  (Lists are treated as mappings from ints to vals)

def _keys(self):
    """Return an iterable of the feature identifiers used by this
    FeatStruct."""
    raise NotImplementedError()  # Implemented by subclasses.

def _values(self):
    """Return an iterable of the feature values directly defined
    by this FeatStruct."""
    raise NotImplementedError()  # Implemented by subclasses.

def _items(self):
    """Return an iterable of (fid, fval) pairs, where fid is a
    feature identifier and fval is the corresponding feature
    value, for all features defined by this FeatStruct."""
    raise NotImplementedError()  # Implemented by subclasses.
##////////////////////////////////////////////////////////////
# { Equality & Hashing
##////////////////////////////////////////////////////////////

def equal_values(self, other, check_reentrance=False):
    """
    Return True if ``self`` and ``other`` assign the same value to
    every feature.  In particular, return true if
    ``self[p]==other[p]`` for every feature path *p* such
    that ``self[p]`` or ``other[p]`` is a base value (i.e.,
    not a nested feature structure).

    :param check_reentrance: If True, then also return False if
        there is any difference between the reentrances of ``self``
        and ``other``.
    :note: the ``==`` operator is equivalent to ``equal_values()``
        with ``check_reentrance=True``.
    """
    # Fresh visited-sets for this comparison walk.
    seen_self, seen_other, seen_pairs = set(), set(), set()
    return self._equal(other, check_reentrance, seen_self, seen_other, seen_pairs)
def __eq__(self, other):
    """
    Return true if ``self`` and ``other`` are both feature structures,
    assign the same values to all features, and contain the same
    reentrances.  I.e., return
    ``self.equal_values(other, check_reentrance=True)``.

    :see: ``equal_values()``
    """
    return self._equal(other, True, set(), set(), set())

def __ne__(self, other):
    """Return the negation of ``self == other``."""
    return not (self == other)
def __lt__(self, other):
    """
    Order feature structures by length; when ``other`` is not a
    feature structure, fall back on comparing class names (feature
    values are sometimes plain strings, which must still compare
    without raising).
    """
    if isinstance(other, FeatStruct):
        return len(self) < len(other)
    # Sometimes feature values can be pure strings, so we need to
    # be able to compare with non-featstructs: order by class name.
    return self.__class__.__name__ < other.__class__.__name__
def __hash__(self):
    """
    If this feature structure is frozen, return its hash value;
    otherwise, raise ``TypeError``.
    """
    if not self._frozen:
        raise TypeError("FeatStructs must be frozen before they can be hashed.")
    # Compute the hash lazily on first use and memoize it on the
    # (frozen, hence stable) instance.
    cached = getattr(self, "_hash", None)
    if cached is None:
        cached = self._calculate_hashvalue(set())
        self._hash = cached
    return cached
def _equal(
    self, other, check_reentrance, visited_self, visited_other, visited_pairs
):
    """
    Return True iff self and other have equal values.

    :param check_reentrance: If True, also require that ``self`` and
        ``other`` share the same reentrance (structure-sharing) pattern.
    :param visited_self: A set containing the ids of all ``self``
        feature structures we've already visited.
    :param visited_other: A set containing the ids of all ``other``
        feature structures we've already visited.
    :param visited_pairs: A set containing ``(selfid, otherid)`` pairs
        for all pairs of feature structures we've already visited.
    """
    # If we're the same object, then we're equal.
    if self is other:
        return True
    # If we have different classes, we're definitely not equal.
    if self.__class__ != other.__class__:
        return False
    # If we define different features, we're definitely not equal.
    # (Perform len test first because it's faster -- we should
    # do profiling to see if this actually helps)
    if len(self) != len(other):
        return False
    if set(self._keys()) != set(other._keys()):
        return False
    # If we're checking reentrance, then any time we revisit a
    # structure, make sure that it was paired with the same
    # feature structure that it is now.  Note: if check_reentrance,
    # then visited_pairs will never contain two pairs whose first
    # values are equal, or two pairs whose second values are equal.
    if check_reentrance:
        if id(self) in visited_self or id(other) in visited_other:
            return (id(self), id(other)) in visited_pairs
    # If we're not checking reentrance, then we still need to deal
    # with cycles.  If we encounter the same (self, other) pair a
    # second time, then we won't learn anything more by examining
    # their children a second time, so just return true.
    else:
        if (id(self), id(other)) in visited_pairs:
            return True
    # Keep track of which nodes we've visited.
    visited_self.add(id(self))
    visited_other.add(id(other))
    visited_pairs.add((id(self), id(other)))
    # Now we have to check all values.  If any of them don't match,
    # then return false.
    for (fname, self_fval) in self._items():
        other_fval = other[fname]
        if isinstance(self_fval, FeatStruct):
            # Recurse into nested structures, threading the visited
            # bookkeeping through so cycles terminate.
            if not self_fval._equal(
                other_fval,
                check_reentrance,
                visited_self,
                visited_other,
                visited_pairs,
            ):
                return False
        else:
            if self_fval != other_fval:
                return False
    # Everything matched up; return true.
    return True
def _calculate_hashvalue(self, visited):
    """
    Return a hash value for this feature structure.

    :require: ``self`` must be frozen.
    :param visited: A set containing the ids of all feature
        structures we've already visited while hashing.
    """
    if id(self) in visited:
        # Cycle: contribute a constant so hashing terminates.
        return 1
    visited.add(id(self))
    hashval = 5831
    # Sort items so the result is independent of insertion order.
    for (fname, fval) in sorted(self._items()):
        hashval *= 37
        hashval += hash(fname)
        hashval *= 37
        if isinstance(fval, FeatStruct):
            hashval += fval._calculate_hashvalue(visited)
        else:
            hashval += hash(fval)
    # Convert to a 32 bit int.
    hashval = int(hashval & 0x7FFFFFFF)
    return hashval
##////////////////////////////////////////////////////////////
# { Freezing
##////////////////////////////////////////////////////////////

#: Error message used by mutating methods when called on a frozen
#: feature structure.
_FROZEN_ERROR = "Frozen FeatStructs may not be modified."

def freeze(self):
    """
    Make this feature structure, and any feature structures it
    contains, immutable.  Note: this method does not attempt to
    'freeze' any feature value that is not a ``FeatStruct``; it
    is recommended that you use only immutable feature values.
    """
    # No-op when already frozen.
    if not self._frozen:
        self._freeze(set())

def frozen(self):
    """
    Return True if this feature structure is immutable.  Feature
    structures can be made immutable with the ``freeze()`` method.
    Immutable feature structures may not be made mutable again,
    but new mutable copies can be produced with the ``copy()`` method.
    """
    return self._frozen
def _freeze(self, visited):
    """
    Recursively mark this feature structure, and every feature
    structure it contains, as immutable.

    :param visited: ids of the feature structures already frozen on
        this walk; guards against cycles.
    """
    if id(self) in visited:
        return
    visited.add(id(self))
    self._frozen = True
    for _fname, child in sorted(self._items()):
        if isinstance(child, FeatStruct):
            child._freeze(visited)
##////////////////////////////////////////////////////////////
# { Copying
##////////////////////////////////////////////////////////////

def copy(self, deep=True):
    """
    Return a new, unfrozen copy of ``self``.

    :param deep: If true, create a deep copy; if false, create
        a shallow copy.
    """
    return copy.deepcopy(self) if deep else self.__class__(self)

# Subclasses should define __deepcopy__ to ensure that the new
# copy will not be frozen.
def __deepcopy__(self, memo):
    raise NotImplementedError()  # Implemented by subclasses.
##////////////////////////////////////////////////////////////
# { Structural Information
##////////////////////////////////////////////////////////////

def cyclic(self):
    """
    Return True if this feature structure contains itself.
    """
    reentrances = self._find_reentrances({})
    return reentrances[id(self)]
def walk(self):
    """
    Return an iterator that generates this feature structure, and
    each feature structure it contains.  Each feature structure will
    be generated exactly once.
    """
    return self._walk(set())

# NOTE: an earlier abstract stub of ``_walk`` was immediately shadowed
# by this concrete implementation; the dead duplicate definition has
# been removed.
def _walk(self, visited):
    """
    Return an iterator that generates this feature structure, and
    each feature structure it contains, yielding each structure at
    most once.

    :param visited: A set containing the ids of all feature
        structures we've already visited while walking.
    """
    if id(self) in visited:
        return
    visited.add(id(self))
    yield self
    for fval in self._values():
        if isinstance(fval, FeatStruct):
            yield from fval._walk(visited)
# Walk through the feature tree.  The first time we see a feature
# value, map it to False (not reentrant).  If we see a feature
# value more than once, then map it to True (reentrant).
def _find_reentrances(self, reentrances):
    """
    Return a dictionary that maps from the ``id`` of each feature
    structure contained in ``self`` (including ``self``) to a
    boolean value, indicating whether it is reentrant or not.
    """
    first_visit = id(self) not in reentrances
    # Reentrant iff this is at least the second time we've seen it.
    reentrances[id(self)] = not first_visit
    if first_visit:
        # Recurse to contained feature structures.
        for fval in self._values():
            if isinstance(fval, FeatStruct):
                fval._find_reentrances(reentrances)
    return reentrances
##////////////////////////////////////////////////////////////
# { Variables & Bindings
##////////////////////////////////////////////////////////////
# Thin delegators to the module-level variable/binding functions.

def substitute_bindings(self, bindings):
    """:see: ``nltk.featstruct.substitute_bindings()``"""
    return substitute_bindings(self, bindings)

def retract_bindings(self, bindings):
    """:see: ``nltk.featstruct.retract_bindings()``"""
    return retract_bindings(self, bindings)

def variables(self):
    """:see: ``nltk.featstruct.find_variables()``"""
    return find_variables(self)

def rename_variables(self, vars=None, used_vars=(), new_vars=None):
    """:see: ``nltk.featstruct.rename_variables()``"""
    return rename_variables(self, vars, used_vars, new_vars)

def remove_variables(self):
    """
    Return the feature structure that is obtained by deleting
    any feature whose value is a ``Variable``.

    :rtype: FeatStruct
    """
    return remove_variables(self)
##////////////////////////////////////////////////////////////
# { Unification
##////////////////////////////////////////////////////////////

def unify(self, other, bindings=None, trace=False, fail=None, rename_vars=True):
    """Unify ``self`` with ``other``; delegates to the module-level
    ``unify()`` function (see its docstring for parameter details)."""
    return unify(self, other, bindings, trace, fail, rename_vars)

def subsumes(self, other):
    """
    Return True if ``self`` subsumes ``other``.  I.e., return true
    If unifying ``self`` with ``other`` would result in a feature
    structure equal to ``other``.
    """
    return subsumes(self, other)
##////////////////////////////////////////////////////////////
# { String Representations
##////////////////////////////////////////////////////////////

def __repr__(self):
    """
    Display a single-line representation of this feature structure,
    suitable for embedding in other representations.
    """
    return self._repr(self._find_reentrances({}), {})

def _repr(self, reentrances, reentrance_ids):
    """
    Return a string representation of this feature structure.

    :param reentrances: A dictionary that maps from the ``id`` of
        each feature value in self, indicating whether that value
        is reentrant or not.
    :param reentrance_ids: A dictionary mapping from each ``id``
        of a feature value to a unique identifier.  This is modified
        by ``repr``: the first time a reentrant feature value is
        displayed, an identifier is added to ``reentrance_ids`` for it.
    """
    raise NotImplementedError()  # Implemented by subclasses.
def unify(
    fstruct1,
    fstruct2,
    bindings=None,
    trace=False,
    fail=None,
    rename_vars=True,
    fs_class="default",
):
    """
    Unify ``fstruct1`` with ``fstruct2``, and return the resulting feature
    structure.  This unified feature structure is the minimal
    feature structure that contains all feature value assignments from both
    ``fstruct1`` and ``fstruct2``, and that preserves all reentrancies.

    If no such feature structure exists (because ``fstruct1`` and
    ``fstruct2`` specify incompatible values for some feature), then
    unification fails, and ``unify`` returns None.

    Bound variables are replaced by their values.  Aliased
    variables are replaced by their representative variable
    (if unbound) or the value of their representative variable
    (if bound).  I.e., if variable *v* is in ``bindings``,
    then *v* is replaced by ``bindings[v]``.  This will
    be repeated until the variable is replaced by an unbound
    variable or a non-variable value.

    Unbound variables are bound when they are unified with
    values; and aliased when they are unified with variables.
    I.e., if variable *v* is not in ``bindings``, and is
    unified with a variable or value *x*, then
    ``bindings[v]`` is set to *x*.

    If ``bindings`` is unspecified, then all variables are
    assumed to be unbound.  I.e., ``bindings`` defaults to an
    empty dict.

    >>> from nltk.featstruct import FeatStruct
    >>> FeatStruct('[a=?x]').unify(FeatStruct('[b=?x]'))
    [a=?x, b=?x2]

    :type bindings: dict(Variable -> any)
    :param bindings: A set of variable bindings to be used and
        updated during unification.
    :type trace: bool
    :param trace: If true, generate trace output.
    :type rename_vars: bool
    :param rename_vars: If True, then rename any variables in
        ``fstruct2`` that are also used in ``fstruct1``, in order to
        avoid collisions on variable names.
    :param fail: Optional callback; when unification yields
        ``UnificationFailure``, its return value (called with the two
        copies and an empty path) is returned instead of None.
    :param fs_class: The class treated as a feature structure during
        unification; ``"default"`` infers it from ``fstruct1``.
    """
    # Decide which class(es) will be treated as feature structures,
    # for the purposes of unification.
    if fs_class == "default":
        fs_class = _default_fs_class(fstruct1)
        if _default_fs_class(fstruct2) != fs_class:
            raise ValueError(
                "Mixing FeatStruct objects with Python "
                "dicts and lists is not supported."
            )
    assert isinstance(fstruct1, fs_class)
    assert isinstance(fstruct2, fs_class)

    # If bindings are unspecified, use an empty set of bindings.
    user_bindings = bindings is not None
    if bindings is None:
        bindings = {}

    # Make copies of fstruct1 and fstruct2 (since the unification
    # algorithm is destructive).  Do it all at once, to preserve
    # reentrance links between fstruct1 and fstruct2.  Copy bindings
    # as well, in case there are any bound vars that contain parts
    # of fstruct1 or fstruct2.
    (fstruct1copy, fstruct2copy, bindings_copy) = copy.deepcopy(
        (fstruct1, fstruct2, bindings)
    )

    # Copy the bindings back to the original bindings dict.
    bindings.update(bindings_copy)

    if rename_vars:
        vars1 = find_variables(fstruct1copy, fs_class)
        vars2 = find_variables(fstruct2copy, fs_class)
        _rename_variables(fstruct2copy, vars1, vars2, {}, fs_class, set())

    # Do the actual unification.  If it fails, return None.
    forward = {}
    if trace:
        _trace_unify_start((), fstruct1copy, fstruct2copy)
    try:
        result = _destructively_unify(
            fstruct1copy, fstruct2copy, bindings, forward, trace, fail, fs_class, ()
        )
    except _UnificationFailureError:
        return None

    # _destructively_unify might return UnificationFailure, e.g. if we
    # tried to unify a mapping with a sequence.
    if result is UnificationFailure:
        if fail is None:
            return None
        else:
            return fail(fstruct1copy, fstruct2copy, ())

    # Replace any feature structure that has a forward pointer
    # with the target of its forward pointer.
    result = _apply_forwards(result, forward, fs_class, set())
    if user_bindings:
        # Only propagate forwards into the caller-supplied dict.
        _apply_forwards_to_bindings(forward, bindings)

    # Replace bound vars with values.
    _resolve_aliases(bindings)
    _substitute_bindings(result, bindings, fs_class, set())

    # Return the result.
    if trace:
        _trace_unify_succeed((), result)
    if trace:
        _trace_bindings((), bindings)
    return result
The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo(trace=False)` to solve the following problem:
Just for testing
Here is the function:
def demo(trace=False):
    """
    Smoke-test unification: print the result of unifying every ordered
    pair drawn from a fixed list of example feature structures.
    """
    # processor breaks with values like '3rd'
    fstruct_strings = [
        "[agr=[number=sing, gender=masc]]",
        "[agr=[gender=masc, person=3]]",
        "[agr=[gender=fem, person=3]]",
        "[subj=[agr=(1)[]], agr->(1)]",
        "[obj=?x]",
        "[subj=?x]",
        "[/=None]",
        "[/=NP]",
        "[cat=NP]",
        "[cat=VP]",
        "[cat=PP]",
        "[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]",
        "[gender=masc, agr=?C]",
        "[gender=?S, agr=[gender=?S,person=3]]",
    ]
    parsed = [FeatStruct(spec) for spec in fstruct_strings]
    for left in parsed:
        for right in parsed:
            print(
                "\n*******************\nfs1 is:\n%s\n\nfs2 is:\n%s\n\nresult is:\n%s"
                % (left, right, unify(left, right))
            )
170,599 | import subprocess
from collections import namedtuple
def _giza2pair(pair_string):
i, j = pair_string.split("-")
return int(i), int(j) | null |
170,600 | import subprocess
from collections import namedtuple
def _naacl2pair(pair_string):
i, j, p = pair_string.split("-")
return int(i), int(j) | null |
170,601 | import subprocess
from collections import namedtuple
class Alignment(frozenset):
    """
    A storage class for representing alignment between two sequences, s1, s2.
    In general, an alignment is a set of tuples of the form (i, j, ...)
    representing an alignment between the i-th element of s1 and the
    j-th element of s2.  Tuples are extensible (they might contain
    additional data, such as a boolean to indicate sure vs possible alignments).

    >>> from nltk.translate import Alignment
    >>> a = Alignment([(0, 0), (0, 1), (1, 2), (2, 2)])
    >>> a.invert()
    Alignment([(0, 0), (1, 0), (2, 1), (2, 2)])
    >>> print(a.invert())
    0-0 1-0 2-1 2-2
    >>> a[0]
    [(0, 1), (0, 0)]
    >>> a.invert()[2]
    [(2, 1), (2, 2)]
    >>> b = Alignment([(0, 0), (0, 1)])
    >>> b.issubset(a)
    True
    >>> c = Alignment.fromstring('0-0 0-1')
    >>> b == c
    True
    """

    def __new__(cls, pairs):
        self = frozenset.__new__(cls, pairs)
        # Highest source index present; 0 for an empty alignment.
        self._len = max((p[0] for p in self), default=0)
        # Per-source-position index of pairs; built lazily.
        self._index = None
        return self

    # BUGFIX: ``fromstring`` is an alternate constructor and must be a
    # classmethod; without the decorator, ``Alignment.fromstring('0-0')``
    # would bind the string to ``cls`` and fail with a TypeError.
    @classmethod
    def fromstring(cls, s):
        """
        Read a giza-formatted string and return an Alignment object.

        >>> Alignment.fromstring('0-0 2-1 9-2 21-3 10-4 7-5')
        Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4), (21, 3)])

        :type s: str
        :param s: the positional alignments in giza format
        :rtype: Alignment
        :return: An Alignment object corresponding to the string representation ``s``.
        """
        return Alignment([_giza2pair(a) for a in s.split()])

    def __getitem__(self, key):
        """
        Look up the alignments that map from a given index or slice.
        """
        if not self._index:
            self._build_index()
        return self._index.__getitem__(key)

    def invert(self):
        """
        Return an Alignment object, being the inverted mapping.
        """
        return Alignment(((p[1], p[0]) + p[2:]) for p in self)

    def range(self, positions=None):
        """
        Work out the range of the mapping from the given positions.
        If no positions are specified, compute the range of the entire mapping.
        """
        image = set()
        if not self._index:
            self._build_index()
        if not positions:
            positions = list(range(len(self._index)))
        for p in positions:
            image.update(f for _, f in self._index[p])
        return sorted(image)

    def __repr__(self):
        """
        Produce a Giza-formatted string representing the alignment.
        """
        return "Alignment(%r)" % sorted(self)

    def __str__(self):
        """
        Produce a Giza-formatted string representing the alignment.
        """
        return " ".join("%d-%d" % p[:2] for p in sorted(self))

    def _build_index(self):
        """
        Build a list self._index such that self._index[i] is a list
        of the alignments originating from word i.
        """
        self._index = [[] for _ in range(self._len + 1)]
        for p in self:
            self._index[p[0]].append(p)
The provided code snippet includes necessary dependencies for implementing the `_check_alignment` function. Write a Python function `def _check_alignment(num_words, num_mots, alignment)` to solve the following problem:
Check whether the alignments are legal. :param num_words: the number of source language words :type num_words: int :param num_mots: the number of target language words :type num_mots: int :param alignment: alignment to be checked :type alignment: Alignment :raise IndexError: if alignment falls outside the sentence
Here is the function:
def _check_alignment(num_words, num_mots, alignment):
    """
    Check whether the alignments are legal.

    :param num_words: the number of source language words
    :type num_words: int
    :param num_mots: the number of target language words
    :type num_mots: int
    :param alignment: alignment to be checked
    :type alignment: Alignment
    :raise TypeError: if alignment is not an Alignment instance
    :raise IndexError: if alignment falls outside the sentence
    """
    # Validate with an explicit raise rather than ``assert`` so the
    # check survives when Python runs with -O.
    if not isinstance(alignment, Alignment):
        raise TypeError("alignment must be an Alignment instance")
    if not all(0 <= pair[0] < num_words for pair in alignment):
        raise IndexError("Alignment is outside boundary of words")
    # A target index of None marks an unaligned source word.
    if not all(pair[1] is None or 0 <= pair[1] < num_mots for pair in alignment):
        raise IndexError("Alignment is outside boundary of mots")
def extract(
    f_start,
    f_end,
    e_start,
    e_end,
    alignment,
    f_aligned,
    srctext,
    trgtext,
    srclen,
    trglen,
    max_phrase_length,
):
    """
    Check alignment-point consistency for the candidate span and, if it
    is consistent, extract all phrase pairs built from it (including
    extensions over unaligned target words).

    A phrase pair (e, f) is consistent with an alignment A if and only if:

    (i)   No English words in the phrase pair are aligned to words
          outside it.
    (ii)  No Foreign words in the phrase pair are aligned to words
          outside it.
    (iii) The phrase pair contains at least one alignment point.

    :param f_start: Starting index of the possible foreign language phrase
    :type f_start: int
    :param f_end: End index of the possible foreign language phrase
    :type f_end: int
    :param e_start: Starting index of the possible source language phrase
    :type e_start: int
    :param e_end: End index of the possible source language phrase
    :type e_end: int
    :param alignment: (source index, target index) alignment points
    :param f_aligned: target positions that carry an alignment point
    :param srctext: The source language tokens, a list of string.
    :type srctext: list
    :param trgtext: The target language tokens, a list of string.
    :type trgtext: list
    :param srclen: The number of tokens in the source language tokens.
    :type srclen: int
    :param trglen: The number of tokens in the target language tokens.
    :type trglen: int
    :param max_phrase_length: maximal length of an extracted phrase
    :return: A set of ((e_start, e_end+1), (fs, fe+1), src_phrase,
        trg_phrase) tuples; an empty set when the span is empty or
        inconsistent.
    :rtype: set
    """
    if f_end < 0:  # 0-based indexing.
        # BUGFIX: return an empty *set* (was ``{}``, an empty dict) so the
        # return type is consistent with the success path.
        return set()
    # Check if alignment points are consistent: no target word inside
    # [f_start, f_end] may be aligned to a source word outside
    # [e_start, e_end].
    for e, f in alignment:
        if (f_start <= f <= f_end) and (e < e_start or e > e_end):
            return set()

    # Add phrase pairs (incl. additional unaligned f)
    phrases = set()
    fs = f_start
    while True:
        fe = min(f_end, f_start + max_phrase_length - 1)
        while True:
            # add phrase pair ([e_start, e_end], [fs, fe]) to the set.
            # Need to +1 in range to include the end-point.
            src_phrase = " ".join(srctext[e_start : e_end + 1])
            trg_phrase = " ".join(trgtext[fs : fe + 1])
            # Include more data for later ordering.
            phrases.add(((e_start, e_end + 1), (fs, fe + 1), src_phrase, trg_phrase))
            # Extend over unaligned target words to the right.
            fe += 1
            if fe in f_aligned or fe >= trglen:
                break
        # Extend over unaligned target words to the left.
        fs -= 1
        if fs in f_aligned or fs < 0:
            break
    return phrases
The provided code snippet includes necessary dependencies for implementing the `phrase_extraction` function. Write a Python function `def phrase_extraction(srctext, trgtext, alignment, max_phrase_length=0)` to solve the following problem:
Phrase extraction algorithm extracts all consistent phrase pairs from a word-aligned sentence pair. The idea is to loop over all possible source language (e) phrases and find the minimal foreign phrase (f) that matches each of them. Matching is done by identifying all alignment points for the source phrase and finding the shortest foreign phrase that includes all the foreign counterparts for the source words. In short, a phrase alignment has to (a) contain all alignment points for all covered words (b) contain at least one alignment point >>> srctext = "michael assumes that he will stay in the house" >>> trgtext = "michael geht davon aus , dass er im haus bleibt" >>> alignment = [(0,0), (1,1), (1,2), (1,3), (2,5), (3,6), (4,9), ... (5,9), (6,7), (7,7), (8,8)] >>> phrases = phrase_extraction(srctext, trgtext, alignment) >>> for i in sorted(phrases): ... print(i) ... ((0, 1), (0, 1), 'michael', 'michael') ((0, 2), (0, 4), 'michael assumes', 'michael geht davon aus') ((0, 2), (0, 5), 'michael assumes', 'michael geht davon aus ,') ((0, 3), (0, 6), 'michael assumes that', 'michael geht davon aus , dass') ((0, 4), (0, 7), 'michael assumes that he', 'michael geht davon aus , dass er') ((0, 9), (0, 10), 'michael assumes that he will stay in the house', 'michael geht davon aus , dass er im haus bleibt') ((1, 2), (1, 4), 'assumes', 'geht davon aus') ((1, 2), (1, 5), 'assumes', 'geht davon aus ,') ((1, 3), (1, 6), 'assumes that', 'geht davon aus , dass') ((1, 4), (1, 7), 'assumes that he', 'geht davon aus , dass er') ((1, 9), (1, 10), 'assumes that he will stay in the house', 'geht davon aus , dass er im haus bleibt') ((2, 3), (4, 6), 'that', ', dass') ((2, 3), (5, 6), 'that', 'dass') ((2, 4), (4, 7), 'that he', ', dass er') ((2, 4), (5, 7), 'that he', 'dass er') ((2, 9), (4, 10), 'that he will stay in the house', ', dass er im haus bleibt') ((2, 9), (5, 10), 'that he will stay in the house', 'dass er im haus bleibt') ((3, 4), (6, 7), 'he', 'er') ((3, 9), (6, 10), 'he will 
stay in the house', 'er im haus bleibt') ((4, 6), (9, 10), 'will stay', 'bleibt') ((4, 9), (7, 10), 'will stay in the house', 'im haus bleibt') ((6, 8), (7, 8), 'in the', 'im') ((6, 9), (7, 9), 'in the house', 'im haus') ((8, 9), (8, 9), 'house', 'haus') :type srctext: str :param srctext: The sentence string from the source language. :type trgtext: str :param trgtext: The sentence string from the target language. :type alignment: list(tuple) :param alignment: The word alignment outputs as list of tuples, where the first elements of tuples are the source words' indices and second elements are the target words' indices. This is also the output format of nltk.translate.ibm1 :rtype: list(tuple) :return: A list of tuples, each element in a list is a phrase and each phrase is a tuple made up of (i) its source location, (ii) its target location, (iii) the source phrase and (iii) the target phrase. The phrase list of tuples represents all the possible phrases extracted from the word alignments. :type max_phrase_length: int :param max_phrase_length: maximal phrase length, if 0 or not specified it is set to a length of the longer sentence (srctext or trgtext).
Here is the function:
def phrase_extraction(srctext, trgtext, alignment, max_phrase_length=0):
    """
    Extract all phrase pairs consistent with the given word alignment
    of a sentence pair.

    For every source span [e_start, e_end], the minimal covering target
    span is computed from the alignment points, and ``extract`` is then
    used to emit that span plus its extensions over unaligned target
    words.  A phrase alignment must (a) contain all alignment points
    for all covered words and (b) contain at least one alignment point.

    :param srctext: The sentence string from the source language.
    :type srctext: str
    :param trgtext: The sentence string from the target language.
    :type trgtext: str
    :param alignment: Word alignment as (source index, target index)
        tuples; this is the output format of nltk.translate.ibm1.
    :type alignment: list(tuple)
    :param max_phrase_length: maximal phrase length; if 0 or not
        specified it is set to the length of the longer sentence.
    :type max_phrase_length: int
    :return: A set of tuples, each made up of (i) the source span,
        (ii) the target span, (iii) the source phrase and (iv) the
        target phrase, covering every extractable phrase pair.
    :rtype: set(tuple)
    """
    source_tokens = srctext.split()  # e
    target_tokens = trgtext.split()  # f
    source_len = len(source_tokens)  # len(e)
    target_len = len(target_tokens)  # len(f)
    # Target positions that carry at least one alignment point.
    aligned_targets = [j for _, j in alignment]
    limit = max_phrase_length or max(source_len, target_len)

    phrase_pairs = set()  # set of phrase pairs BP
    for e_start in range(source_len):
        for e_end in range(e_start, min(source_len, e_start + limit)):
            # Find the minimally matching foreign span for
            # [e_start, e_end]: start from an inverted (empty) span and
            # widen it with every alignment point inside the source span.
            f_start, f_end = target_len - 1, -1  # 0-based indexing
            for e, f in alignment:
                if e_start <= e <= e_end:
                    f_start = min(f, f_start)
                    f_end = max(f, f_end)
            extracted = extract(
                f_start,
                f_end,
                e_start,
                e_end,
                alignment,
                aligned_targets,
                source_tokens,
                target_tokens,
                source_len,
                target_len,
                limit,
            )
            if extracted:
                phrase_pairs.update(extracted)
    return phrase_pairs
'he will stay in the house', 'er im haus bleibt') ((4, 6), (9, 10), 'will stay', 'bleibt') ((4, 9), (7, 10), 'will stay in the house', 'im haus bleibt') ((6, 8), (7, 8), 'in the', 'im') ((6, 9), (7, 9), 'in the house', 'im haus') ((8, 9), (8, 9), 'house', 'haus') :type srctext: str :param srctext: The sentence string from the source language. :type trgtext: str :param trgtext: The sentence string from the target language. :type alignment: list(tuple) :param alignment: The word alignment outputs as list of tuples, where the first elements of tuples are the source words' indices and second elements are the target words' indices. This is also the output format of nltk.translate.ibm1 :rtype: list(tuple) :return: A list of tuples, each element in a list is a phrase and each phrase is a tuple made up of (i) its source location, (ii) its target location, (iii) the source phrase and (iii) the target phrase. The phrase list of tuples represents all the possible phrases extracted from the word alignments. :type max_phrase_length: int :param max_phrase_length: maximal phrase length, if 0 or not specified it is set to a length of the longer sentence (srctext or trgtext). |
170,603 | from bisect import insort_left
from collections import defaultdict
from copy import deepcopy
from math import ceil
The provided code snippet includes necessary dependencies for implementing the `longest_target_sentence_length` function. Write a Python function `def longest_target_sentence_length(sentence_aligned_corpus)` to solve the following problem:
:param sentence_aligned_corpus: Parallel corpus under consideration :type sentence_aligned_corpus: list(AlignedSent) :return: Number of words in the longest target language sentence of ``sentence_aligned_corpus``
Here is the function:
def longest_target_sentence_length(sentence_aligned_corpus):
    """
    :param sentence_aligned_corpus: Parallel corpus under consideration
    :type sentence_aligned_corpus: list(AlignedSent)
    :return: Number of words in the longest target language sentence
        of ``sentence_aligned_corpus``
    """
    # ``max`` with ``default=0`` reproduces the original accumulator loop's
    # behaviour, including returning 0 for an empty corpus.
    return max(
        (len(aligned_sentence.words) for aligned_sentence in sentence_aligned_corpus),
        default=0,
    )
170,604 | from collections import Counter
from nltk.util import everygrams, ngrams
def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4):
    """
    Calculate a single corpus-level GLEU score (aka. system-level GLEU) for all
    the hypotheses and their respective references.
    Instead of averaging the sentence level GLEU scores (i.e. macro-average
    precision), Wu et al. (2016) sum up the matching tokens and the max of
    hypothesis and reference tokens for each sentence, then compute using the
    aggregate values.
    From Mike Schuster (via email):
    "For the corpus, we just add up the two statistics n_match and
    n_all = max(n_all_output, n_all_target) for all sentences, then
    calculate gleu_score = n_match / n_all, so it is not just a mean of
    the sentence gleu scores (in our case, longer sentences count more,
    which I think makes sense as they are more difficult to translate)."
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'military', 'always',
    ...         'obeys', 'the', 'commands', 'of', 'the', 'party']
    >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'military', 'will', 'forever',
    ...          'heed', 'Party', 'commands']
    >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'military', 'forces', 'always',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'Party']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'army', 'always', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'party']
    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']
    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> corpus_gleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
    0.5673...
    The example below show that corpus_gleu() is different from averaging
    sentence_gleu() for hypotheses
    >>> score1 = sentence_gleu([ref1a], hyp1)
    >>> score2 = sentence_gleu([ref2a], hyp2)
    >>> (score1 + score2) / 2 # doctest: +ELLIPSIS
    0.6144...
    :param list_of_references: a list of reference sentences, w.r.t. hypotheses
    :type list_of_references: list(list(list(str)))
    :param hypotheses: a list of hypothesis sentences
    :type hypotheses: list(list(str))
    :param min_len: The minimum order of n-gram this function should extract.
    :type min_len: int
    :param max_len: The maximum order of n-gram this function should extract.
    :type max_len: int
    :return: The corpus-level GLEU score.
    :rtype: float
    """
    # sanity check
    assert len(list_of_references) == len(
        hypotheses
    ), "The number of hypotheses and their reference(s) should be the same"
    # Aggregate matched n-grams and the normalising totals over the corpus.
    total_matches = 0
    total_possible = 0
    for references, hypothesis in zip(list_of_references, hypotheses):
        hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len))
        # True positives + false positives.
        hyp_ngram_total = sum(hyp_ngrams.values())
        candidate_counts = []
        for reference in references:
            ref_ngrams = Counter(everygrams(reference, min_len, max_len))
            # True positives + false negatives.
            ref_ngram_total = sum(ref_ngrams.values())
            # True positives: the multiset intersection of the n-gram counts.
            true_positives = sum((ref_ngrams & hyp_ngrams).values())
            # GLEU is min(precision, recall). Since both share the numerator
            # ``true_positives``, it equals tp / max(tpfp, tpfn) — one division
            # with the larger denominator suffices.
            denominator = max(hyp_ngram_total, ref_ngram_total)
            if denominator > 0:
                candidate_counts.append((true_positives, denominator))
        # Score each sentence against whichever reference yields the highest
        # per-sentence ratio.
        if candidate_counts:
            n_match, n_all = max(candidate_counts, key=lambda pair: pair[0] / pair[1])
            total_matches += n_match
            total_possible += n_all
    # Corner case: empty corpus or empty references — avoid dividing by zero.
    return total_matches / total_possible if total_possible else 0.0
The provided code snippet includes necessary dependencies for implementing the `sentence_gleu` function. Write a Python function `def sentence_gleu(references, hypothesis, min_len=1, max_len=4)` to solve the following problem:
Calculates the sentence level GLEU (Google-BLEU) score described in Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, Jeffrey Dean. (2016) Google’s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation. eprint arXiv:1609.08144. https://arxiv.org/pdf/1609.08144v2.pdf Retrieved on 27 Oct 2016. From Wu et al. (2016): "The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective." Note: The initial implementation only allowed a single reference, but now a list of references is required (which is consistent with bleu_score.sentence_bleu()). The infamous "the the the ... 
" example >>> ref = 'the cat is on the mat'.split() >>> hyp = 'the the the the the the the'.split() >>> sentence_gleu([ref], hyp) # doctest: +ELLIPSIS 0.0909... An example to evaluate normal machine translation outputs >>> ref1 = str('It is a guide to action that ensures that the military ' ... 'will forever heed Party commands').split() >>> hyp1 = str('It is a guide to action which ensures that the military ' ... 'always obeys the commands of the party').split() >>> hyp2 = str('It is to insure the troops forever hearing the activity ' ... 'guidebook that party direct').split() >>> sentence_gleu([ref1], hyp1) # doctest: +ELLIPSIS 0.4393... >>> sentence_gleu([ref1], hyp2) # doctest: +ELLIPSIS 0.1206... :param references: a list of reference sentences :type references: list(list(str)) :param hypothesis: a hypothesis sentence :type hypothesis: list(str) :param min_len: The minimum order of n-gram this function should extract. :type min_len: int :param max_len: The maximum order of n-gram this function should extract. :type max_len: int :return: the sentence level GLEU score. :rtype: float
Here is the function:
def sentence_gleu(references, hypothesis, min_len=1, max_len=4):
    """
    Calculates the sentence level GLEU (Google-BLEU) score described in
    Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi,
    Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey,
    Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser,
    Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens,
    George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith,
    Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes,
    Jeffrey Dean. (2016) Google’s Neural Machine Translation System:
    Bridging the Gap between Human and Machine Translation.
    eprint arXiv:1609.08144. https://arxiv.org/pdf/1609.08144v2.pdf
    Retrieved on 27 Oct 2016.
    From Wu et al. (2016):
    "The BLEU score has some undesirable properties when used for single
    sentences, as it was designed to be a corpus measure. We therefore
    use a slightly different score for our RL experiments which we call
    the 'GLEU score'. For the GLEU score, we record all sub-sequences of
    1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
    compute a recall, which is the ratio of the number of matching n-grams
    to the number of total n-grams in the target (ground truth) sequence,
    and a precision, which is the ratio of the number of matching n-grams
    to the number of total n-grams in the generated output sequence. Then
    GLEU score is simply the minimum of recall and precision. This GLEU
    score's range is always between 0 (no matches) and 1 (all match) and
    it is symmetrical when switching output and target. According to
    our experiments, GLEU score correlates quite well with the BLEU
    metric on a corpus level but does not have its drawbacks for our per
    sentence reward objective."
    Note: The initial implementation only allowed a single reference, but now
    a list of references is required (which is consistent with
    bleu_score.sentence_bleu()).
    The infamous "the the the ... " example
    >>> ref = 'the cat is on the mat'.split()
    >>> hyp = 'the the the the the the the'.split()
    >>> sentence_gleu([ref], hyp) # doctest: +ELLIPSIS
    0.0909...
    An example to evaluate normal machine translation outputs
    >>> ref1 = str('It is a guide to action that ensures that the military '
    ...            'will forever heed Party commands').split()
    >>> hyp1 = str('It is a guide to action which ensures that the military '
    ...            'always obeys the commands of the party').split()
    >>> hyp2 = str('It is to insure the troops forever hearing the activity '
    ...            'guidebook that party direct').split()
    >>> sentence_gleu([ref1], hyp1) # doctest: +ELLIPSIS
    0.4393...
    >>> sentence_gleu([ref1], hyp2) # doctest: +ELLIPSIS
    0.1206...
    :param references: a list of reference sentences
    :type references: list(list(str))
    :param hypothesis: a hypothesis sentence
    :type hypothesis: list(str)
    :param min_len: The minimum order of n-gram this function should extract.
    :type min_len: int
    :param max_len: The maximum order of n-gram this function should extract.
    :type max_len: int
    :return: the sentence level GLEU score.
    :rtype: float
    """
    # A sentence-level score is just the corpus-level score over a one-item
    # corpus: wrap the single (references, hypothesis) pair and delegate.
    references_as_corpus = [references]
    hypotheses_as_corpus = [hypothesis]
    return corpus_gleu(
        references_as_corpus, hypotheses_as_corpus, min_len=min_len, max_len=max_len
    )
" example >>> ref = 'the cat is on the mat'.split() >>> hyp = 'the the the the the the the'.split() >>> sentence_gleu([ref], hyp) # doctest: +ELLIPSIS 0.0909... An example to evaluate normal machine translation outputs >>> ref1 = str('It is a guide to action that ensures that the military ' ... 'will forever heed Party commands').split() >>> hyp1 = str('It is a guide to action which ensures that the military ' ... 'always obeys the commands of the party').split() >>> hyp2 = str('It is to insure the troops forever hearing the activity ' ... 'guidebook that party direct').split() >>> sentence_gleu([ref1], hyp1) # doctest: +ELLIPSIS 0.4393... >>> sentence_gleu([ref1], hyp2) # doctest: +ELLIPSIS 0.1206... :param references: a list of reference sentences :type references: list(list(str)) :param hypothesis: a hypothesis sentence :type hypothesis: list(str) :param min_len: The minimum order of n-gram this function should extract. :type min_len: int :param max_len: The maximum order of n-gram this function should extract. :type max_len: int :return: the sentence level GLEU score. :rtype: float |
170,605 | from itertools import chain, product
from typing import Callable, Iterable, List, Tuple
from nltk.corpus import WordNetCorpusReader, wordnet
from nltk.stem.api import StemmerI
from nltk.stem.porter import PorterStemmer
def _generate_enums(
hypothesis: Iterable[str],
reference: Iterable[str],
preprocess: Callable[[str], str] = str.lower,
) -> Tuple[List[Tuple[int, str]], List[Tuple[int, str]]]:
"""
Takes in pre-tokenized inputs for hypothesis and reference and returns
enumerated word lists for each of them
:param hypothesis: pre-tokenized hypothesis
:param reference: pre-tokenized reference
:preprocess: preprocessing method (default str.lower)
:return: enumerated words list
"""
if isinstance(hypothesis, str):
raise TypeError(
f'"hypothesis" expects pre-tokenized hypothesis (Iterable[str]): {hypothesis}'
)
if isinstance(reference, str):
raise TypeError(
f'"reference" expects pre-tokenized reference (Iterable[str]): {reference}'
)
enum_hypothesis_list = list(enumerate(map(preprocess, hypothesis)))
enum_reference_list = list(enumerate(map(preprocess, reference)))
return enum_hypothesis_list, enum_reference_list
def _match_enums(
enum_hypothesis_list: List[Tuple[int, str]],
enum_reference_list: List[Tuple[int, str]],
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
"""
matches exact words in hypothesis and reference and returns
a word mapping between enum_hypothesis_list and enum_reference_list
based on the enumerated word id.
:param enum_hypothesis_list: enumerated hypothesis list
:param enum_reference_list: enumerated reference list
:return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
enumerated unmatched reference tuples
"""
word_match = []
for i in range(len(enum_hypothesis_list))[::-1]:
for j in range(len(enum_reference_list))[::-1]:
if enum_hypothesis_list[i][1] == enum_reference_list[j][1]:
word_match.append(
(enum_hypothesis_list[i][0], enum_reference_list[j][0])
)
enum_hypothesis_list.pop(i)
enum_reference_list.pop(j)
break
return word_match, enum_hypothesis_list, enum_reference_list
# Lazily-resolved stand-in for ``typing.List`` used by jedi's stub machinery.
# NOTE(review): ``_Alias`` is presumably defined earlier in this module — confirm.
List = _Alias()
# Structural protocol mirroring ``typing.Iterable``: any object exposing
# ``__iter__`` conforms; the method body is intentionally a stub.
class Iterable(Protocol[_T_co]):
    def __iter__(self) -> Iterator[_T_co]: ...
class Tuple(BaseTypingInstance):
    """jedi's model of a ``typing.Tuple[...]`` annotation.

    Wraps the builtin ``tuple`` class (see ``_get_wrapped_value``) and adds
    generic-aware indexing, iteration and type-var inference on top of
    ``BaseTypingInstance``.
    """
    def _is_homogenous(self):
        # To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
        # is used.
        return self._generics_manager.is_homogenous_tuple()
    def py__simple_getitem__(self, index):
        # Literal subscript: ``Tuple[T, ...]`` yields T for any index;
        # otherwise an integer index selects the matching type parameter.
        if self._is_homogenous():
            return self._generics_manager.get_index_and_execute(0)
        else:
            if isinstance(index, int):
                return self._generics_manager.get_index_and_execute(index)
            debug.dbg('The getitem type on Tuple was %s' % index)
            return NO_VALUES
    def py__iter__(self, contextualized_node=None):
        # Iteration yields lazy values: the single repeated element type for
        # the homogeneous form, otherwise each declared parameter in order.
        if self._is_homogenous():
            yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
        else:
            for v in self._generics_manager.to_tuple():
                yield LazyKnownValues(v.execute_annotation())
    def py__getitem__(self, index_value_set, contextualized_node):
        # Non-literal (inferred) subscript: fall back to the union of all
        # element types, since the concrete index is unknown.
        if self._is_homogenous():
            return self._generics_manager.get_index_and_execute(0)
        return ValueSet.from_sets(
            self._generics_manager.to_tuple()
        ).execute_annotation()
    def _get_wrapped_value(self):
        # The underlying wrapped value is the builtin ``tuple`` class.
        tuple_, = self.inference_state.builtins_module \
            .py__getattribute__('tuple').execute_annotation()
        return tuple_
    def name(self):
        # NOTE(review): upstream jedi declares this as a @property; the
        # decorator may have been lost in extraction — confirm against jedi.
        return self._wrapped_value.name
    def infer_type_vars(self, value_set):
        """Map this annotation's type variables to the concrete types found
        in ``value_set``; only values whose class name is ``tuple`` are
        considered.
        """
        # Circular
        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
        value_set = value_set.filter(
            lambda x: x.py__name__().lower() == 'tuple',
        )
        if self._is_homogenous():
            # The parameter annotation is of the form `Tuple[T, ...]`,
            # so we treat the incoming tuple like a iterable sequence
            # rather than a positional container of elements.
            return self._class_value.get_generics()[0].infer_type_vars(
                value_set.merge_types_of_iterate(),
            )
        else:
            # The parameter annotation has only explicit type parameters
            # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
            # treat the incoming values as needing to match the annotation
            # exactly, just as we would for non-tuple annotations.
            type_var_dict = {}
            for element in value_set:
                try:
                    method = element.get_annotated_class_object
                except AttributeError:
                    # This might still happen, because the tuple name matching
                    # above is not 100% correct, so just catch the remaining
                    # cases here.
                    continue
                py_class = method()
                merge_type_var_dicts(
                    type_var_dict,
                    merge_pairwise_generics(self._class_value, py_class),
                )
            return type_var_dict
The provided code snippet includes necessary dependencies for implementing the `exact_match` function. Write a Python function `def exact_match( hypothesis: Iterable[str], reference: Iterable[str] ) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]` to solve the following problem:
matches exact words in hypothesis and reference and returns a word mapping based on the enumerated word id between hypothesis and reference :param hypothesis: pre-tokenized hypothesis :param reference: pre-tokenized reference :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, enumerated unmatched reference tuples
Here is the function:
def exact_match(
    hypothesis: Iterable[str], reference: Iterable[str]
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    """
    Align identical surface forms between hypothesis and reference.

    Enumerates both token sequences and returns a word mapping based on the
    enumerated word ids, together with the leftovers on each side.

    :param hypothesis: pre-tokenized hypothesis
    :param reference: pre-tokenized reference
    :return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
        enumerated unmatched reference tuples
    """
    # Compose the two stages: enumerate/normalise, then match exact words.
    enumerated_pair = _generate_enums(hypothesis, reference)
    return _match_enums(*enumerated_pair)
170,606 | from itertools import chain, product
from typing import Callable, Iterable, List, Tuple
from nltk.corpus import WordNetCorpusReader, wordnet
from nltk.stem.api import StemmerI
from nltk.stem.porter import PorterStemmer
def _generate_enums(
hypothesis: Iterable[str],
reference: Iterable[str],
preprocess: Callable[[str], str] = str.lower,
) -> Tuple[List[Tuple[int, str]], List[Tuple[int, str]]]:
"""
Takes in pre-tokenized inputs for hypothesis and reference and returns
enumerated word lists for each of them
:param hypothesis: pre-tokenized hypothesis
:param reference: pre-tokenized reference
:preprocess: preprocessing method (default str.lower)
:return: enumerated words list
"""
if isinstance(hypothesis, str):
raise TypeError(
f'"hypothesis" expects pre-tokenized hypothesis (Iterable[str]): {hypothesis}'
)
if isinstance(reference, str):
raise TypeError(
f'"reference" expects pre-tokenized reference (Iterable[str]): {reference}'
)
enum_hypothesis_list = list(enumerate(map(preprocess, hypothesis)))
enum_reference_list = list(enumerate(map(preprocess, reference)))
return enum_hypothesis_list, enum_reference_list
def _enum_stem_match(
    enum_hypothesis_list: List[Tuple[int, str]],
    enum_reference_list: List[Tuple[int, str]],
    stemmer: StemmerI = PorterStemmer(),
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    """
    Match hypothesis and reference words after stemming.

    Each word is replaced by its stem (original enumeration ids are kept),
    then exact matching is performed on the stemmed forms.

    :param enum_hypothesis_list: enumerated hypothesis list
    :param enum_reference_list: enumerated reference list
    :param stemmer: nltk.stem.api.StemmerI object (default ``PorterStemmer()``)
    :return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
        enumerated unmatched reference tuples
    """

    def stem_pairs(enum_words):
        # Stem every surface form while preserving its original index.
        return [(index, stemmer.stem(word)) for index, word in enum_words]

    return _match_enums(stem_pairs(enum_hypothesis_list), stem_pairs(enum_reference_list))
# Lazily-resolved stand-in for ``typing.List`` used by jedi's stub machinery.
# NOTE(review): ``_Alias`` is presumably defined earlier in this module — confirm.
List = _Alias()
# Structural protocol mirroring ``typing.Iterable``: any object exposing
# ``__iter__`` conforms; the method body is intentionally a stub.
class Iterable(Protocol[_T_co]):
    def __iter__(self) -> Iterator[_T_co]: ...
class Tuple(BaseTypingInstance):
    """jedi's model of a ``typing.Tuple[...]`` annotation.

    Wraps the builtin ``tuple`` class (see ``_get_wrapped_value``) and adds
    generic-aware indexing, iteration and type-var inference on top of
    ``BaseTypingInstance``.
    """
    def _is_homogenous(self):
        # To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
        # is used.
        return self._generics_manager.is_homogenous_tuple()
    def py__simple_getitem__(self, index):
        # Literal subscript: ``Tuple[T, ...]`` yields T for any index;
        # otherwise an integer index selects the matching type parameter.
        if self._is_homogenous():
            return self._generics_manager.get_index_and_execute(0)
        else:
            if isinstance(index, int):
                return self._generics_manager.get_index_and_execute(index)
            debug.dbg('The getitem type on Tuple was %s' % index)
            return NO_VALUES
    def py__iter__(self, contextualized_node=None):
        # Iteration yields lazy values: the single repeated element type for
        # the homogeneous form, otherwise each declared parameter in order.
        if self._is_homogenous():
            yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
        else:
            for v in self._generics_manager.to_tuple():
                yield LazyKnownValues(v.execute_annotation())
    def py__getitem__(self, index_value_set, contextualized_node):
        # Non-literal (inferred) subscript: fall back to the union of all
        # element types, since the concrete index is unknown.
        if self._is_homogenous():
            return self._generics_manager.get_index_and_execute(0)
        return ValueSet.from_sets(
            self._generics_manager.to_tuple()
        ).execute_annotation()
    def _get_wrapped_value(self):
        # The underlying wrapped value is the builtin ``tuple`` class.
        tuple_, = self.inference_state.builtins_module \
            .py__getattribute__('tuple').execute_annotation()
        return tuple_
    def name(self):
        # NOTE(review): upstream jedi declares this as a @property; the
        # decorator may have been lost in extraction — confirm against jedi.
        return self._wrapped_value.name
    def infer_type_vars(self, value_set):
        """Map this annotation's type variables to the concrete types found
        in ``value_set``; only values whose class name is ``tuple`` are
        considered.
        """
        # Circular
        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
        value_set = value_set.filter(
            lambda x: x.py__name__().lower() == 'tuple',
        )
        if self._is_homogenous():
            # The parameter annotation is of the form `Tuple[T, ...]`,
            # so we treat the incoming tuple like a iterable sequence
            # rather than a positional container of elements.
            return self._class_value.get_generics()[0].infer_type_vars(
                value_set.merge_types_of_iterate(),
            )
        else:
            # The parameter annotation has only explicit type parameters
            # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
            # treat the incoming values as needing to match the annotation
            # exactly, just as we would for non-tuple annotations.
            type_var_dict = {}
            for element in value_set:
                try:
                    method = element.get_annotated_class_object
                except AttributeError:
                    # This might still happen, because the tuple name matching
                    # above is not 100% correct, so just catch the remaining
                    # cases here.
                    continue
                py_class = method()
                merge_type_var_dicts(
                    type_var_dict,
                    merge_pairwise_generics(self._class_value, py_class),
                )
            return type_var_dict
class StemmerI(metaclass=ABCMeta):
    """
    A processing interface for removing morphological affixes from
    words. This process is known as stemming.
    """
    # NOTE(review): upstream NLTK marks ``stem`` with ``@abstractmethod``; the
    # decorator appears to have been lost in extraction — confirm before use.
    # As written, the method body is only its docstring and implicitly
    # returns None; concrete stemmers must override it.
    def stem(self, token):
        """
        Strip affixes from the token and return the stem.
        :param token: The token that should be stemmed.
        :type token: str
        """
class PorterStemmer(StemmerI):
"""
A word stemmer based on the Porter stemming algorithm.
Porter, M. "An algorithm for suffix stripping."
Program 14.3 (1980): 130-137.
See https://www.tartarus.org/~martin/PorterStemmer/ for the homepage
of the algorithm.
Martin Porter has endorsed several modifications to the Porter
algorithm since writing his original paper, and those extensions are
included in the implementations on his website. Additionally, others
have proposed further improvements to the algorithm, including NLTK
contributors. There are thus three modes that can be selected by
passing the appropriate constant to the class constructor's `mode`
attribute:
- PorterStemmer.ORIGINAL_ALGORITHM
An implementation that is faithful to the original paper.
Note that Martin Porter has deprecated this version of the
algorithm. Martin distributes implementations of the Porter
Stemmer in many languages, hosted at:
https://www.tartarus.org/~martin/PorterStemmer/
and all of these implementations include his extensions. He
strongly recommends against using the original, published
version of the algorithm; only use this mode if you clearly
understand why you are choosing to do so.
- PorterStemmer.MARTIN_EXTENSIONS
An implementation that only uses the modifications to the
algorithm that are included in the implementations on Martin
Porter's website. He has declared Porter frozen, so the
behaviour of those implementations should never change.
- PorterStemmer.NLTK_EXTENSIONS (default)
An implementation that includes further improvements devised by
NLTK contributors or taken from other modified implementations
found on the web.
For the best stemming, you should use the default NLTK_EXTENSIONS
version. However, if you need to get the same results as either the
original algorithm or one of Martin Porter's hosted versions for
compatibility with an existing implementation or dataset, you can use
one of the other modes instead.
"""
# Modes the Stemmer can be instantiated in
NLTK_EXTENSIONS = "NLTK_EXTENSIONS"
MARTIN_EXTENSIONS = "MARTIN_EXTENSIONS"
ORIGINAL_ALGORITHM = "ORIGINAL_ALGORITHM"
def __init__(self, mode=NLTK_EXTENSIONS):
if mode not in (
self.NLTK_EXTENSIONS,
self.MARTIN_EXTENSIONS,
self.ORIGINAL_ALGORITHM,
):
raise ValueError(
"Mode must be one of PorterStemmer.NLTK_EXTENSIONS, "
"PorterStemmer.MARTIN_EXTENSIONS, or "
"PorterStemmer.ORIGINAL_ALGORITHM"
)
self.mode = mode
if self.mode == self.NLTK_EXTENSIONS:
# This is a table of irregular forms. It is quite short,
# but still reflects the errors actually drawn to Martin
# Porter's attention over a 20 year period!
irregular_forms = {
"sky": ["sky", "skies"],
"die": ["dying"],
"lie": ["lying"],
"tie": ["tying"],
"news": ["news"],
"inning": ["innings", "inning"],
"outing": ["outings", "outing"],
"canning": ["cannings", "canning"],
"howe": ["howe"],
"proceed": ["proceed"],
"exceed": ["exceed"],
"succeed": ["succeed"],
}
self.pool = {}
for key in irregular_forms:
for val in irregular_forms[key]:
self.pool[val] = key
self.vowels = frozenset(["a", "e", "i", "o", "u"])
def _is_consonant(self, word, i):
"""Returns True if word[i] is a consonant, False otherwise
A consonant is defined in the paper as follows:
A consonant in a word is a letter other than A, E, I, O or
U, and other than Y preceded by a consonant. (The fact that
the term `consonant' is defined to some extent in terms of
itself does not make it ambiguous.) So in TOY the consonants
are T and Y, and in SYZYGY they are S, Z and G. If a letter
is not a consonant it is a vowel.
"""
if word[i] in self.vowels:
return False
if word[i] == "y":
if i == 0:
return True
else:
return not self._is_consonant(word, i - 1)
return True
def _measure(self, stem):
r"""Returns the 'measure' of stem, per definition in the paper
From the paper:
A consonant will be denoted by c, a vowel by v. A list
ccc... of length greater than 0 will be denoted by C, and a
list vvv... of length greater than 0 will be denoted by V.
Any word, or part of a word, therefore has one of the four
forms:
CVCV ... C
CVCV ... V
VCVC ... C
VCVC ... V
These may all be represented by the single form
[C]VCVC ... [V]
where the square brackets denote arbitrary presence of their
contents. Using (VC){m} to denote VC repeated m times, this
may again be written as
[C](VC){m}[V].
m will be called the \measure\ of any word or word part when
represented in this form. The case m = 0 covers the null
word. Here are some examples:
m=0 TR, EE, TREE, Y, BY.
m=1 TROUBLE, OATS, TREES, IVY.
m=2 TROUBLES, PRIVATE, OATEN, ORRERY.
"""
cv_sequence = ""
# Construct a string of 'c's and 'v's representing whether each
# character in `stem` is a consonant or a vowel.
# e.g. 'falafel' becomes 'cvcvcvc',
# 'architecture' becomes 'vcccvcvccvcv'
for i in range(len(stem)):
if self._is_consonant(stem, i):
cv_sequence += "c"
else:
cv_sequence += "v"
# Count the number of 'vc' occurrences, which is equivalent to
# the number of 'VC' occurrences in Porter's reduced form in the
# docstring above, which is in turn equivalent to `m`
return cv_sequence.count("vc")
def _has_positive_measure(self, stem):
return self._measure(stem) > 0
def _contains_vowel(self, stem):
"""Returns True if stem contains a vowel, else False"""
for i in range(len(stem)):
if not self._is_consonant(stem, i):
return True
return False
def _ends_double_consonant(self, word):
"""Implements condition *d from the paper
Returns True if word ends with a double consonant
"""
return (
len(word) >= 2
and word[-1] == word[-2]
and self._is_consonant(word, len(word) - 1)
)
def _ends_cvc(self, word):
"""Implements condition *o from the paper
From the paper:
*o - the stem ends cvc, where the second c is not W, X or Y
(e.g. -WIL, -HOP).
"""
return (
len(word) >= 3
and self._is_consonant(word, len(word) - 3)
and not self._is_consonant(word, len(word) - 2)
and self._is_consonant(word, len(word) - 1)
and word[-1] not in ("w", "x", "y")
) or (
self.mode == self.NLTK_EXTENSIONS
and len(word) == 2
and not self._is_consonant(word, 0)
and self._is_consonant(word, 1)
)
def _replace_suffix(self, word, suffix, replacement):
"""Replaces `suffix` of `word` with `replacement"""
assert word.endswith(suffix), "Given word doesn't end with given suffix"
if suffix == "":
return word + replacement
else:
return word[: -len(suffix)] + replacement
def _apply_rule_list(self, word, rules):
"""Applies the first applicable suffix-removal rule to the word
Takes a word and a list of suffix-removal rules represented as
3-tuples, with the first element being the suffix to remove,
the second element being the string to replace it with, and the
final element being the condition for the rule to be applicable,
or None if the rule is unconditional.
"""
for rule in rules:
suffix, replacement, condition = rule
if suffix == "*d" and self._ends_double_consonant(word):
stem = word[:-2]
if condition is None or condition(stem):
return stem + replacement
else:
# Don't try any further rules
return word
if word.endswith(suffix):
stem = self._replace_suffix(word, suffix, "")
if condition is None or condition(stem):
return stem + replacement
else:
# Don't try any further rules
return word
return word
def _step1a(self, word):
"""Implements Step 1a from "An algorithm for suffix stripping"
From the paper:
SSES -> SS caresses -> caress
IES -> I ponies -> poni
ties -> ti
SS -> SS caress -> caress
S -> cats -> cat
"""
# this NLTK-only rule extends the original algorithm, so
# that 'flies'->'fli' but 'dies'->'die' etc
if self.mode == self.NLTK_EXTENSIONS:
if word.endswith("ies") and len(word) == 4:
return self._replace_suffix(word, "ies", "ie")
return self._apply_rule_list(
word,
[
("sses", "ss", None), # SSES -> SS
("ies", "i", None), # IES -> I
("ss", "ss", None), # SS -> SS
("s", "", None), # S ->
],
)
def _step1b(self, word):
"""Implements Step 1b from "An algorithm for suffix stripping"
From the paper:
(m>0) EED -> EE feed -> feed
agreed -> agree
(*v*) ED -> plastered -> plaster
bled -> bled
(*v*) ING -> motoring -> motor
sing -> sing
If the second or third of the rules in Step 1b is successful,
the following is done:
AT -> ATE conflat(ed) -> conflate
BL -> BLE troubl(ed) -> trouble
IZ -> IZE siz(ed) -> size
(*d and not (*L or *S or *Z))
-> single letter
hopp(ing) -> hop
tann(ed) -> tan
fall(ing) -> fall
hiss(ing) -> hiss
fizz(ed) -> fizz
(m=1 and *o) -> E fail(ing) -> fail
fil(ing) -> file
The rule to map to a single letter causes the removal of one of
the double letter pair. The -E is put back on -AT, -BL and -IZ,
so that the suffixes -ATE, -BLE and -IZE can be recognised
later. This E may be removed in step 4.
"""
# this NLTK-only block extends the original algorithm, so that
# 'spied'->'spi' but 'died'->'die' etc
if self.mode == self.NLTK_EXTENSIONS:
if word.endswith("ied"):
if len(word) == 4:
return self._replace_suffix(word, "ied", "ie")
else:
return self._replace_suffix(word, "ied", "i")
# (m>0) EED -> EE
if word.endswith("eed"):
stem = self._replace_suffix(word, "eed", "")
if self._measure(stem) > 0:
return stem + "ee"
else:
return word
rule_2_or_3_succeeded = False
for suffix in ["ed", "ing"]:
if word.endswith(suffix):
intermediate_stem = self._replace_suffix(word, suffix, "")
if self._contains_vowel(intermediate_stem):
rule_2_or_3_succeeded = True
break
if not rule_2_or_3_succeeded:
return word
return self._apply_rule_list(
intermediate_stem,
[
("at", "ate", None), # AT -> ATE
("bl", "ble", None), # BL -> BLE
("iz", "ize", None), # IZ -> IZE
# (*d and not (*L or *S or *Z))
# -> single letter
(
"*d",
intermediate_stem[-1],
lambda stem: intermediate_stem[-1] not in ("l", "s", "z"),
),
# (m=1 and *o) -> E
(
"",
"e",
lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)),
),
],
)
def _step1c(self, word):
"""Implements Step 1c from "An algorithm for suffix stripping"
From the paper:
Step 1c
(*v*) Y -> I happy -> happi
sky -> sky
"""
def nltk_condition(stem):
"""
This has been modified from the original Porter algorithm so
that y->i is only done when y is preceded by a consonant,
but not if the stem is only a single consonant, i.e.
(*c and not c) Y -> I
So 'happy' -> 'happi', but
'enjoy' -> 'enjoy' etc
This is a much better rule. Formerly 'enjoy'->'enjoi' and
'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but
with this modification that no longer really matters.
Also, the removal of the contains_vowel(z) condition means
that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and
conflate with 'spied', 'tried', 'flies' ...
"""
return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1)
def original_condition(stem):
return self._contains_vowel(stem)
return self._apply_rule_list(
word,
[
(
"y",
"i",
nltk_condition
if self.mode == self.NLTK_EXTENSIONS
else original_condition,
)
],
)
def _step2(self, word):
"""Implements Step 2 from "An algorithm for suffix stripping"
From the paper:
Step 2
(m>0) ATIONAL -> ATE relational -> relate
(m>0) TIONAL -> TION conditional -> condition
rational -> rational
(m>0) ENCI -> ENCE valenci -> valence
(m>0) ANCI -> ANCE hesitanci -> hesitance
(m>0) IZER -> IZE digitizer -> digitize
(m>0) ABLI -> ABLE conformabli -> conformable
(m>0) ALLI -> AL radicalli -> radical
(m>0) ENTLI -> ENT differentli -> different
(m>0) ELI -> E vileli - > vile
(m>0) OUSLI -> OUS analogousli -> analogous
(m>0) IZATION -> IZE vietnamization -> vietnamize
(m>0) ATION -> ATE predication -> predicate
(m>0) ATOR -> ATE operator -> operate
(m>0) ALISM -> AL feudalism -> feudal
(m>0) IVENESS -> IVE decisiveness -> decisive
(m>0) FULNESS -> FUL hopefulness -> hopeful
(m>0) OUSNESS -> OUS callousness -> callous
(m>0) ALITI -> AL formaliti -> formal
(m>0) IVITI -> IVE sensitiviti -> sensitive
(m>0) BILITI -> BLE sensibiliti -> sensible
"""
if self.mode == self.NLTK_EXTENSIONS:
# Instead of applying the ALLI -> AL rule after '(a)bli' per
# the published algorithm, instead we apply it first, and,
# if it succeeds, run the result through step2 again.
if word.endswith("alli") and self._has_positive_measure(
self._replace_suffix(word, "alli", "")
):
return self._step2(self._replace_suffix(word, "alli", "al"))
bli_rule = ("bli", "ble", self._has_positive_measure)
abli_rule = ("abli", "able", self._has_positive_measure)
rules = [
("ational", "ate", self._has_positive_measure),
("tional", "tion", self._has_positive_measure),
("enci", "ence", self._has_positive_measure),
("anci", "ance", self._has_positive_measure),
("izer", "ize", self._has_positive_measure),
abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule,
("alli", "al", self._has_positive_measure),
("entli", "ent", self._has_positive_measure),
("eli", "e", self._has_positive_measure),
("ousli", "ous", self._has_positive_measure),
("ization", "ize", self._has_positive_measure),
("ation", "ate", self._has_positive_measure),
("ator", "ate", self._has_positive_measure),
("alism", "al", self._has_positive_measure),
("iveness", "ive", self._has_positive_measure),
("fulness", "ful", self._has_positive_measure),
("ousness", "ous", self._has_positive_measure),
("aliti", "al", self._has_positive_measure),
("iviti", "ive", self._has_positive_measure),
("biliti", "ble", self._has_positive_measure),
]
if self.mode == self.NLTK_EXTENSIONS:
rules.append(("fulli", "ful", self._has_positive_measure))
# The 'l' of the 'logi' -> 'log' rule is put with the stem,
# so that short stems like 'geo' 'theo' etc work like
# 'archaeo' 'philo' etc.
rules.append(
("logi", "log", lambda stem: self._has_positive_measure(word[:-3]))
)
if self.mode == self.MARTIN_EXTENSIONS:
rules.append(("logi", "log", self._has_positive_measure))
return self._apply_rule_list(word, rules)
def _step3(self, word):
"""Implements Step 3 from "An algorithm for suffix stripping"
From the paper:
Step 3
(m>0) ICATE -> IC triplicate -> triplic
(m>0) ATIVE -> formative -> form
(m>0) ALIZE -> AL formalize -> formal
(m>0) ICITI -> IC electriciti -> electric
(m>0) ICAL -> IC electrical -> electric
(m>0) FUL -> hopeful -> hope
(m>0) NESS -> goodness -> good
"""
return self._apply_rule_list(
word,
[
("icate", "ic", self._has_positive_measure),
("ative", "", self._has_positive_measure),
("alize", "al", self._has_positive_measure),
("iciti", "ic", self._has_positive_measure),
("ical", "ic", self._has_positive_measure),
("ful", "", self._has_positive_measure),
("ness", "", self._has_positive_measure),
],
)
def _step4(self, word):
"""Implements Step 4 from "An algorithm for suffix stripping"
Step 4
(m>1) AL -> revival -> reviv
(m>1) ANCE -> allowance -> allow
(m>1) ENCE -> inference -> infer
(m>1) ER -> airliner -> airlin
(m>1) IC -> gyroscopic -> gyroscop
(m>1) ABLE -> adjustable -> adjust
(m>1) IBLE -> defensible -> defens
(m>1) ANT -> irritant -> irrit
(m>1) EMENT -> replacement -> replac
(m>1) MENT -> adjustment -> adjust
(m>1) ENT -> dependent -> depend
(m>1 and (*S or *T)) ION -> adoption -> adopt
(m>1) OU -> homologou -> homolog
(m>1) ISM -> communism -> commun
(m>1) ATE -> activate -> activ
(m>1) ITI -> angulariti -> angular
(m>1) OUS -> homologous -> homolog
(m>1) IVE -> effective -> effect
(m>1) IZE -> bowdlerize -> bowdler
The suffixes are now removed. All that remains is a little
tidying up.
"""
measure_gt_1 = lambda stem: self._measure(stem) > 1
return self._apply_rule_list(
word,
[
("al", "", measure_gt_1),
("ance", "", measure_gt_1),
("ence", "", measure_gt_1),
("er", "", measure_gt_1),
("ic", "", measure_gt_1),
("able", "", measure_gt_1),
("ible", "", measure_gt_1),
("ant", "", measure_gt_1),
("ement", "", measure_gt_1),
("ment", "", measure_gt_1),
("ent", "", measure_gt_1),
# (m>1 and (*S or *T)) ION ->
(
"ion",
"",
lambda stem: self._measure(stem) > 1 and stem[-1] in ("s", "t"),
),
("ou", "", measure_gt_1),
("ism", "", measure_gt_1),
("ate", "", measure_gt_1),
("iti", "", measure_gt_1),
("ous", "", measure_gt_1),
("ive", "", measure_gt_1),
("ize", "", measure_gt_1),
],
)
def _step5a(self, word):
"""Implements Step 5a from "An algorithm for suffix stripping"
From the paper:
Step 5a
(m>1) E -> probate -> probat
rate -> rate
(m=1 and not *o) E -> cease -> ceas
"""
# Note that Martin's test vocabulary and reference
# implementations are inconsistent in how they handle the case
# where two rules both refer to a suffix that matches the word
# to be stemmed, but only the condition of the second one is
# true.
# Earlier in step2b we had the rules:
# (m>0) EED -> EE
# (*v*) ED ->
# but the examples in the paper included "feed"->"feed", even
# though (*v*) is true for "fe" and therefore the second rule
# alone would map "feed"->"fe".
# However, in THIS case, we need to handle the consecutive rules
# differently and try both conditions (obviously; the second
# rule here would be redundant otherwise). Martin's paper makes
# no explicit mention of the inconsistency; you have to infer it
# from the examples.
# For this reason, we can't use _apply_rule_list here.
if word.endswith("e"):
stem = self._replace_suffix(word, "e", "")
if self._measure(stem) > 1:
return stem
if self._measure(stem) == 1 and not self._ends_cvc(stem):
return stem
return word
def _step5b(self, word):
"""Implements Step 5a from "An algorithm for suffix stripping"
From the paper:
Step 5b
(m > 1 and *d and *L) -> single letter
controll -> control
roll -> roll
"""
return self._apply_rule_list(
word, [("ll", "l", lambda stem: self._measure(word[:-1]) > 1)]
)
def stem(self, word, to_lowercase=True):
"""
:param to_lowercase: if `to_lowercase=True` the word always lowercase
"""
stem = word.lower() if to_lowercase else word
if self.mode == self.NLTK_EXTENSIONS and word in self.pool:
return self.pool[stem]
if self.mode != self.ORIGINAL_ALGORITHM and len(word) <= 2:
# With this line, strings of length 1 or 2 don't go through
# the stemming process, although no mention is made of this
# in the published algorithm.
return stem
stem = self._step1a(stem)
stem = self._step1b(stem)
stem = self._step1c(stem)
stem = self._step2(stem)
stem = self._step3(stem)
stem = self._step4(stem)
stem = self._step5a(stem)
stem = self._step5b(stem)
return stem
def __repr__(self):
return "<PorterStemmer>"
The provided code snippet includes necessary dependencies for implementing the `stem_match` function. Write a Python function `def stem_match( hypothesis: Iterable[str], reference: Iterable[str], stemmer: StemmerI = PorterStemmer(), ) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]` to solve the following problem:
Stems each word and matches them in hypothesis and reference and returns a word mapping between hypothesis and reference :param hypothesis: pre-tokenized hypothesis :param reference: pre-tokenized reference :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, enumerated unmatched reference tuples
Here is the function:
def stem_match(
hypothesis: Iterable[str],
reference: Iterable[str],
stemmer: StemmerI = PorterStemmer(),
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
"""
Stems each word and matches them in hypothesis and reference
and returns a word mapping between hypothesis and reference
:param hypothesis: pre-tokenized hypothesis
:param reference: pre-tokenized reference
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
enumerated unmatched reference tuples
"""
enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference)
return _enum_stem_match(enum_hypothesis_list, enum_reference_list, stemmer=stemmer) | Stems each word and matches them in hypothesis and reference and returns a word mapping between hypothesis and reference :param hypothesis: pre-tokenized hypothesis :param reference: pre-tokenized reference :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, enumerated unmatched reference tuples |
170,607 | from itertools import chain, product
from typing import Callable, Iterable, List, Tuple
from nltk.corpus import WordNetCorpusReader, wordnet
from nltk.stem.api import StemmerI
from nltk.stem.porter import PorterStemmer
def _generate_enums(
hypothesis: Iterable[str],
reference: Iterable[str],
preprocess: Callable[[str], str] = str.lower,
) -> Tuple[List[Tuple[int, str]], List[Tuple[int, str]]]:
"""
Takes in pre-tokenized inputs for hypothesis and reference and returns
enumerated word lists for each of them
:param hypothesis: pre-tokenized hypothesis
:param reference: pre-tokenized reference
:preprocess: preprocessing method (default str.lower)
:return: enumerated words list
"""
if isinstance(hypothesis, str):
raise TypeError(
f'"hypothesis" expects pre-tokenized hypothesis (Iterable[str]): {hypothesis}'
)
if isinstance(reference, str):
raise TypeError(
f'"reference" expects pre-tokenized reference (Iterable[str]): {reference}'
)
enum_hypothesis_list = list(enumerate(map(preprocess, hypothesis)))
enum_reference_list = list(enumerate(map(preprocess, reference)))
return enum_hypothesis_list, enum_reference_list
def _enum_wordnetsyn_match(
enum_hypothesis_list: List[Tuple[int, str]],
enum_reference_list: List[Tuple[int, str]],
wordnet: WordNetCorpusReader = wordnet,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
"""
Matches each word in reference to a word in hypothesis
if any synonym of a hypothesis word is the exact match
to the reference word.
:param enum_hypothesis_list: enumerated hypothesis list
:param enum_reference_list: enumerated reference list
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
"""
word_match = []
for i in range(len(enum_hypothesis_list))[::-1]:
hypothesis_syns = set(
chain.from_iterable(
(
lemma.name()
for lemma in synset.lemmas()
if lemma.name().find("_") < 0
)
for synset in wordnet.synsets(enum_hypothesis_list[i][1])
)
).union({enum_hypothesis_list[i][1]})
for j in range(len(enum_reference_list))[::-1]:
if enum_reference_list[j][1] in hypothesis_syns:
word_match.append(
(enum_hypothesis_list[i][0], enum_reference_list[j][0])
)
enum_hypothesis_list.pop(i)
enum_reference_list.pop(j)
break
return word_match, enum_hypothesis_list, enum_reference_list
List = _Alias()
class Iterable(Protocol[_T_co]):
def __iter__(self) -> Iterator[_T_co]: ...
class Tuple(BaseTypingInstance):
def _is_homogenous(self):
# To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
# is used.
return self._generics_manager.is_homogenous_tuple()
def py__simple_getitem__(self, index):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
else:
if isinstance(index, int):
return self._generics_manager.get_index_and_execute(index)
debug.dbg('The getitem type on Tuple was %s' % index)
return NO_VALUES
def py__iter__(self, contextualized_node=None):
if self._is_homogenous():
yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
else:
for v in self._generics_manager.to_tuple():
yield LazyKnownValues(v.execute_annotation())
def py__getitem__(self, index_value_set, contextualized_node):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
return ValueSet.from_sets(
self._generics_manager.to_tuple()
).execute_annotation()
def _get_wrapped_value(self):
tuple_, = self.inference_state.builtins_module \
.py__getattribute__('tuple').execute_annotation()
return tuple_
def name(self):
return self._wrapped_value.name
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
value_set = value_set.filter(
lambda x: x.py__name__().lower() == 'tuple',
)
if self._is_homogenous():
# The parameter annotation is of the form `Tuple[T, ...]`,
# so we treat the incoming tuple like a iterable sequence
# rather than a positional container of elements.
return self._class_value.get_generics()[0].infer_type_vars(
value_set.merge_types_of_iterate(),
)
else:
# The parameter annotation has only explicit type parameters
# (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
# treat the incoming values as needing to match the annotation
# exactly, just as we would for non-tuple annotations.
type_var_dict = {}
for element in value_set:
try:
method = element.get_annotated_class_object
except AttributeError:
# This might still happen, because the tuple name matching
# above is not 100% correct, so just catch the remaining
# cases here.
continue
py_class = method()
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self._class_value, py_class),
)
return type_var_dict
The provided code snippet includes necessary dependencies for implementing the `wordnetsyn_match` function. Write a Python function `def wordnetsyn_match( hypothesis: Iterable[str], reference: Iterable[str], wordnet: WordNetCorpusReader = wordnet, ) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]` to solve the following problem:
Matches each word in reference to a word in hypothesis if any synonym of a hypothesis word is the exact match to the reference word. :param hypothesis: pre-tokenized hypothesis :param reference: pre-tokenized reference :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) :return: list of mapped tuples
Here is the function:
def wordnetsyn_match(
hypothesis: Iterable[str],
reference: Iterable[str],
wordnet: WordNetCorpusReader = wordnet,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
"""
Matches each word in reference to a word in hypothesis if any synonym
of a hypothesis word is the exact match to the reference word.
:param hypothesis: pre-tokenized hypothesis
:param reference: pre-tokenized reference
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:return: list of mapped tuples
"""
enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference)
return _enum_wordnetsyn_match(
enum_hypothesis_list, enum_reference_list, wordnet=wordnet
) | Matches each word in reference to a word in hypothesis if any synonym of a hypothesis word is the exact match to the reference word. :param hypothesis: pre-tokenized hypothesis :param reference: pre-tokenized reference :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) :return: list of mapped tuples |
170,608 | from itertools import chain, product
from typing import Callable, Iterable, List, Tuple
from nltk.corpus import WordNetCorpusReader, wordnet
from nltk.stem.api import StemmerI
from nltk.stem.porter import PorterStemmer
def _generate_enums(
hypothesis: Iterable[str],
reference: Iterable[str],
preprocess: Callable[[str], str] = str.lower,
) -> Tuple[List[Tuple[int, str]], List[Tuple[int, str]]]:
"""
Takes in pre-tokenized inputs for hypothesis and reference and returns
enumerated word lists for each of them
:param hypothesis: pre-tokenized hypothesis
:param reference: pre-tokenized reference
:preprocess: preprocessing method (default str.lower)
:return: enumerated words list
"""
if isinstance(hypothesis, str):
raise TypeError(
f'"hypothesis" expects pre-tokenized hypothesis (Iterable[str]): {hypothesis}'
)
if isinstance(reference, str):
raise TypeError(
f'"reference" expects pre-tokenized reference (Iterable[str]): {reference}'
)
enum_hypothesis_list = list(enumerate(map(preprocess, hypothesis)))
enum_reference_list = list(enumerate(map(preprocess, reference)))
return enum_hypothesis_list, enum_reference_list
def _enum_align_words(
enum_hypothesis_list: List[Tuple[int, str]],
enum_reference_list: List[Tuple[int, str]],
stemmer: StemmerI = PorterStemmer(),
wordnet: WordNetCorpusReader = wordnet,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
"""
Aligns/matches words in the hypothesis to reference by sequentially
applying exact match, stemmed match and wordnet based synonym match.
in case there are multiple matches the match which has the least number
of crossing is chosen. Takes enumerated list as input instead of
string input
:param enum_hypothesis_list: enumerated hypothesis list
:param enum_reference_list: enumerated reference list
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:return: sorted list of matched tuples, unmatched hypothesis list,
unmatched reference list
"""
exact_matches, enum_hypothesis_list, enum_reference_list = _match_enums(
enum_hypothesis_list, enum_reference_list
)
stem_matches, enum_hypothesis_list, enum_reference_list = _enum_stem_match(
enum_hypothesis_list, enum_reference_list, stemmer=stemmer
)
wns_matches, enum_hypothesis_list, enum_reference_list = _enum_wordnetsyn_match(
enum_hypothesis_list, enum_reference_list, wordnet=wordnet
)
return (
sorted(
exact_matches + stem_matches + wns_matches, key=lambda wordpair: wordpair[0]
),
enum_hypothesis_list,
enum_reference_list,
)
List = _Alias()
class Iterable(Protocol[_T_co]):
def __iter__(self) -> Iterator[_T_co]: ...
class Tuple(BaseTypingInstance):
def _is_homogenous(self):
# To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
# is used.
return self._generics_manager.is_homogenous_tuple()
def py__simple_getitem__(self, index):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
else:
if isinstance(index, int):
return self._generics_manager.get_index_and_execute(index)
debug.dbg('The getitem type on Tuple was %s' % index)
return NO_VALUES
def py__iter__(self, contextualized_node=None):
if self._is_homogenous():
yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
else:
for v in self._generics_manager.to_tuple():
yield LazyKnownValues(v.execute_annotation())
def py__getitem__(self, index_value_set, contextualized_node):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
return ValueSet.from_sets(
self._generics_manager.to_tuple()
).execute_annotation()
def _get_wrapped_value(self):
tuple_, = self.inference_state.builtins_module \
.py__getattribute__('tuple').execute_annotation()
return tuple_
def name(self):
return self._wrapped_value.name
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
value_set = value_set.filter(
lambda x: x.py__name__().lower() == 'tuple',
)
if self._is_homogenous():
# The parameter annotation is of the form `Tuple[T, ...]`,
# so we treat the incoming tuple like a iterable sequence
# rather than a positional container of elements.
return self._class_value.get_generics()[0].infer_type_vars(
value_set.merge_types_of_iterate(),
)
else:
# The parameter annotation has only explicit type parameters
# (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
# treat the incoming values as needing to match the annotation
# exactly, just as we would for non-tuple annotations.
type_var_dict = {}
for element in value_set:
try:
method = element.get_annotated_class_object
except AttributeError:
# This might still happen, because the tuple name matching
# above is not 100% correct, so just catch the remaining
# cases here.
continue
py_class = method()
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self._class_value, py_class),
)
return type_var_dict
class StemmerI(metaclass=ABCMeta):
"""
A processing interface for removing morphological affixes from
words. This process is known as stemming.
"""
def stem(self, token):
"""
Strip affixes from the token and return the stem.
:param token: The token that should be stemmed.
:type token: str
"""
class PorterStemmer(StemmerI):
"""
A word stemmer based on the Porter stemming algorithm.
Porter, M. "An algorithm for suffix stripping."
Program 14.3 (1980): 130-137.
See https://www.tartarus.org/~martin/PorterStemmer/ for the homepage
of the algorithm.
Martin Porter has endorsed several modifications to the Porter
algorithm since writing his original paper, and those extensions are
included in the implementations on his website. Additionally, others
have proposed further improvements to the algorithm, including NLTK
contributors. There are thus three modes that can be selected by
passing the appropriate constant to the class constructor's `mode`
attribute:
- PorterStemmer.ORIGINAL_ALGORITHM
An implementation that is faithful to the original paper.
Note that Martin Porter has deprecated this version of the
algorithm. Martin distributes implementations of the Porter
Stemmer in many languages, hosted at:
https://www.tartarus.org/~martin/PorterStemmer/
and all of these implementations include his extensions. He
strongly recommends against using the original, published
version of the algorithm; only use this mode if you clearly
understand why you are choosing to do so.
- PorterStemmer.MARTIN_EXTENSIONS
An implementation that only uses the modifications to the
algorithm that are included in the implementations on Martin
Porter's website. He has declared Porter frozen, so the
behaviour of those implementations should never change.
- PorterStemmer.NLTK_EXTENSIONS (default)
An implementation that includes further improvements devised by
NLTK contributors or taken from other modified implementations
found on the web.
For the best stemming, you should use the default NLTK_EXTENSIONS
version. However, if you need to get the same results as either the
original algorithm or one of Martin Porter's hosted versions for
compatibility with an existing implementation or dataset, you can use
one of the other modes instead.
"""
# Modes the Stemmer can be instantiated in
NLTK_EXTENSIONS = "NLTK_EXTENSIONS"
MARTIN_EXTENSIONS = "MARTIN_EXTENSIONS"
ORIGINAL_ALGORITHM = "ORIGINAL_ALGORITHM"
def __init__(self, mode=NLTK_EXTENSIONS):
if mode not in (
self.NLTK_EXTENSIONS,
self.MARTIN_EXTENSIONS,
self.ORIGINAL_ALGORITHM,
):
raise ValueError(
"Mode must be one of PorterStemmer.NLTK_EXTENSIONS, "
"PorterStemmer.MARTIN_EXTENSIONS, or "
"PorterStemmer.ORIGINAL_ALGORITHM"
)
self.mode = mode
if self.mode == self.NLTK_EXTENSIONS:
# This is a table of irregular forms. It is quite short,
# but still reflects the errors actually drawn to Martin
# Porter's attention over a 20 year period!
irregular_forms = {
"sky": ["sky", "skies"],
"die": ["dying"],
"lie": ["lying"],
"tie": ["tying"],
"news": ["news"],
"inning": ["innings", "inning"],
"outing": ["outings", "outing"],
"canning": ["cannings", "canning"],
"howe": ["howe"],
"proceed": ["proceed"],
"exceed": ["exceed"],
"succeed": ["succeed"],
}
self.pool = {}
for key in irregular_forms:
for val in irregular_forms[key]:
self.pool[val] = key
self.vowels = frozenset(["a", "e", "i", "o", "u"])
def _is_consonant(self, word, i):
"""Returns True if word[i] is a consonant, False otherwise
A consonant is defined in the paper as follows:
A consonant in a word is a letter other than A, E, I, O or
U, and other than Y preceded by a consonant. (The fact that
the term `consonant' is defined to some extent in terms of
itself does not make it ambiguous.) So in TOY the consonants
are T and Y, and in SYZYGY they are S, Z and G. If a letter
is not a consonant it is a vowel.
"""
if word[i] in self.vowels:
return False
if word[i] == "y":
if i == 0:
return True
else:
return not self._is_consonant(word, i - 1)
return True
def _measure(self, stem):
r"""Returns the 'measure' of stem, per definition in the paper
From the paper:
A consonant will be denoted by c, a vowel by v. A list
ccc... of length greater than 0 will be denoted by C, and a
list vvv... of length greater than 0 will be denoted by V.
Any word, or part of a word, therefore has one of the four
forms:
CVCV ... C
CVCV ... V
VCVC ... C
VCVC ... V
These may all be represented by the single form
[C]VCVC ... [V]
where the square brackets denote arbitrary presence of their
contents. Using (VC){m} to denote VC repeated m times, this
may again be written as
[C](VC){m}[V].
m will be called the \measure\ of any word or word part when
represented in this form. The case m = 0 covers the null
word. Here are some examples:
m=0 TR, EE, TREE, Y, BY.
m=1 TROUBLE, OATS, TREES, IVY.
m=2 TROUBLES, PRIVATE, OATEN, ORRERY.
"""
cv_sequence = ""
# Construct a string of 'c's and 'v's representing whether each
# character in `stem` is a consonant or a vowel.
# e.g. 'falafel' becomes 'cvcvcvc',
# 'architecture' becomes 'vcccvcvccvcv'
for i in range(len(stem)):
if self._is_consonant(stem, i):
cv_sequence += "c"
else:
cv_sequence += "v"
# Count the number of 'vc' occurrences, which is equivalent to
# the number of 'VC' occurrences in Porter's reduced form in the
# docstring above, which is in turn equivalent to `m`
return cv_sequence.count("vc")
def _has_positive_measure(self, stem):
return self._measure(stem) > 0
def _contains_vowel(self, stem):
"""Returns True if stem contains a vowel, else False"""
for i in range(len(stem)):
if not self._is_consonant(stem, i):
return True
return False
def _ends_double_consonant(self, word):
"""Implements condition *d from the paper
Returns True if word ends with a double consonant
"""
return (
len(word) >= 2
and word[-1] == word[-2]
and self._is_consonant(word, len(word) - 1)
)
def _ends_cvc(self, word):
"""Implements condition *o from the paper
From the paper:
*o - the stem ends cvc, where the second c is not W, X or Y
(e.g. -WIL, -HOP).
"""
return (
len(word) >= 3
and self._is_consonant(word, len(word) - 3)
and not self._is_consonant(word, len(word) - 2)
and self._is_consonant(word, len(word) - 1)
and word[-1] not in ("w", "x", "y")
) or (
self.mode == self.NLTK_EXTENSIONS
and len(word) == 2
and not self._is_consonant(word, 0)
and self._is_consonant(word, 1)
)
def _replace_suffix(self, word, suffix, replacement):
"""Replaces `suffix` of `word` with `replacement"""
assert word.endswith(suffix), "Given word doesn't end with given suffix"
if suffix == "":
return word + replacement
else:
return word[: -len(suffix)] + replacement
def _apply_rule_list(self, word, rules):
    """Applies the first applicable suffix-removal rule to the word

    Takes a word and a list of suffix-removal rules represented as
    3-tuples, with the first element being the suffix to remove,
    the second element being the string to replace it with, and the
    final element being the condition for the rule to be applicable,
    or None if the rule is unconditional.

    Once a rule's suffix matches, no later rule is tried — even when
    the matching rule's condition fails (the word is returned as-is).
    """
    for rule in rules:
        suffix, replacement, condition = rule
        # "*d" is a meta-suffix matching any doubled consonant; the
        # candidate stem is the word minus the repeated letter pair.
        if suffix == "*d" and self._ends_double_consonant(word):
            stem = word[:-2]
            if condition is None or condition(stem):
                return stem + replacement
            else:
                # Don't try any further rules
                return word
        if word.endswith(suffix):
            stem = self._replace_suffix(word, suffix, "")
            if condition is None or condition(stem):
                return stem + replacement
            else:
                # Don't try any further rules
                return word
    return word
def _step1a(self, word):
    """Porter step 1a: strip plural suffixes.

        SSES -> SS    caresses -> caress
        IES  -> I     ponies   -> poni, ties -> ti
        SS   -> SS    caress   -> caress
        S    ->       cats     -> cat
    """
    # NLTK extension: four-letter "-ies" words keep the "e", so
    # 'dies' -> 'die' while 'flies' still becomes 'fli'.
    if (
        self.mode == self.NLTK_EXTENSIONS
        and len(word) == 4
        and word.endswith("ies")
    ):
        return self._replace_suffix(word, "ies", "ie")

    plural_rules = [
        ("sses", "ss", None),
        ("ies", "i", None),
        ("ss", "ss", None),
        ("s", "", None),
    ]
    return self._apply_rule_list(word, plural_rules)
def _step1b(self, word):
    """Implements Step 1b from "An algorithm for suffix stripping"

    From the paper:

        (m>0) EED -> EE        feed -> feed, agreed -> agree
        (*v*) ED  ->           plastered -> plaster, bled -> bled
        (*v*) ING ->           motoring -> motor, sing -> sing

    If the second or third of the rules in Step 1b is successful,
    the following is done:

        AT -> ATE              conflat(ed) -> conflate
        BL -> BLE              troubl(ed) -> trouble
        IZ -> IZE              siz(ed) -> size
        (*d and not (*L or *S or *Z)) -> single letter
                               hopp(ing) -> hop, tann(ed) -> tan,
                               fall(ing) -> fall, hiss(ing) -> hiss
        (m=1 and *o) -> E      fail(ing) -> fail, fil(ing) -> file

    The rule to map to a single letter causes the removal of one of
    the double letter pair. The -E is put back on -AT, -BL and -IZ,
    so that the suffixes -ATE, -BLE and -IZE can be recognised
    later. This E may be removed in step 4.
    """
    # this NLTK-only block extends the original algorithm, so that
    # 'spied'->'spi' but 'died'->'die' etc
    if self.mode == self.NLTK_EXTENSIONS:
        if word.endswith("ied"):
            if len(word) == 4:
                return self._replace_suffix(word, "ied", "ie")
            else:
                return self._replace_suffix(word, "ied", "i")

    # (m>0) EED -> EE
    if word.endswith("eed"):
        stem = self._replace_suffix(word, "eed", "")
        if self._measure(stem) > 0:
            return stem + "ee"
        else:
            # An "eed" word never falls through to the ED rule below,
            # so 'feed' -> 'feed' (matching the paper's examples).
            return word

    rule_2_or_3_succeeded = False

    for suffix in ["ed", "ing"]:
        if word.endswith(suffix):
            intermediate_stem = self._replace_suffix(word, suffix, "")
            if self._contains_vowel(intermediate_stem):
                rule_2_or_3_succeeded = True
                break

    if not rule_2_or_3_succeeded:
        return word

    # Cleanup rules; note the lambdas close over intermediate_stem,
    # so the undoubling rule replaces the pair with one copy of the
    # final letter unless it is l, s or z.
    return self._apply_rule_list(
        intermediate_stem,
        [
            ("at", "ate", None),  # AT -> ATE
            ("bl", "ble", None),  # BL -> BLE
            ("iz", "ize", None),  # IZ -> IZE
            # (*d and not (*L or *S or *Z))
            # -> single letter
            (
                "*d",
                intermediate_stem[-1],
                lambda stem: intermediate_stem[-1] not in ("l", "s", "z"),
            ),
            # (m=1 and *o) -> E
            (
                "",
                "e",
                lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)),
            ),
        ],
    )
def _step1c(self, word):
    """Implements Step 1c from "An algorithm for suffix stripping"

    From the paper:

        (*v*) Y -> I      happy -> happi
                          sky   -> sky
    """

    def nltk_condition(stem):
        """
        This has been modified from the original Porter algorithm so
        that y->i is only done when y is preceded by a consonant,
        but not if the stem is only a single consonant, i.e.

            (*c and not c) Y -> I

        So 'happy' -> 'happi', but 'enjoy' -> 'enjoy' etc.

        This is a much better rule. Formerly 'enjoy'->'enjoi' and
        'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but
        with this modification that no longer really matters.

        Also, the removal of the contains_vowel(z) condition means
        that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and
        conflate with 'spied', 'tried', 'flies' ...
        """
        return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1)

    def original_condition(stem):
        # Faithful to the paper: (*v*) — the stem contains a vowel.
        return self._contains_vowel(stem)

    # NLTK mode uses the stricter consonant-preceded condition above;
    # ORIGINAL/MARTIN modes use the paper's vowel condition.
    return self._apply_rule_list(
        word,
        [
            (
                "y",
                "i",
                nltk_condition
                if self.mode == self.NLTK_EXTENSIONS
                else original_condition,
            )
        ],
    )
def _step2(self, word):
    """Implements Step 2 from "An algorithm for suffix stripping"

    Maps double suffixes to single ones when the stem measure is
    positive. From the paper:

        (m>0) ATIONAL -> ATE    relational     -> relate
        (m>0) TIONAL  -> TION   conditional    -> condition
        (m>0) ENCI    -> ENCE   valenci        -> valence
        (m>0) ANCI    -> ANCE   hesitanci      -> hesitance
        (m>0) IZER    -> IZE    digitizer      -> digitize
        (m>0) ABLI    -> ABLE   conformabli    -> conformable
        (m>0) ALLI    -> AL     radicalli      -> radical
        (m>0) ENTLI   -> ENT    differentli    -> different
        (m>0) ELI     -> E      vileli         -> vile
        (m>0) OUSLI   -> OUS    analogousli    -> analogous
        (m>0) IZATION -> IZE    vietnamization -> vietnamize
        (m>0) ATION   -> ATE    predication    -> predicate
        (m>0) ATOR    -> ATE    operator       -> operate
        (m>0) ALISM   -> AL     feudalism      -> feudal
        (m>0) IVENESS -> IVE    decisiveness   -> decisive
        (m>0) FULNESS -> FUL    hopefulness    -> hopeful
        (m>0) OUSNESS -> OUS    callousness    -> callous
        (m>0) ALITI   -> AL     formaliti      -> formal
        (m>0) IVITI   -> IVE    sensitiviti    -> sensitive
        (m>0) BILITI  -> BLE    sensibiliti    -> sensible
    """
    if self.mode == self.NLTK_EXTENSIONS:
        # Instead of applying the ALLI -> AL rule after '(a)bli' per
        # the published algorithm, instead we apply it first, and,
        # if it succeeds, run the result through step2 again.
        if word.endswith("alli") and self._has_positive_measure(
            self._replace_suffix(word, "alli", "")
        ):
            return self._step2(self._replace_suffix(word, "alli", "al"))

    # Martin's later revision replaced ABLI -> ABLE with BLI -> BLE;
    # only the faithful ORIGINAL_ALGORITHM mode keeps the old rule.
    bli_rule = ("bli", "ble", self._has_positive_measure)
    abli_rule = ("abli", "able", self._has_positive_measure)

    rules = [
        ("ational", "ate", self._has_positive_measure),
        ("tional", "tion", self._has_positive_measure),
        ("enci", "ence", self._has_positive_measure),
        ("anci", "ance", self._has_positive_measure),
        ("izer", "ize", self._has_positive_measure),
        abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule,
        ("alli", "al", self._has_positive_measure),
        ("entli", "ent", self._has_positive_measure),
        ("eli", "e", self._has_positive_measure),
        ("ousli", "ous", self._has_positive_measure),
        ("ization", "ize", self._has_positive_measure),
        ("ation", "ate", self._has_positive_measure),
        ("ator", "ate", self._has_positive_measure),
        ("alism", "al", self._has_positive_measure),
        ("iveness", "ive", self._has_positive_measure),
        ("fulness", "ful", self._has_positive_measure),
        ("ousness", "ous", self._has_positive_measure),
        ("aliti", "al", self._has_positive_measure),
        ("iviti", "ive", self._has_positive_measure),
        ("biliti", "ble", self._has_positive_measure),
    ]

    if self.mode == self.NLTK_EXTENSIONS:
        rules.append(("fulli", "ful", self._has_positive_measure))

        # The 'l' of the 'logi' -> 'log' rule is put with the stem,
        # so that short stems like 'geo' 'theo' etc work like
        # 'archaeo' 'philo' etc.  NOTE: the condition deliberately
        # closes over `word` and measures word[:-3] (keeping the 'l'),
        # not the stripped stem passed in by _apply_rule_list.
        rules.append(
            ("logi", "log", lambda stem: self._has_positive_measure(word[:-3]))
        )

    if self.mode == self.MARTIN_EXTENSIONS:
        rules.append(("logi", "log", self._has_positive_measure))

    return self._apply_rule_list(word, rules)
def _step3(self, word):
    """Porter step 3: simplify derivational suffixes when m>0.

        (m>0) ICATE -> IC    triplicate  -> triplic
        (m>0) ATIVE ->       formative   -> form
        (m>0) ALIZE -> AL    formalize   -> formal
        (m>0) ICITI -> IC    electriciti -> electric
        (m>0) ICAL  -> IC    electrical  -> electric
        (m>0) FUL   ->       hopeful     -> hope
        (m>0) NESS  ->       goodness    -> good
    """
    # Every rule in this step shares the same (m>0) condition.
    suffix_map = [
        ("icate", "ic"),
        ("ative", ""),
        ("alize", "al"),
        ("iciti", "ic"),
        ("ical", "ic"),
        ("ful", ""),
        ("ness", ""),
    ]
    return self._apply_rule_list(
        word,
        [(old, new, self._has_positive_measure) for old, new in suffix_map],
    )
def _step4(self, word):
    """Porter step 4: delete the remaining single suffixes when m>1.

        (m>1) AL, ANCE, ENCE, ER, IC, ABLE, IBLE, ANT, EMENT, MENT,
        ENT, OU, ISM, ATE, ITI, OUS, IVE, IZE  -> (deleted)
        (m>1 and (*S or *T)) ION -> (deleted)   adoption -> adopt

    e.g. revival -> reviv, allowance -> allow, inference -> infer,
    replacement -> replac, bowdlerize -> bowdler.  After this step
    all that remains is a little tidying up (step 5).
    """

    def measure_gt_1(stem):
        return self._measure(stem) > 1

    # Order matters: "ement" must precede "ment", which must precede
    # "ent", so the longest applicable suffix is matched first.
    rules = [
        (suffix, "", measure_gt_1)
        for suffix in (
            "al", "ance", "ence", "er", "ic", "able",
            "ible", "ant", "ement", "ment", "ent",
        )
    ]
    # (m>1 and (*S or *T)) ION -> : "ion" is only dropped after s or t.
    rules.append(
        ("ion", "", lambda stem: self._measure(stem) > 1 and stem[-1] in ("s", "t"))
    )
    rules.extend(
        (suffix, "", measure_gt_1)
        for suffix in ("ou", "ism", "ate", "iti", "ous", "ive", "ize")
    )
    return self._apply_rule_list(word, rules)
def _step5a(self, word):
    """Porter step 5a: drop a trailing "e".

        (m>1) E ->               probate -> probat, rate -> rate
        (m=1 and not *o) E ->    cease   -> ceas

    Unlike _apply_rule_list (which stops after the first rule whose
    suffix matches), BOTH conditions must be tried against the same
    "e" suffix here: Martin's test vocabulary and reference
    implementations handle consecutive same-suffix rules
    inconsistently (cf. step 1b, where "feed"->"feed" even though the
    ED rule alone would give "fe"), and his examples imply that in
    step 5a the second condition is still tried when the first fails.
    Hence the hand-rolled logic instead of a rule list.
    """
    if not word.endswith("e"):
        return word
    candidate = self._replace_suffix(word, "e", "")
    m = self._measure(candidate)
    if m > 1 or (m == 1 and not self._ends_cvc(candidate)):
        return candidate
    return word
def _step5b(self, word):
    """Porter step 5b: undouble a final "ll".

        (m > 1 and *d and *L) -> single letter
        controll -> control, roll -> roll
    """
    # NOTE: faithful to the reference implementation, the measure is
    # taken over word[:-1] (word minus one "l"), not the bare stem.
    if word.endswith("ll") and self._measure(word[:-1]) > 1:
        return self._replace_suffix(word, "ll", "l")
    return word
def stem(self, word, to_lowercase=True):
    """
    Strip Porter suffixes from `word` and return the stem.

    :param word: the word to be stemmed
    :param to_lowercase: if `to_lowercase=True` the word is lowercased
        before any processing
    :return: the stemmed form, lowercased when `to_lowercase` is True
    """
    stem = word.lower() if to_lowercase else word

    # Bug fix: the irregular-forms pool is keyed on lowercase forms and
    # the original code tested `word in self.pool` while returning
    # `self.pool[stem]`; with to_lowercase=True an uppercase input such
    # as "Dying" therefore missed its pool entry ("dying" -> "die") and
    # fell through to the algorithmic steps. Test the normalized form.
    if self.mode == self.NLTK_EXTENSIONS and stem in self.pool:
        return self.pool[stem]

    if self.mode != self.ORIGINAL_ALGORITHM and len(stem) <= 2:
        # With this line, strings of length 1 or 2 don't go through
        # the stemming process, although no mention is made of this
        # in the published algorithm.
        return stem

    # Apply the five Porter steps in order, each consuming the
    # previous step's output.
    for step in (
        self._step1a,
        self._step1b,
        self._step1c,
        self._step2,
        self._step3,
        self._step4,
        self._step5a,
        self._step5b,
    ):
        stem = step(stem)

    return stem
def __repr__(self):
return "<PorterStemmer>"
The provided code snippet includes necessary dependencies for implementing the `align_words` function. Write a Python function `def align_words( hypothesis: Iterable[str], reference: Iterable[str], stemmer: StemmerI = PorterStemmer(), wordnet: WordNetCorpusReader = wordnet, ) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]` to solve the following problem:
Aligns/matches words in the hypothesis to reference by sequentially applying exact match, stemmed match and wordnet based synonym match. In case there are multiple matches, the match which has the least number of crossings is chosen. :param hypothesis: pre-tokenized hypothesis :param reference: pre-tokenized reference :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) :return: sorted list of matched tuples, unmatched hypothesis list, unmatched reference list
Here is the function:
def align_words(
    hypothesis: Iterable[str],
    reference: Iterable[str],
    stemmer: StemmerI = PorterStemmer(),
    wordnet: WordNetCorpusReader = wordnet,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    """
    Aligns/matches words in the hypothesis to reference by sequentially
    applying exact match, stemmed match and wordnet based synonym match.
    In case there are multiple matches the match which has the least number
    of crossings is chosen.

    :param hypothesis: pre-tokenized hypothesis
    :param reference: pre-tokenized reference
    :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
    :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
    :return: sorted list of matched tuples, unmatched hypothesis list, unmatched reference list
    """
    # Attach positional indices to both token sequences, then delegate
    # the exact/stem/synonym matching stages to the enumerated aligner.
    enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference)
    return _enum_align_words(
        enum_hypothesis_list, enum_reference_list, stemmer=stemmer, wordnet=wordnet
    ) | Aligns/matches words in the hypothesis to reference by sequentially applying exact match, stemmed match and wordnet based synonym match. In case there are multiple matches the match which has the least number of crossing is chosen. :param hypothesis: pre-tokenized hypothesis :param reference: pre-tokenized reference :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) :return: sorted list of matched tuples, unmatched hypothesis list, unmatched reference list
170,609 | from itertools import chain, product
from typing import Callable, Iterable, List, Tuple
from nltk.corpus import WordNetCorpusReader, wordnet
from nltk.stem.api import StemmerI
from nltk.stem.porter import PorterStemmer
def single_meteor_score(
    reference: Iterable[str],
    hypothesis: Iterable[str],
    preprocess: Callable[[str], str] = str.lower,
    stemmer: StemmerI = PorterStemmer(),
    wordnet: WordNetCorpusReader = wordnet,
    alpha: float = 0.9,
    beta: float = 3.0,
    gamma: float = 0.5,
) -> float:
    """
    Calculates the METEOR score for a single hypothesis/reference pair,
    as per "Meteor: An Automatic Metric for MT Evaluation with High
    Levels of Correlation with Human Judgments" by Alon Lavie and Abhaya
    Agarwal, in Proceedings of ACL.
    https://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf

    >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', 'that', 'the', 'military', 'always', 'obeys', 'the', 'commands', 'of', 'the', 'party']
    >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands']
    >>> round(single_meteor_score(reference1, hypothesis1),4)
    0.6944

    If no words match during the alignment the method returns a score
    of 0. Returning zero instead of raising a division-by-zero error is
    safe, as no match usually implies a bad translation.

    >>> round(single_meteor_score(['this', 'is', 'a', 'cat'], ['non', 'matching', 'hypothesis']),4)
    0.0

    :param reference: pre-tokenized reference
    :param hypothesis: pre-tokenized hypothesis
    :param preprocess: preprocessing function (default str.lower)
    :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
    :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
    :param alpha: parameter for controlling relative weights of precision and recall.
    :param beta: parameter for controlling shape of penalty as a
        function of fragmentation.
    :param gamma: relative weight assigned to fragmentation penalty.
    :return: The sentence-level METEOR score.
    """
    enum_hypothesis, enum_reference = _generate_enums(
        hypothesis, reference, preprocess=preprocess
    )
    translation_length = len(enum_hypothesis)
    reference_length = len(enum_reference)
    matches, _, _ = _enum_align_words(
        enum_hypothesis, enum_reference, stemmer=stemmer, wordnet=wordnet
    )
    matches_count = len(matches)

    # Guard clauses replace the original try/except ZeroDivisionError:
    # the only denominators below are the two lengths and matches_count
    # (the f-mean denominator is zero exactly when matches_count is 0,
    # since precision and recall are non-negative).
    if translation_length == 0 or reference_length == 0 or matches_count == 0:
        return 0.0

    precision = matches_count / translation_length
    recall = matches_count / reference_length
    fmean = (precision * recall) / (alpha * precision + (1 - alpha) * recall)
    frag_frac = _count_chunks(matches) / matches_count
    penalty = gamma * frag_frac**beta
    return (1 - penalty) * fmean
class Callable(BaseTypingInstance):
    # Jedi gradual-typing value for a `typing.Callable[[params], result]`
    # annotation. NOTE(review): depends on project types
    # (BaseTypingInstance, ValueSet, NO_VALUES, debug) not visible here.
    def py__call__(self, arguments):
        """
        def x() -> Callable[[Callable[..., _T]], _T]: ...
        """
        # The 0th index are the arguments.
        try:
            param_values = self._generics_manager[0]
            result_values = self._generics_manager[1]
        except IndexError:
            debug.warning('Callable[...] defined without two arguments')
            return NO_VALUES
        else:
            # Imported lazily; presumably avoids a circular import — TODO confirm.
            from jedi.inference.gradual.annotation import infer_return_for_callable
            return infer_return_for_callable(arguments, param_values, result_values)

    def py__get__(self, instance, class_value):
        # Descriptor access yields the Callable value itself, unchanged.
        return ValueSet([self])
class Iterable(Protocol[_T_co]):
    # Structural protocol (typeshed-style stub): any object whose
    # __iter__ returns an Iterator of _T_co items conforms.
    def __iter__(self) -> Iterator[_T_co]: ...
class StemmerI(metaclass=ABCMeta):
    """
    Interface for stemmers: processors that remove morphological
    affixes from words, leaving only the word stem.
    """

    def stem(self, token):
        """
        Strip affixes from the given token and return the stem.

        :param token: The token that should be stemmed.
        :type token: str
        """
class PorterStemmer(StemmerI):
"""
A word stemmer based on the Porter stemming algorithm.
Porter, M. "An algorithm for suffix stripping."
Program 14.3 (1980): 130-137.
See https://www.tartarus.org/~martin/PorterStemmer/ for the homepage
of the algorithm.
Martin Porter has endorsed several modifications to the Porter
algorithm since writing his original paper, and those extensions are
included in the implementations on his website. Additionally, others
have proposed further improvements to the algorithm, including NLTK
contributors. There are thus three modes that can be selected by
passing the appropriate constant to the class constructor's `mode`
attribute:
- PorterStemmer.ORIGINAL_ALGORITHM
An implementation that is faithful to the original paper.
Note that Martin Porter has deprecated this version of the
algorithm. Martin distributes implementations of the Porter
Stemmer in many languages, hosted at:
https://www.tartarus.org/~martin/PorterStemmer/
and all of these implementations include his extensions. He
strongly recommends against using the original, published
version of the algorithm; only use this mode if you clearly
understand why you are choosing to do so.
- PorterStemmer.MARTIN_EXTENSIONS
An implementation that only uses the modifications to the
algorithm that are included in the implementations on Martin
Porter's website. He has declared Porter frozen, so the
behaviour of those implementations should never change.
- PorterStemmer.NLTK_EXTENSIONS (default)
An implementation that includes further improvements devised by
NLTK contributors or taken from other modified implementations
found on the web.
For the best stemming, you should use the default NLTK_EXTENSIONS
version. However, if you need to get the same results as either the
original algorithm or one of Martin Porter's hosted versions for
compatibility with an existing implementation or dataset, you can use
one of the other modes instead.
"""
# Modes the Stemmer can be instantiated in
NLTK_EXTENSIONS = "NLTK_EXTENSIONS"
MARTIN_EXTENSIONS = "MARTIN_EXTENSIONS"
ORIGINAL_ALGORITHM = "ORIGINAL_ALGORITHM"
def __init__(self, mode=NLTK_EXTENSIONS):
if mode not in (
self.NLTK_EXTENSIONS,
self.MARTIN_EXTENSIONS,
self.ORIGINAL_ALGORITHM,
):
raise ValueError(
"Mode must be one of PorterStemmer.NLTK_EXTENSIONS, "
"PorterStemmer.MARTIN_EXTENSIONS, or "
"PorterStemmer.ORIGINAL_ALGORITHM"
)
self.mode = mode
if self.mode == self.NLTK_EXTENSIONS:
# This is a table of irregular forms. It is quite short,
# but still reflects the errors actually drawn to Martin
# Porter's attention over a 20 year period!
irregular_forms = {
"sky": ["sky", "skies"],
"die": ["dying"],
"lie": ["lying"],
"tie": ["tying"],
"news": ["news"],
"inning": ["innings", "inning"],
"outing": ["outings", "outing"],
"canning": ["cannings", "canning"],
"howe": ["howe"],
"proceed": ["proceed"],
"exceed": ["exceed"],
"succeed": ["succeed"],
}
self.pool = {}
for key in irregular_forms:
for val in irregular_forms[key]:
self.pool[val] = key
self.vowels = frozenset(["a", "e", "i", "o", "u"])
def _is_consonant(self, word, i):
"""Returns True if word[i] is a consonant, False otherwise
A consonant is defined in the paper as follows:
A consonant in a word is a letter other than A, E, I, O or
U, and other than Y preceded by a consonant. (The fact that
the term `consonant' is defined to some extent in terms of
itself does not make it ambiguous.) So in TOY the consonants
are T and Y, and in SYZYGY they are S, Z and G. If a letter
is not a consonant it is a vowel.
"""
if word[i] in self.vowels:
return False
if word[i] == "y":
if i == 0:
return True
else:
return not self._is_consonant(word, i - 1)
return True
def _measure(self, stem):
r"""Returns the 'measure' of stem, per definition in the paper
From the paper:
A consonant will be denoted by c, a vowel by v. A list
ccc... of length greater than 0 will be denoted by C, and a
list vvv... of length greater than 0 will be denoted by V.
Any word, or part of a word, therefore has one of the four
forms:
CVCV ... C
CVCV ... V
VCVC ... C
VCVC ... V
These may all be represented by the single form
[C]VCVC ... [V]
where the square brackets denote arbitrary presence of their
contents. Using (VC){m} to denote VC repeated m times, this
may again be written as
[C](VC){m}[V].
m will be called the \measure\ of any word or word part when
represented in this form. The case m = 0 covers the null
word. Here are some examples:
m=0 TR, EE, TREE, Y, BY.
m=1 TROUBLE, OATS, TREES, IVY.
m=2 TROUBLES, PRIVATE, OATEN, ORRERY.
"""
cv_sequence = ""
# Construct a string of 'c's and 'v's representing whether each
# character in `stem` is a consonant or a vowel.
# e.g. 'falafel' becomes 'cvcvcvc',
# 'architecture' becomes 'vcccvcvccvcv'
for i in range(len(stem)):
if self._is_consonant(stem, i):
cv_sequence += "c"
else:
cv_sequence += "v"
# Count the number of 'vc' occurrences, which is equivalent to
# the number of 'VC' occurrences in Porter's reduced form in the
# docstring above, which is in turn equivalent to `m`
return cv_sequence.count("vc")
def _has_positive_measure(self, stem):
return self._measure(stem) > 0
def _contains_vowel(self, stem):
"""Returns True if stem contains a vowel, else False"""
for i in range(len(stem)):
if not self._is_consonant(stem, i):
return True
return False
def _ends_double_consonant(self, word):
"""Implements condition *d from the paper
Returns True if word ends with a double consonant
"""
return (
len(word) >= 2
and word[-1] == word[-2]
and self._is_consonant(word, len(word) - 1)
)
def _ends_cvc(self, word):
"""Implements condition *o from the paper
From the paper:
*o - the stem ends cvc, where the second c is not W, X or Y
(e.g. -WIL, -HOP).
"""
return (
len(word) >= 3
and self._is_consonant(word, len(word) - 3)
and not self._is_consonant(word, len(word) - 2)
and self._is_consonant(word, len(word) - 1)
and word[-1] not in ("w", "x", "y")
) or (
self.mode == self.NLTK_EXTENSIONS
and len(word) == 2
and not self._is_consonant(word, 0)
and self._is_consonant(word, 1)
)
def _replace_suffix(self, word, suffix, replacement):
"""Replaces `suffix` of `word` with `replacement"""
assert word.endswith(suffix), "Given word doesn't end with given suffix"
if suffix == "":
return word + replacement
else:
return word[: -len(suffix)] + replacement
def _apply_rule_list(self, word, rules):
"""Applies the first applicable suffix-removal rule to the word
Takes a word and a list of suffix-removal rules represented as
3-tuples, with the first element being the suffix to remove,
the second element being the string to replace it with, and the
final element being the condition for the rule to be applicable,
or None if the rule is unconditional.
"""
for rule in rules:
suffix, replacement, condition = rule
if suffix == "*d" and self._ends_double_consonant(word):
stem = word[:-2]
if condition is None or condition(stem):
return stem + replacement
else:
# Don't try any further rules
return word
if word.endswith(suffix):
stem = self._replace_suffix(word, suffix, "")
if condition is None or condition(stem):
return stem + replacement
else:
# Don't try any further rules
return word
return word
def _step1a(self, word):
"""Implements Step 1a from "An algorithm for suffix stripping"
From the paper:
SSES -> SS caresses -> caress
IES -> I ponies -> poni
ties -> ti
SS -> SS caress -> caress
S -> cats -> cat
"""
# this NLTK-only rule extends the original algorithm, so
# that 'flies'->'fli' but 'dies'->'die' etc
if self.mode == self.NLTK_EXTENSIONS:
if word.endswith("ies") and len(word) == 4:
return self._replace_suffix(word, "ies", "ie")
return self._apply_rule_list(
word,
[
("sses", "ss", None), # SSES -> SS
("ies", "i", None), # IES -> I
("ss", "ss", None), # SS -> SS
("s", "", None), # S ->
],
)
def _step1b(self, word):
"""Implements Step 1b from "An algorithm for suffix stripping"
From the paper:
(m>0) EED -> EE feed -> feed
agreed -> agree
(*v*) ED -> plastered -> plaster
bled -> bled
(*v*) ING -> motoring -> motor
sing -> sing
If the second or third of the rules in Step 1b is successful,
the following is done:
AT -> ATE conflat(ed) -> conflate
BL -> BLE troubl(ed) -> trouble
IZ -> IZE siz(ed) -> size
(*d and not (*L or *S or *Z))
-> single letter
hopp(ing) -> hop
tann(ed) -> tan
fall(ing) -> fall
hiss(ing) -> hiss
fizz(ed) -> fizz
(m=1 and *o) -> E fail(ing) -> fail
fil(ing) -> file
The rule to map to a single letter causes the removal of one of
the double letter pair. The -E is put back on -AT, -BL and -IZ,
so that the suffixes -ATE, -BLE and -IZE can be recognised
later. This E may be removed in step 4.
"""
# this NLTK-only block extends the original algorithm, so that
# 'spied'->'spi' but 'died'->'die' etc
if self.mode == self.NLTK_EXTENSIONS:
if word.endswith("ied"):
if len(word) == 4:
return self._replace_suffix(word, "ied", "ie")
else:
return self._replace_suffix(word, "ied", "i")
# (m>0) EED -> EE
if word.endswith("eed"):
stem = self._replace_suffix(word, "eed", "")
if self._measure(stem) > 0:
return stem + "ee"
else:
return word
rule_2_or_3_succeeded = False
for suffix in ["ed", "ing"]:
if word.endswith(suffix):
intermediate_stem = self._replace_suffix(word, suffix, "")
if self._contains_vowel(intermediate_stem):
rule_2_or_3_succeeded = True
break
if not rule_2_or_3_succeeded:
return word
return self._apply_rule_list(
intermediate_stem,
[
("at", "ate", None), # AT -> ATE
("bl", "ble", None), # BL -> BLE
("iz", "ize", None), # IZ -> IZE
# (*d and not (*L or *S or *Z))
# -> single letter
(
"*d",
intermediate_stem[-1],
lambda stem: intermediate_stem[-1] not in ("l", "s", "z"),
),
# (m=1 and *o) -> E
(
"",
"e",
lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)),
),
],
)
def _step1c(self, word):
"""Implements Step 1c from "An algorithm for suffix stripping"
From the paper:
Step 1c
(*v*) Y -> I happy -> happi
sky -> sky
"""
def nltk_condition(stem):
"""
This has been modified from the original Porter algorithm so
that y->i is only done when y is preceded by a consonant,
but not if the stem is only a single consonant, i.e.
(*c and not c) Y -> I
So 'happy' -> 'happi', but
'enjoy' -> 'enjoy' etc
This is a much better rule. Formerly 'enjoy'->'enjoi' and
'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but
with this modification that no longer really matters.
Also, the removal of the contains_vowel(z) condition means
that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and
conflate with 'spied', 'tried', 'flies' ...
"""
return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1)
def original_condition(stem):
return self._contains_vowel(stem)
return self._apply_rule_list(
word,
[
(
"y",
"i",
nltk_condition
if self.mode == self.NLTK_EXTENSIONS
else original_condition,
)
],
)
def _step2(self, word):
"""Implements Step 2 from "An algorithm for suffix stripping"
From the paper:
Step 2
(m>0) ATIONAL -> ATE relational -> relate
(m>0) TIONAL -> TION conditional -> condition
rational -> rational
(m>0) ENCI -> ENCE valenci -> valence
(m>0) ANCI -> ANCE hesitanci -> hesitance
(m>0) IZER -> IZE digitizer -> digitize
(m>0) ABLI -> ABLE conformabli -> conformable
(m>0) ALLI -> AL radicalli -> radical
(m>0) ENTLI -> ENT differentli -> different
(m>0) ELI -> E vileli - > vile
(m>0) OUSLI -> OUS analogousli -> analogous
(m>0) IZATION -> IZE vietnamization -> vietnamize
(m>0) ATION -> ATE predication -> predicate
(m>0) ATOR -> ATE operator -> operate
(m>0) ALISM -> AL feudalism -> feudal
(m>0) IVENESS -> IVE decisiveness -> decisive
(m>0) FULNESS -> FUL hopefulness -> hopeful
(m>0) OUSNESS -> OUS callousness -> callous
(m>0) ALITI -> AL formaliti -> formal
(m>0) IVITI -> IVE sensitiviti -> sensitive
(m>0) BILITI -> BLE sensibiliti -> sensible
"""
if self.mode == self.NLTK_EXTENSIONS:
# Instead of applying the ALLI -> AL rule after '(a)bli' per
# the published algorithm, instead we apply it first, and,
# if it succeeds, run the result through step2 again.
if word.endswith("alli") and self._has_positive_measure(
self._replace_suffix(word, "alli", "")
):
return self._step2(self._replace_suffix(word, "alli", "al"))
bli_rule = ("bli", "ble", self._has_positive_measure)
abli_rule = ("abli", "able", self._has_positive_measure)
rules = [
("ational", "ate", self._has_positive_measure),
("tional", "tion", self._has_positive_measure),
("enci", "ence", self._has_positive_measure),
("anci", "ance", self._has_positive_measure),
("izer", "ize", self._has_positive_measure),
abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule,
("alli", "al", self._has_positive_measure),
("entli", "ent", self._has_positive_measure),
("eli", "e", self._has_positive_measure),
("ousli", "ous", self._has_positive_measure),
("ization", "ize", self._has_positive_measure),
("ation", "ate", self._has_positive_measure),
("ator", "ate", self._has_positive_measure),
("alism", "al", self._has_positive_measure),
("iveness", "ive", self._has_positive_measure),
("fulness", "ful", self._has_positive_measure),
("ousness", "ous", self._has_positive_measure),
("aliti", "al", self._has_positive_measure),
("iviti", "ive", self._has_positive_measure),
("biliti", "ble", self._has_positive_measure),
]
if self.mode == self.NLTK_EXTENSIONS:
rules.append(("fulli", "ful", self._has_positive_measure))
# The 'l' of the 'logi' -> 'log' rule is put with the stem,
# so that short stems like 'geo' 'theo' etc work like
# 'archaeo' 'philo' etc.
rules.append(
("logi", "log", lambda stem: self._has_positive_measure(word[:-3]))
)
if self.mode == self.MARTIN_EXTENSIONS:
rules.append(("logi", "log", self._has_positive_measure))
return self._apply_rule_list(word, rules)
def _step3(self, word):
"""Implements Step 3 from "An algorithm for suffix stripping"
From the paper:
Step 3
(m>0) ICATE -> IC triplicate -> triplic
(m>0) ATIVE -> formative -> form
(m>0) ALIZE -> AL formalize -> formal
(m>0) ICITI -> IC electriciti -> electric
(m>0) ICAL -> IC electrical -> electric
(m>0) FUL -> hopeful -> hope
(m>0) NESS -> goodness -> good
"""
return self._apply_rule_list(
word,
[
("icate", "ic", self._has_positive_measure),
("ative", "", self._has_positive_measure),
("alize", "al", self._has_positive_measure),
("iciti", "ic", self._has_positive_measure),
("ical", "ic", self._has_positive_measure),
("ful", "", self._has_positive_measure),
("ness", "", self._has_positive_measure),
],
)
def _step4(self, word):
    """Apply Step 4 from "An algorithm for suffix stripping".

    From the paper:

        Step 4

            (m>1) AL    ->                  revival        ->  reviv
            (m>1) ANCE  ->                  allowance      ->  allow
            (m>1) ENCE  ->                  inference      ->  infer
            (m>1) ER    ->                  airliner       ->  airlin
            (m>1) IC    ->                  gyroscopic     ->  gyroscop
            (m>1) ABLE  ->                  adjustable     ->  adjust
            (m>1) IBLE  ->                  defensible     ->  defens
            (m>1) ANT   ->                  irritant       ->  irrit
            (m>1) EMENT ->                  replacement    ->  replac
            (m>1) MENT  ->                  adjustment     ->  adjust
            (m>1) ENT   ->                  dependent      ->  depend
            (m>1 and (*S or *T)) ION ->     adoption       ->  adopt
            (m>1) OU    ->                  homologou      ->  homolog
            (m>1) ISM   ->                  communism      ->  commun
            (m>1) ATE   ->                  activate       ->  activ
            (m>1) ITI   ->                  angulariti     ->  angular
            (m>1) OUS   ->                  homologous     ->  homolog
            (m>1) IVE   ->                  effective      ->  effect
            (m>1) IZE   ->                  bowdlerize     ->  bowdler

    The suffixes are now removed. All that remains is a little tidying up.
    """

    def measure_gt_1(stem):
        # Common Step 4 condition: the stem's measure must exceed 1.
        return self._measure(stem) > 1

    def ion_condition(stem):
        # ION is only stripped when the remaining stem ends in 's' or 't'.
        return self._measure(stem) > 1 and stem[-1] in ("s", "t")

    # Rule order matters: keep the paper's ordering, with the special
    # "ion" rule in its original position between "ent" and "ou".
    before_ion = (
        "al", "ance", "ence", "er", "ic", "able",
        "ible", "ant", "ement", "ment", "ent",
    )
    after_ion = ("ou", "ism", "ate", "iti", "ous", "ive", "ize")

    rules = [(suffix, "", measure_gt_1) for suffix in before_ion]
    rules.append(("ion", "", ion_condition))
    rules.extend((suffix, "", measure_gt_1) for suffix in after_ion)
    return self._apply_rule_list(word, rules)
def _step5a(self, word):
    """Apply Step 5a from "An algorithm for suffix stripping".

    From the paper:

        Step 5a

            (m>1) E     ->                  probate        ->  probat
                                            rate           ->  rate
            (m=1 and not *o) E ->           cease          ->  ceas
    """
    # Martin's test vocabulary and reference implementations are
    # inconsistent about consecutive rules that match the same suffix:
    # in step 2b only the first matching rule's condition is tried
    # ("feed" -> "feed"), but here BOTH conditions must be tried
    # (otherwise the second rule would be redundant).  The paper never
    # states this explicitly; it has to be inferred from the examples.
    # That is why this step cannot go through _apply_rule_list.
    if not word.endswith("e"):
        return word
    candidate = self._replace_suffix(word, "e", "")
    measure = self._measure(candidate)
    if measure > 1 or (measure == 1 and not self._ends_cvc(candidate)):
        return candidate
    return word
def _step5b(self, word):
    """Apply Step 5b from "An algorithm for suffix stripping".

    From the paper:

        Step 5b

            (m > 1 and *d and *L) -> single letter
                                            controll       ->  control
                                            roll           ->  roll
    """

    def double_l_condition(stem):
        # Deliberately measures word[:-1] (the word minus one 'l'),
        # not the stem passed in, matching Martin's original behaviour.
        return self._measure(word[:-1]) > 1

    return self._apply_rule_list(word, [("ll", "l", double_l_condition)])
def stem(self, word, to_lowercase=True):
    """Strip affixes from ``word`` and return the stem.

    :param word: the word to stem
    :param to_lowercase: if ``to_lowercase=True`` the word is lowercased
        before stemming, so the returned stem is always lowercase
    """
    stem = word.lower() if to_lowercase else word

    # Bug fix: key the irregular-forms pool lookup on the *normalized*
    # form.  The previous code tested ``word in self.pool`` but indexed
    # with ``self.pool[stem]``, so with to_lowercase=True a mixed-case
    # input missed pool entries (and a ``word``-keyed hit whose lowered
    # form was absent would have raised KeyError).
    if self.mode == self.NLTK_EXTENSIONS and stem in self.pool:
        return self.pool[stem]

    if self.mode != self.ORIGINAL_ALGORITHM and len(stem) <= 2:
        # With this line, strings of length 1 or 2 don't go through
        # the stemming process, although no mention is made of this
        # in the published algorithm.
        return stem

    # Apply the algorithm's steps in the order given in the paper.
    for step in (
        self._step1a,
        self._step1b,
        self._step1c,
        self._step2,
        self._step3,
        self._step4,
        self._step5a,
        self._step5b,
    ):
        stem = step(stem)

    return stem
def __repr__(self):
    """Return a fixed tag identifying this stemmer type."""
    return "<PorterStemmer>"
The provided code snippet includes necessary dependencies for implementing the `meteor_score` function. Write a Python function `def meteor_score( references: Iterable[Iterable[str]], hypothesis: Iterable[str], preprocess: Callable[[str], str] = str.lower, stemmer: StemmerI = PorterStemmer(), wordnet: WordNetCorpusReader = wordnet, alpha: float = 0.9, beta: float = 3.0, gamma: float = 0.5, ) -> float` to solve the following problem:
Calculates METEOR score for hypothesis with multiple references as described in "Meteor: An Automatic Metric for MT Evaluation with HighLevels of Correlation with Human Judgments" by Alon Lavie and Abhaya Agarwal, in Proceedings of ACL. https://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf In case of multiple references the best score is chosen. This method iterates over single_meteor_score and picks the best pair among all the references for a given hypothesis >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', 'that', 'the', 'military', 'always', 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', 'forever', 'hearing', 'the', 'activity', 'guidebook', 'that', 'party', 'direct'] >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands'] >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', 'guarantees', 'the', 'military', 'forces', 'always', 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', 'army', 'always', 'to', 'heed', 'the', 'directions', 'of', 'the', 'party'] >>> round(meteor_score([reference1, reference2, reference3], hypothesis1),4) 0.6944 If there is no words match during the alignment the method returns the score as 0. We can safely return a zero instead of raising a division by zero error as no match usually implies a bad translation. 
>>> round(meteor_score([['this', 'is', 'a', 'cat']], ['non', 'matching', 'hypothesis']),4) 0.0 :param references: pre-tokenized reference sentences :param hypothesis: a pre-tokenized hypothesis sentence :param preprocess: preprocessing function (default str.lower) :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) :param alpha: parameter for controlling relative weights of precision and recall. :param beta: parameter for controlling shape of penalty as a function of as a function of fragmentation. :param gamma: relative weight assigned to fragmentation penalty. :return: The sentence-level METEOR score.
Here is the function:
def meteor_score(
    references: Iterable[Iterable[str]],
    hypothesis: Iterable[str],
    preprocess: Callable[[str], str] = str.lower,
    stemmer: StemmerI = PorterStemmer(),
    wordnet: WordNetCorpusReader = wordnet,
    alpha: float = 0.9,
    beta: float = 3.0,
    gamma: float = 0.5,
) -> float:
    """
    Calculate the METEOR score for a hypothesis against multiple
    references, as described in "Meteor: An Automatic Metric for MT
    Evaluation with HighLevels of Correlation with Human Judgments" by
    Alon Lavie and Abhaya Agarwal, in Proceedings of ACL.
    https://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf

    Given multiple references, the best score is chosen: this method
    evaluates single_meteor_score against every reference and returns
    the maximum over all hypothesis/reference pairs.

    >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', 'that', 'the', 'military', 'always', 'obeys', 'the', 'commands', 'of', 'the', 'party']

    >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', 'forever', 'hearing', 'the', 'activity', 'guidebook', 'that', 'party', 'direct']

    >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands']

    >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', 'guarantees', 'the', 'military', 'forces', 'always', 'being', 'under', 'the', 'command', 'of', 'the', 'Party']

    >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', 'army', 'always', 'to', 'heed', 'the', 'directions', 'of', 'the', 'party']

    >>> round(meteor_score([reference1, reference2, reference3], hypothesis1),4)
    0.6944

    If no words match during the alignment, the score is 0.  A zero is
    returned instead of raising a division-by-zero error because no
    match usually implies a bad translation.

    >>> round(meteor_score([['this', 'is', 'a', 'cat']], ['non', 'matching', 'hypothesis']),4)
    0.0

    :param references: pre-tokenized reference sentences
    :param hypothesis: a pre-tokenized hypothesis sentence
    :param preprocess: preprocessing function (default str.lower)
    :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
    :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
    :param alpha: parameter for controlling relative weights of precision and recall.
    :param beta: parameter for controlling shape of penalty as a function
                 of as a function of fragmentation.
    :param gamma: relative weight assigned to fragmentation penalty.
    :return: The sentence-level METEOR score.
    """
    # Score the hypothesis against every reference independently,
    # then keep the best pairing.  max() on an empty reference list
    # raises ValueError, matching the behaviour callers expect.
    per_reference_scores = [
        single_meteor_score(
            reference,
            hypothesis,
            preprocess=preprocess,
            stemmer=stemmer,
            wordnet=wordnet,
            alpha=alpha,
            beta=beta,
            gamma=gamma,
        )
        for reference in references
    ]
    return max(per_reference_scores)
>>> round(meteor_score([['this', 'is', 'a', 'cat']], ['non', 'matching', 'hypothesis']),4) 0.0 :param references: pre-tokenized reference sentences :param hypothesis: a pre-tokenized hypothesis sentence :param preprocess: preprocessing function (default str.lower) :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) :param alpha: parameter for controlling relative weights of precision and recall. :param beta: parameter for controlling shape of penalty as a function of as a function of fragmentation. :param gamma: relative weight assigned to fragmentation penalty. :return: The sentence-level METEOR score. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.