170,610
import math from itertools import islice from nltk.util import choose, ngrams def sentence_ribes(references, hypothesis, alpha=0.25, beta=0.10): """ The RIBES (Rank-based Intuitive Bilingual Evaluation Score) from Hideki Isozaki, Tsutomu Hirao, Kevin Duh, Katsuhito Sudoh and Hajime Tsukada. 2010. "Automatic Evaluation of Translation Quality for Distant Language Pairs". In Proceedings of EMNLP. https://www.aclweb.org/anthology/D/D10/D10-1092.pdf The generic RIBES score used in shared tasks, e.g. the Workshop on Asian Translation (WAT), uses the following calculation: RIBES = kendall_tau * (p1**alpha) * (bp**beta) Please note that this re-implementation differs from the official RIBES implementation and though it emulates the results as described in the original paper, there are further optimizations implemented in the official RIBES script. Users are encouraged to use the official RIBES script instead of this implementation when evaluating their machine translation systems. Refer to https://www.kecl.ntt.co.jp/icl/lirg/ribes/ for the official script. :param references: a list of reference sentences :type references: list(list(str)) :param hypothesis: a hypothesis sentence :type hypothesis: list(str) :param alpha: hyperparameter used as a prior for the unigram precision. :type alpha: float :param beta: hyperparameter used as a prior for the brevity penalty. :type beta: float :return: The best RIBES score from one of the references. :rtype: float """ best_ribes = -1.0 # Calculates RIBES for each reference and returns the best score. for reference in references: # Collects the *worder* from the ranked correlation alignments. worder = word_rank_alignment(reference, hypothesis) nkt = kendall_tau(worder) # Calculates the brevity penalty bp = min(1.0, math.exp(1.0 - len(reference) / len(hypothesis))) # Calculates the unigram precision, *p1* p1 = len(worder) / len(hypothesis) _ribes = nkt * (p1**alpha) * (bp**beta) if _ribes > best_ribes: # Keeps the best score. best_ribes = _ribes return best_ribes The provided code snippet includes necessary dependencies for implementing the `corpus_ribes` function. Write a Python function `def corpus_ribes(list_of_references, hypotheses, alpha=0.25, beta=0.10)` to solve the following problem: This function "calculates RIBES for a system output (hypothesis) with multiple references, and returns "best" score among multi-references and individual scores. The scores are corpus-wise, i.e., averaged by the number of sentences." (c.f. RIBES version 1.03.1 code). Different from BLEU's micro-average precision, RIBES calculates the macro-average precision by averaging the best RIBES score for each pair of hypothesis and its corresponding references >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', 'forever', ... 'heed', 'Party', 'commands'] >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ...
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> round(corpus_ribes(list_of_references, hypotheses),4) 0.3597 :param references: a corpus of lists of reference sentences, w.r.t. hypotheses :type references: list(list(list(str))) :param hypotheses: a list of hypothesis sentences :type hypotheses: list(list(str)) :param alpha: hyperparameter used as a prior for the unigram precision. :type alpha: float :param beta: hyperparameter used as a prior for the brevity penalty. :type beta: float :return: The best ribes score from one of the references. :rtype: float Here is the function: def corpus_ribes(list_of_references, hypotheses, alpha=0.25, beta=0.10): """ This function "calculates RIBES for a system output (hypothesis) with multiple references, and returns "best" score among multi-references and individual scores. The scores are corpus-wise, i.e., averaged by the number of sentences." (c.f. RIBES version 1.03.1 code). Different from BLEU's micro-average precision, RIBES calculates the macro-average precision by averaging the best RIBES score for each pair of hypothesis and its corresponding references >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', 'forever', ... 'heed', 'Party', 'commands'] >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> round(corpus_ribes(list_of_references, hypotheses),4) 0.3597 :param references: a corpus of lists of reference sentences, w.r.t. hypotheses :type references: list(list(list(str))) :param hypotheses: a list of hypothesis sentences :type hypotheses: list(list(str)) :param alpha: hyperparameter used as a prior for the unigram precision. :type alpha: float :param beta: hyperparameter used as a prior for the brevity penalty. :type beta: float :return: The best ribes score from one of the references. :rtype: float """ corpus_best_ribes = 0.0 # Iterate through each hypothesis and their corresponding references. for references, hypothesis in zip(list_of_references, hypotheses): corpus_best_ribes += sentence_ribes(references, hypothesis, alpha, beta) return corpus_best_ribes / len(hypotheses)
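As the prompt above notes, corpus_ribes is a macro-average: it calls sentence_ribes on each hypothesis/reference pair and divides by the number of sentences. A minimal sketch of that relationship, assuming an NLTK installation that exposes these functions as nltk.translate.ribes_score:

```python
# Minimal check that corpus_ribes is the macro-average of per-sentence best
# RIBES scores (assumes nltk is installed and provides these functions).
from nltk.translate.ribes_score import corpus_ribes, sentence_ribes

hyp1 = 'It is a guide to action which ensures that the military always obeys the commands of the party'.split()
ref1a = 'It is a guide to action that ensures that the military will forever heed Party commands'.split()
hyp2 = 'he read the book because he was interested in world history'.split()
ref2a = 'he was interested in world history because he read the book'.split()

list_of_references = [[ref1a], [ref2a]]
hypotheses = [hyp1, hyp2]

macro_avg = sum(
    sentence_ribes(refs, hyp) for refs, hyp in zip(list_of_references, hypotheses)
) / len(hypotheses)
# Both lines should print the same number.
print(round(corpus_ribes(list_of_references, hypotheses), 4))
print(round(macro_avg, 4))
```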
This function "calculates RIBES for a system output (hypothesis) with multiple references, and returns "best" score among multi-references and individual scores. The scores are corpus-wise, i.e., averaged by the number of sentences." (c.f. RIBES version 1.03.1 code). Different from BLEU's micro-average precision, RIBES calculates the macro-average precision by averaging the best RIBES score for each pair of hypothesis and its corresponding references >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', 'forever', ... 'heed', 'Party', 'commands'] >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> round(corpus_ribes(list_of_references, hypotheses),4) 0.3597 :param references: a corpus of lists of reference sentences, w.r.t. hypotheses :type references: list(list(list(str))) :param hypotheses: a list of hypothesis sentences :type hypotheses: list(list(str)) :param alpha: hyperparameter used as a prior for the unigram precision. :type alpha: float :param beta: hyperparameter used as a prior for the brevity penalty. :type beta: float :return: The best ribes score from one of the references. :rtype: float
170,611
import math from itertools import islice from nltk.util import choose, ngrams def choose(n, k): """ This function is a fast way to calculate binomial coefficients, commonly known as nCk, i.e. the number of combinations of n things taken k at a time. (https://en.wikipedia.org/wiki/Binomial_coefficient). This is equivalent to *scipy.special.comb()* with long integer computation, but this implementation is faster, see https://github.com/nltk/nltk/issues/1181 >>> choose(4, 2) 6 >>> choose(6, 2) 15 :param n: The number of things. :type n: int :param k: The number of things taken at a time. :type k: int """ if 0 <= k <= n: ntok, ktok = 1, 1 for t in range(1, min(k, n - k) + 1): ntok *= n ktok *= t n -= 1 return ntok // ktok else: return 0 The provided code snippet includes necessary dependencies for implementing the `spearman_rho` function. Write a Python function `def spearman_rho(worder, normalize=True)` to solve the following problem: Calculates the Spearman's Rho correlation coefficient given the *worder* list of word alignment from word_rank_alignment(), using the formula: rho = 1 - sum(d**2) / choose(len(worder)+1, 3) where d is the difference between each index in the *worder* list and the corresponding original word index from the reference sentence. Using the (H0,R0) and (H5, R5) example from the paper >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] >>> round(spearman_rho(worder, normalize=False), 3) -0.591 >>> round(spearman_rho(worder), 3) 0.205 :param worder: The worder list output from word_rank_alignment :type worder: list(int) Here is the function: def spearman_rho(worder, normalize=True): """ Calculates the Spearman's Rho correlation coefficient given the *worder* list of word alignment from word_rank_alignment(), using the formula: rho = 1 - sum(d**2) / choose(len(worder)+1, 3) where d is the difference between each index in the *worder* list and the corresponding original word index from the reference sentence. Using the (H0,R0) and (H5, R5) example from the paper >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] >>> round(spearman_rho(worder, normalize=False), 3) -0.591 >>> round(spearman_rho(worder), 3) 0.205 :param worder: The worder list output from word_rank_alignment :type worder: list(int) """ worder_len = len(worder) sum_d_square = sum((wi - i) ** 2 for wi, i in zip(worder, range(worder_len))) rho = 1 - sum_d_square / choose(worder_len + 1, 3) if normalize: # If normalized, the rho output falls between 0.0 and 1.0 return (rho + 1) / 2 else: # Otherwise, the rho output falls between -1.0 and +1.0 return rho
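Since choose(n+1, 3) equals n*(n**2 - 1)/6, the formula above is the textbook Spearman rank correlation. A self-contained worked check on the paper's worder example, using math.comb in place of the choose() helper (an equivalent substitution for these inputs):

```python
# Worked check of rho = 1 - sum(d**2) / choose(len(worder)+1, 3) on the
# doctest's worder; math.comb stands in for nltk's choose() helper.
import math

worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5]
n = len(worder)
sum_d_square = sum((wi - i) ** 2 for i, wi in enumerate(worder))  # 350
rho = 1 - sum_d_square / math.comb(n + 1, 3)                      # choose(12, 3) == 220
print(round(rho, 3))            # -0.591, i.e. spearman_rho(worder, normalize=False)
print(round((rho + 1) / 2, 3))  # 0.205, the normalized score
```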
170,612
import math import sys import warnings from collections import Counter from fractions import Fraction from nltk.util import ngrams def corpus_bleu( list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False, ): """ Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all the hypotheses and their respective references. Instead of averaging the sentence level BLEU scores (i.e. macro-average precision), the original BLEU metric (Papineni et al. 2002) accounts for the micro-average precision (i.e. summing the numerators and denominators for each hypothesis-reference(s) pairs before the division). >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', 'forever', ... 'heed', 'Party', 'commands'] >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS 0.5920... The example below show that corpus_bleu() is different from averaging sentence_bleu() for hypotheses >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1) >>> score2 = sentence_bleu([ref2a], hyp2) >>> (score1 + score2) / 2 # doctest: +ELLIPSIS 0.6223... Custom weights may be supplied to fine-tune the BLEU score further. A tuple of float weights for unigrams, bigrams, trigrams and so on can be given. >>> weights = (0.1, 0.3, 0.5, 0.1) >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS 0.5818... This particular weight gave extra value to trigrams. Furthermore, multiple weights can be given, resulting in multiple BLEU scores. >>> weights = [ ... (0.5, 0.5), ... (0.333, 0.333, 0.334), ... (0.25, 0.25, 0.25, 0.25), ... (0.2, 0.2, 0.2, 0.2, 0.2) ... ] >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS [0.8242..., 0.7067..., 0.5920..., 0.4719...] :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses :type list_of_references: list(list(list(str))) :param hypotheses: a list of hypothesis sentences :type hypotheses: list(list(str)) :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights) :type weights: tuple(float) / list(tuple(float)) :param smoothing_function: :type smoothing_function: SmoothingFunction :param auto_reweigh: Option to re-normalize the weights uniformly. :type auto_reweigh: bool :return: The corpus-level BLEU score. :rtype: float """ # Before proceeding to compute BLEU, perform sanity checks. p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches. p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref. 
hyp_lengths, ref_lengths = 0, 0 assert len(list_of_references) == len(hypotheses), ( "The number of hypotheses and their reference(s) should be the " "same " ) try: weights[0][0] except TypeError: weights = [weights] max_weight_length = max(len(weight) for weight in weights) # Iterate through each hypothesis and their corresponding references. for references, hypothesis in zip(list_of_references, hypotheses): # For each order of ngram, calculate the numerator and # denominator for the corpus-level modified precision. for i in range(1, max_weight_length + 1): p_i = modified_precision(references, hypothesis, i) p_numerators[i] += p_i.numerator p_denominators[i] += p_i.denominator # Calculate the hypothesis length and the closest reference length. # Adds them to the corpus-level hypothesis and reference counts. hyp_len = len(hypothesis) hyp_lengths += hyp_len ref_lengths += closest_ref_length(references, hyp_len) # Calculate corpus-level brevity penalty. bp = brevity_penalty(ref_lengths, hyp_lengths) # Collects the various precision values for the different ngram orders. p_n = [ Fraction(p_numerators[i], p_denominators[i], _normalize=False) for i in range(1, max_weight_length + 1) ] # Returns 0 if there's no matching n-grams # We only need to check for p_numerators[1] == 0, since if there's # no unigrams, there won't be any higher order ngrams. if p_numerators[1] == 0: return 0 if len(weights) == 1 else [0] * len(weights) # If there's no smoothing, set use method0 from SmoothinFunction class. if not smoothing_function: smoothing_function = SmoothingFunction().method0 # Smoothen the modified precision. # Note: smoothing_function() may convert values into floats; # it tries to retain the Fraction object as much as the # smoothing method allows. p_n = smoothing_function( p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths ) bleu_scores = [] for weight in weights: # Uniformly re-weighting based on maximum hypothesis lengths if largest # order of n-grams < 4 and weights is set at default. if auto_reweigh: if hyp_lengths < 4 and weight == (0.25, 0.25, 0.25, 0.25): weight = (1 / hyp_lengths,) * hyp_lengths s = (w_i * math.log(p_i) for w_i, p_i in zip(weight, p_n) if p_i > 0) s = bp * math.exp(math.fsum(s)) bleu_scores.append(s) return bleu_scores[0] if len(weights) == 1 else bleu_scores The provided code snippet includes necessary dependencies for implementing the `sentence_bleu` function. Write a Python function `def sentence_bleu( references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False, )` to solve the following problem: Calculate BLEU score (Bilingual Evaluation Understudy) from Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. "BLEU: a method for automatic evaluation of machine translation." In Proceedings of ACL. https://www.aclweb.org/anthology/P02-1040.pdf >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', ... 'forever', 'hearing', 'the', 'activity', 'guidebook', ... 'that', 'party', 'direct'] >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', 'forever', ... 'heed', 'Party', 'commands'] >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 
'being', 'under', 'the', 'command', 'of', 'the', ... 'Party'] >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS 0.5045... If there is no ngrams overlap for any order of n-grams, BLEU returns the value 0. This is because the precision for the order of n-grams without overlap is 0, and the geometric mean in the final BLEU score computation multiplies the 0 with the precision of other n-grams. This results in 0 (independently of the precision of the other n-gram orders). The following example has zero 3-gram and 4-gram overlaps: >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS 0.0 To avoid this harsh behaviour when no ngram overlaps are found a smoothing function can be used. >>> chencherry = SmoothingFunction() >>> sentence_bleu([reference1, reference2, reference3], hypothesis2, ... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS 0.0370... The default BLEU calculates a score for up to 4-grams using uniform weights (this is called BLEU-4). To evaluate your translations with higher/lower order ngrams, use customized weights. E.g. when accounting for up to 5-grams with uniform weights (this is called BLEU-5) use: >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.) >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS 0.3920... Multiple BLEU scores can be computed at once, by supplying a list of weights. E.g. for computing BLEU-2, BLEU-3 *and* BLEU-4 in one computation, use: >>> weights = [ ... (1./2., 1./2.), ... (1./3., 1./3., 1./3.), ... (1./4., 1./4., 1./4., 1./4.) ... ] >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS [0.7453..., 0.6240..., 0.5045...] :param references: reference sentences :type references: list(list(str)) :param hypothesis: a hypothesis sentence :type hypothesis: list(str) :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights) :type weights: tuple(float) / list(tuple(float)) :param smoothing_function: :type smoothing_function: SmoothingFunction :param auto_reweigh: Option to re-normalize the weights uniformly. :type auto_reweigh: bool :return: The sentence-level BLEU score. Returns a list if multiple weights were supplied. :rtype: float / list(float) Here is the function: def sentence_bleu( references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False, ): """ Calculate BLEU score (Bilingual Evaluation Understudy) from Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. "BLEU: a method for automatic evaluation of machine translation." In Proceedings of ACL. https://www.aclweb.org/anthology/P02-1040.pdf >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', ... 'forever', 'hearing', 'the', 'activity', 'guidebook', ... 'that', 'party', 'direct'] >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', 'forever', ... 'heed', 'Party', 'commands'] >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 
'being', 'under', 'the', 'command', 'of', 'the', ... 'Party'] >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS 0.5045... If there is no ngrams overlap for any order of n-grams, BLEU returns the value 0. This is because the precision for the order of n-grams without overlap is 0, and the geometric mean in the final BLEU score computation multiplies the 0 with the precision of other n-grams. This results in 0 (independently of the precision of the other n-gram orders). The following example has zero 3-gram and 4-gram overlaps: >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS 0.0 To avoid this harsh behaviour when no ngram overlaps are found a smoothing function can be used. >>> chencherry = SmoothingFunction() >>> sentence_bleu([reference1, reference2, reference3], hypothesis2, ... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS 0.0370... The default BLEU calculates a score for up to 4-grams using uniform weights (this is called BLEU-4). To evaluate your translations with higher/lower order ngrams, use customized weights. E.g. when accounting for up to 5-grams with uniform weights (this is called BLEU-5) use: >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.) >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS 0.3920... Multiple BLEU scores can be computed at once, by supplying a list of weights. E.g. for computing BLEU-2, BLEU-3 *and* BLEU-4 in one computation, use: >>> weights = [ ... (1./2., 1./2.), ... (1./3., 1./3., 1./3.), ... (1./4., 1./4., 1./4., 1./4.) ... ] >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS [0.7453..., 0.6240..., 0.5045...] :param references: reference sentences :type references: list(list(str)) :param hypothesis: a hypothesis sentence :type hypothesis: list(str) :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights) :type weights: tuple(float) / list(tuple(float)) :param smoothing_function: :type smoothing_function: SmoothingFunction :param auto_reweigh: Option to re-normalize the weights uniformly. :type auto_reweigh: bool :return: The sentence-level BLEU score. Returns a list if multiple weights were supplied. :rtype: float / list(float) """ return corpus_bleu( [references], [hypothesis], weights, smoothing_function, auto_reweigh )
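The corpus_bleu docstring above stresses that BLEU is a micro-average: the n-gram match numerators and denominators are summed over the whole corpus before dividing, which is not the same as averaging per-sentence precisions. A toy sketch of the two aggregations (the match counts below are made up purely for illustration):

```python
# Micro- vs macro-averaged precision with made-up unigram match counts.
from fractions import Fraction

per_sentence = [(17, 18), (3, 11)]  # (matched unigrams, total unigrams) per hypothesis

micro = Fraction(sum(num for num, _ in per_sentence),
                 sum(den for _, den in per_sentence))
macro = sum(Fraction(num, den) for num, den in per_sentence) / len(per_sentence)

print(float(micro))  # 20/29 ~= 0.6897: sums first, one division (corpus_bleu style)
print(float(macro))  # ~= 0.6086: per-sentence precisions averaged afterwards
```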
170,613
import fractions import math from collections import Counter from nltk.util import ngrams def corpus_nist(list_of_references, hypotheses, n=5): """ Calculate a single corpus-level NIST score (aka. system-level BLEU) for all the hypotheses and their respective references. :param references: a corpus of lists of reference sentences, w.r.t. hypotheses :type references: list(list(list(str))) :param hypotheses: a list of hypothesis sentences :type hypotheses: list(list(str)) :param n: highest n-gram order :type n: int """ # Before proceeding to compute NIST, perform sanity checks. assert len(list_of_references) == len( hypotheses ), "The number of hypotheses and their reference(s) should be the same" # Collect the ngram coounts from the reference sentences. ngram_freq = Counter() total_reference_words = 0 for ( references ) in list_of_references: # For each source sent, there's a list of reference sents. for reference in references: # For each order of ngram, count the ngram occurrences. for i in range(1, n + 1): ngram_freq.update(ngrams(reference, i)) total_reference_words += len(reference) # Compute the information weights based on the reference sentences. # Eqn 2 in Doddington (2002): # Info(w_1 ... w_n) = log_2 [ (# of occurrences of w_1 ... w_n-1) / (# of occurrences of w_1 ... w_n) ] information_weights = {} for _ngram in ngram_freq: # w_1 ... w_n _mgram = _ngram[:-1] # w_1 ... w_n-1 # From https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v13a.pl#L546 # it's computed as such: # denominator = ngram_freq[_mgram] if _mgram and _mgram in ngram_freq else denominator = total_reference_words # information_weights[_ngram] = -1 * math.log(ngram_freq[_ngram]/denominator) / math.log(2) # # Mathematically, it's equivalent to the our implementation: if _mgram and _mgram in ngram_freq: numerator = ngram_freq[_mgram] else: numerator = total_reference_words information_weights[_ngram] = math.log(numerator / ngram_freq[_ngram], 2) # Micro-average. nist_precision_numerator_per_ngram = Counter() nist_precision_denominator_per_ngram = Counter() l_ref, l_sys = 0, 0 # For each order of ngram. for i in range(1, n + 1): # Iterate through each hypothesis and their corresponding references. for references, hypothesis in zip(list_of_references, hypotheses): hyp_len = len(hypothesis) # Find reference with the best NIST score. nist_score_per_ref = [] for reference in references: _ref_len = len(reference) # Counter of ngrams in hypothesis. hyp_ngrams = ( Counter(ngrams(hypothesis, i)) if len(hypothesis) >= i else Counter() ) ref_ngrams = ( Counter(ngrams(reference, i)) if len(reference) >= i else Counter() ) ngram_overlaps = hyp_ngrams & ref_ngrams # Precision part of the score in Eqn 3 _numerator = sum( information_weights[_ngram] * count for _ngram, count in ngram_overlaps.items() ) _denominator = sum(hyp_ngrams.values()) _precision = 0 if _denominator == 0 else _numerator / _denominator nist_score_per_ref.append( (_precision, _numerator, _denominator, _ref_len) ) # Best reference. precision, numerator, denominator, ref_len = max(nist_score_per_ref) nist_precision_numerator_per_ngram[i] += numerator nist_precision_denominator_per_ngram[i] += denominator l_ref += ref_len l_sys += hyp_len # Final NIST micro-average mean aggregation. 
nist_precision = 0 for i in nist_precision_numerator_per_ngram: precision = ( nist_precision_numerator_per_ngram[i] / nist_precision_denominator_per_ngram[i] ) nist_precision += precision # Eqn 3 in Doddington(2002) return nist_precision * nist_length_penalty(l_ref, l_sys) The provided code snippet includes necessary dependencies for implementing the `sentence_nist` function. Write a Python function `def sentence_nist(references, hypothesis, n=5)` to solve the following problem: Calculate NIST score from George Doddington. 2002. "Automatic evaluation of machine translation quality using n-gram co-occurrence statistics." Proceedings of HLT. Morgan Kaufmann Publishers Inc. https://dl.acm.org/citation.cfm?id=1289189.1289273 DARPA commissioned NIST to develop an MT evaluation facility based on the BLEU score. The official script used by NIST to compute BLEU and NIST score is mteval-14.pl. The main differences are: - BLEU uses geometric mean of the ngram overlaps, NIST uses arithmetic mean. - NIST has a different brevity penalty - NIST score from mteval-14.pl has a self-contained tokenizer Note: The mteval-14.pl includes a smoothing function for BLEU score that is NOT used in the NIST score computation. >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', ... 'forever', 'hearing', 'the', 'activity', 'guidebook', ... 'that', 'party', 'direct'] >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', 'forever', ... 'heed', 'Party', 'commands'] >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 'being', 'under', 'the', 'command', 'of', 'the', ... 'Party'] >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> sentence_nist([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS 3.3709... >>> sentence_nist([reference1, reference2, reference3], hypothesis2) # doctest: +ELLIPSIS 1.4619... :param references: reference sentences :type references: list(list(str)) :param hypothesis: a hypothesis sentence :type hypothesis: list(str) :param n: highest n-gram order :type n: int Here is the function: def sentence_nist(references, hypothesis, n=5): """ Calculate NIST score from George Doddington. 2002. "Automatic evaluation of machine translation quality using n-gram co-occurrence statistics." Proceedings of HLT. Morgan Kaufmann Publishers Inc. https://dl.acm.org/citation.cfm?id=1289189.1289273 DARPA commissioned NIST to develop an MT evaluation facility based on the BLEU score. The official script used by NIST to compute BLEU and NIST score is mteval-14.pl. The main differences are: - BLEU uses geometric mean of the ngram overlaps, NIST uses arithmetic mean. - NIST has a different brevity penalty - NIST score from mteval-14.pl has a self-contained tokenizer Note: The mteval-14.pl includes a smoothing function for BLEU score that is NOT used in the NIST score computation. >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'military', 'always', ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', ... 'forever', 'hearing', 'the', 'activity', 'guidebook', ... 
'that', 'party', 'direct'] >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'military', 'will', 'forever', ... 'heed', 'Party', 'commands'] >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'military', 'forces', 'always', ... 'being', 'under', 'the', 'command', 'of', 'the', ... 'Party'] >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'army', 'always', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'party'] >>> sentence_nist([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS 3.3709... >>> sentence_nist([reference1, reference2, reference3], hypothesis2) # doctest: +ELLIPSIS 1.4619... :param references: reference sentences :type references: list(list(str)) :param hypothesis: a hypothesis sentence :type hypothesis: list(str) :param n: highest n-gram order :type n: int """ return corpus_nist([references], [hypothesis], n)
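The information weights in the snippet follow Eqn 2 of Doddington (2002): Info(w_1 ... w_n) = log2(count(w_1 ... w_n-1) / count(w_1 ... w_n)), with the total reference word count as the fallback denominator for unigrams. A small sketch of just that computation on a toy reference (the sentence is made up; assumes nltk is installed for nltk.util.ngrams):

```python
# Information weights (Eqn 2, Doddington 2002) on a single toy reference.
import math
from collections import Counter
from nltk.util import ngrams

reference = 'the cat sat on the mat'.split()
max_n = 2
ngram_freq = Counter()
for i in range(1, max_n + 1):
    ngram_freq.update(ngrams(reference, i))
total_reference_words = len(reference)

information_weights = {}
for _ngram in ngram_freq:          # w_1 ... w_n
    _mgram = _ngram[:-1]           # w_1 ... w_n-1
    numerator = (ngram_freq[_mgram]
                 if _mgram and _mgram in ngram_freq
                 else total_reference_words)
    information_weights[_ngram] = math.log(numerator / ngram_freq[_ngram], 2)

print(information_weights[('the',)])        # log2(6 / 2) ~= 1.585
print(information_weights[('the', 'cat')])  # log2(2 / 1) == 1.0
```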
170,614
from collections import defaultdict class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]): default_factory: Callable[[], _VT] def __init__(self, **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ... def __init__( self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... def __missing__(self, key: _KT) -> _VT: ... def copy(self: _S) -> _S: ... The provided code snippet includes necessary dependencies for implementing the `grow_diag_final_and` function. Write a Python function `def grow_diag_final_and(srclen, trglen, e2f, f2e)` to solve the following problem: This module symmetrisatizes the source-to-target and target-to-source word alignment output and produces, aka. GDFA algorithm (Koehn, 2005). Step 1: Find the intersection of the bidirectional alignment. Step 2: Search for additional neighbor alignment points to be added, given these criteria: (i) neighbor alignments points are not in the intersection and (ii) neighbor alignments are in the union. Step 3: Add all other alignment points that are not in the intersection, not in the neighboring alignments that met the criteria but in the original forward/backward alignment outputs. >>> forw = ('0-0 2-1 9-2 21-3 10-4 7-5 11-6 9-7 12-8 1-9 3-10 ' ... '4-11 17-12 17-13 25-14 13-15 24-16 11-17 28-18') >>> back = ('0-0 1-9 2-9 3-10 4-11 5-12 6-6 7-5 8-6 9-7 10-4 ' ... '11-6 12-8 13-12 15-12 17-13 18-13 19-12 20-13 ' ... '21-3 22-12 23-14 24-17 25-15 26-17 27-18 28-18') >>> srctext = ("この よう な ハロー 白色 わい 星 の L 関数 " ... "は L と 共 に 不連続 に 増加 する こと が " ... "期待 さ れる こと を 示し た 。") >>> trgtext = ("Therefore , we expect that the luminosity function " ... "of such halo white dwarfs increases discontinuously " ... "with the luminosity .") >>> srclen = len(srctext.split()) >>> trglen = len(trgtext.split()) >>> >>> gdfa = grow_diag_final_and(srclen, trglen, forw, back) >>> gdfa == sorted(set([(28, 18), (6, 6), (24, 17), (2, 1), (15, 12), (13, 12), ... (2, 9), (3, 10), (26, 17), (25, 15), (8, 6), (9, 7), (20, ... 13), (18, 13), (0, 0), (10, 4), (13, 15), (23, 14), (7, 5), ... (25, 14), (1, 9), (17, 13), (4, 11), (11, 17), (9, 2), (22, ... 12), (27, 18), (24, 16), (21, 3), (19, 12), (17, 12), (5, ... 12), (11, 6), (12, 8)])) True References: Koehn, P., A. Axelrod, A. Birch, C. Callison, M. Osborne, and D. Talbot. 2005. Edinburgh System Description for the 2005 IWSLT Speech Translation Evaluation. In MT Eval Workshop. 
:type srclen: int :param srclen: the number of tokens in the source language :type trglen: int :param trglen: the number of tokens in the target language :type e2f: str :param e2f: the forward word alignment outputs from source-to-target language (in pharaoh output format) :type f2e: str :param f2e: the backward word alignment outputs from target-to-source language (in pharaoh output format) :rtype: set(tuple(int)) :return: the symmetrized alignment points from the GDFA algorithm Here is the function: def grow_diag_final_and(srclen, trglen, e2f, f2e): """ This module symmetrisatizes the source-to-target and target-to-source word alignment output and produces, aka. GDFA algorithm (Koehn, 2005). Step 1: Find the intersection of the bidirectional alignment. Step 2: Search for additional neighbor alignment points to be added, given these criteria: (i) neighbor alignments points are not in the intersection and (ii) neighbor alignments are in the union. Step 3: Add all other alignment points that are not in the intersection, not in the neighboring alignments that met the criteria but in the original forward/backward alignment outputs. >>> forw = ('0-0 2-1 9-2 21-3 10-4 7-5 11-6 9-7 12-8 1-9 3-10 ' ... '4-11 17-12 17-13 25-14 13-15 24-16 11-17 28-18') >>> back = ('0-0 1-9 2-9 3-10 4-11 5-12 6-6 7-5 8-6 9-7 10-4 ' ... '11-6 12-8 13-12 15-12 17-13 18-13 19-12 20-13 ' ... '21-3 22-12 23-14 24-17 25-15 26-17 27-18 28-18') >>> srctext = ("この よう な ハロー 白色 わい 星 の L 関数 " ... "は L と 共 に 不連続 に 増加 する こと が " ... "期待 さ れる こと を 示し た 。") >>> trgtext = ("Therefore , we expect that the luminosity function " ... "of such halo white dwarfs increases discontinuously " ... "with the luminosity .") >>> srclen = len(srctext.split()) >>> trglen = len(trgtext.split()) >>> >>> gdfa = grow_diag_final_and(srclen, trglen, forw, back) >>> gdfa == sorted(set([(28, 18), (6, 6), (24, 17), (2, 1), (15, 12), (13, 12), ... (2, 9), (3, 10), (26, 17), (25, 15), (8, 6), (9, 7), (20, ... 13), (18, 13), (0, 0), (10, 4), (13, 15), (23, 14), (7, 5), ... (25, 14), (1, 9), (17, 13), (4, 11), (11, 17), (9, 2), (22, ... 12), (27, 18), (24, 16), (21, 3), (19, 12), (17, 12), (5, ... 12), (11, 6), (12, 8)])) True References: Koehn, P., A. Axelrod, A. Birch, C. Callison, M. Osborne, and D. Talbot. 2005. Edinburgh System Description for the 2005 IWSLT Speech Translation Evaluation. In MT Eval Workshop. :type srclen: int :param srclen: the number of tokens in the source language :type trglen: int :param trglen: the number of tokens in the target language :type e2f: str :param e2f: the forward word alignment outputs from source-to-target language (in pharaoh output format) :type f2e: str :param f2e: the backward word alignment outputs from target-to-source language (in pharaoh output format) :rtype: set(tuple(int)) :return: the symmetrized alignment points from the GDFA algorithm """ # Converts pharaoh text format into list of tuples. e2f = [tuple(map(int, a.split("-"))) for a in e2f.split()] f2e = [tuple(map(int, a.split("-"))) for a in f2e.split()] neighbors = [(-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)] alignment = set(e2f).intersection(set(f2e)) # Find the intersection. union = set(e2f).union(set(f2e)) # *aligned* is used to check if neighbors are aligned in grow_diag() aligned = defaultdict(set) for i, j in alignment: aligned["e"].add(i) aligned["f"].add(j) def grow_diag(): """ Search for the neighbor points and them to the intersected alignment points if criteria are met. 
""" prev_len = len(alignment) - 1 # iterate until no new points added while prev_len < len(alignment): no_new_points = True # for english word e = 0 ... en for e in range(srclen): # for foreign word f = 0 ... fn for f in range(trglen): # if ( e aligned with f) if (e, f) in alignment: # for each neighboring point (e-new, f-new) for neighbor in neighbors: neighbor = tuple(i + j for i, j in zip((e, f), neighbor)) e_new, f_new = neighbor # if ( ( e-new not aligned and f-new not aligned) # and (e-new, f-new in union(e2f, f2e) ) if ( e_new not in aligned and f_new not in aligned ) and neighbor in union: alignment.add(neighbor) aligned["e"].add(e_new) aligned["f"].add(f_new) prev_len += 1 no_new_points = False # iterate until no new points added if no_new_points: break def final_and(a): """ Adds remaining points that are not in the intersection, not in the neighboring alignments but in the original *e2f* and *f2e* alignments """ # for english word e = 0 ... en for e_new in range(srclen): # for foreign word f = 0 ... fn for f_new in range(trglen): # if ( ( e-new not aligned and f-new not aligned) # and (e-new, f-new in union(e2f, f2e) ) if ( e_new not in aligned and f_new not in aligned and (e_new, f_new) in union ): alignment.add((e_new, f_new)) aligned["e"].add(e_new) aligned["f"].add(f_new) grow_diag() final_and(e2f) final_and(f2e) return sorted(alignment)
170,615
import math class LanguageIndependent: # These are the language-independent probabilities and parameters # given in Gale & Church # for the computation, l_1 is always the language with less characters PRIORS = { (1, 0): 0.0099, (0, 1): 0.0099, (1, 1): 0.89, (2, 1): 0.089, (1, 2): 0.089, (2, 2): 0.011, } AVERAGE_CHARACTERS = 1 VARIANCE_CHARACTERS = 6.8 def align_blocks(source_sents_lens, target_sents_lens, params=LanguageIndependent): """Return the sentence alignment of two text blocks (usually paragraphs). >>> align_blocks([5,5,5], [7,7,7]) [(0, 0), (1, 1), (2, 2)] >>> align_blocks([10,5,5], [12,20]) [(0, 0), (1, 1), (2, 1)] >>> align_blocks([12,20], [10,5,5]) [(0, 0), (1, 1), (1, 2)] >>> align_blocks([10,2,10,10,2,10], [12,3,20,3,12]) [(0, 0), (1, 1), (2, 2), (3, 2), (4, 3), (5, 4)] """ alignment_types = list(params.PRIORS.keys()) # there are always three rows in the history (with the last of them being filled) D = [[]] backlinks = {} for i in range(len(source_sents_lens) + 1): for j in range(len(target_sents_lens) + 1): min_dist = float("inf") min_align = None for a in alignment_types: prev_i = -1 - a[0] prev_j = j - a[1] if prev_i < -len(D) or prev_j < 0: continue p = D[prev_i][prev_j] + align_log_prob( i, j, source_sents_lens, target_sents_lens, a, params ) if p < min_dist: min_dist = p min_align = a if min_dist == float("inf"): min_dist = 0 backlinks[(i, j)] = min_align D[-1].append(min_dist) if len(D) > 2: D.pop(0) D.append([]) return trace(backlinks, source_sents_lens, target_sents_lens) The provided code snippet includes necessary dependencies for implementing the `align_texts` function. Write a Python function `def align_texts(source_blocks, target_blocks, params=LanguageIndependent)` to solve the following problem: Creates the sentence alignment of two texts. Texts can consist of several blocks. Block boundaries cannot be crossed by sentence alignment links. Each block consists of a list that contains the lengths (in characters) of the sentences in this block. @param source_blocks: The list of blocks in the source text. @param target_blocks: The list of blocks in the target text. @param params: the sentence alignment parameters. @returns: A list of sentence alignment lists Here is the function: def align_texts(source_blocks, target_blocks, params=LanguageIndependent): """Creates the sentence alignment of two texts. Texts can consist of several blocks. Block boundaries cannot be crossed by sentence alignment links. Each block consists of a list that contains the lengths (in characters) of the sentences in this block. @param source_blocks: The list of blocks in the source text. @param target_blocks: The list of blocks in the target text. @param params: the sentence alignment parameters. @returns: A list of sentence alignment lists """ if len(source_blocks) != len(target_blocks): raise ValueError( "Source and target texts do not have the same number of blocks." ) return [ align_blocks(source_block, target_block, params) for source_block, target_block in zip(source_blocks, target_blocks) ]
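A usage sketch built from the align_blocks doctests above, assuming these helpers are importable from nltk.translate.gale_church (where NLTK ships the Gale-Church aligner):

```python
# align_texts over two blocks, reusing inputs from the align_blocks doctests.
from nltk.translate.gale_church import align_texts

source_blocks = [[5, 5, 5], [10, 5, 5]]  # sentence lengths (in characters) per block
target_blocks = [[7, 7, 7], [12, 20]]

print(align_texts(source_blocks, target_blocks))
# [[(0, 0), (1, 1), (2, 2)], [(0, 0), (1, 1), (2, 1)]], matching the per-block doctests
```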
170,616
import math def split_at(it, split_value): """Splits an iterator C{it} at values of C{split_value}. Each instance of C{split_value} is swallowed. The iterator produces subiterators which need to be consumed fully before the next subiterator can be used. """ def _chunk_iterator(first): v = first while v != split_value: yield v v = next(it) while True: yield _chunk_iterator(next(it)) The provided code snippet includes necessary dependencies for implementing the `parse_token_stream` function. Write a Python function `def parse_token_stream(stream, soft_delimiter, hard_delimiter)` to solve the following problem: Parses a stream of tokens and splits it into sentences (using C{soft_delimiter} tokens) and blocks (using C{hard_delimiter} tokens) for use with the L{align_texts} function. Here is the function: def parse_token_stream(stream, soft_delimiter, hard_delimiter): """Parses a stream of tokens and splits it into sentences (using C{soft_delimiter} tokens) and blocks (using C{hard_delimiter} tokens) for use with the L{align_texts} function. """ return [ [ sum(len(token) for token in sentence_it) for sentence_it in split_at(block_it, soft_delimiter) ] for block_it in split_at(stream, hard_delimiter) ]
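A formatted, Python 3 sketch of the two helpers with a small usage example. The try/except guards around iterator exhaustion are an addition (the snippet above leaves the end-of-stream behaviour implicit), and the token stream and delimiters are made up:

```python
def split_at(it, split_value):
    """Split iterator *it* at *split_value*, swallowing each delimiter."""
    it = iter(it)

    def _chunk_iterator(first):
        v = first
        while v != split_value:
            yield v
            try:
                v = next(it)
            except StopIteration:  # stream ended without a trailing delimiter
                return

    while True:
        try:
            first = next(it)
        except StopIteration:      # no more chunks
            return
        yield _chunk_iterator(first)


def parse_token_stream(stream, soft_delimiter, hard_delimiter):
    """Character lengths per sentence (soft splits), grouped into blocks (hard splits)."""
    return [
        [
            sum(len(token) for token in sentence_it)
            for sentence_it in split_at(block_it, soft_delimiter)
        ]
        for block_it in split_at(stream, hard_delimiter)
    ]


tokens = ['the', 'cat', 'sat', '.', 'it', 'purred', '.', '<P>',
          'a', 'dog', 'barked', '.']
print(parse_token_stream(tokens, soft_delimiter='.', hard_delimiter='<P>'))
# [[9, 8], [10]] -- character counts of 'thecatsat', 'itpurred', 'adogbarked'
```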
170,617
import re from collections import Counter, defaultdict from nltk.util import ngrams def corpus_chrf( references, hypotheses, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True ): """ Calculates the corpus level CHRF (Character n-gram F-score), it is the macro-averaged value of the sentence/segment level CHRF score. This implementation of CHRF only supports a single reference at the moment. >>> ref1 = str('It is a guide to action that ensures that the military ' ... 'will forever heed Party commands').split() >>> ref2 = str('It is the guiding principle which guarantees the military ' ... 'forces always being under the command of the Party').split() >>> >>> hyp1 = str('It is a guide to action which ensures that the military ' ... 'always obeys the commands of the party').split() >>> hyp2 = str('It is to insure the troops forever hearing the activity ' ... 'guidebook that party direct') >>> corpus_chrf([ref1, ref2, ref1, ref2], [hyp1, hyp2, hyp2, hyp1]) # doctest: +ELLIPSIS 0.3910... :param references: a corpus of list of reference sentences, w.r.t. hypotheses :type references: list(list(str)) :param hypotheses: a list of hypothesis sentences :type hypotheses: list(list(str)) :param min_len: The minimum order of n-gram this function should extract. :type min_len: int :param max_len: The maximum order of n-gram this function should extract. :type max_len: int :param beta: the parameter to assign more importance to recall over precision :type beta: float :param ignore_whitespace: ignore whitespace characters in scoring :type ignore_whitespace: bool :return: the sentence level CHRF score. :rtype: float """ assert len(references) == len( hypotheses ), "The number of hypotheses and their references should be the same" num_sents = len(hypotheses) # Keep f-scores for each n-gram order separate ngram_fscores = defaultdict(lambda: list()) # Iterate through each hypothesis and their corresponding references. for reference, hypothesis in zip(references, hypotheses): # preprocess both reference and hypothesis reference = _preprocess(reference, ignore_whitespace) hypothesis = _preprocess(hypothesis, ignore_whitespace) # Calculate f-scores for each sentence and for each n-gram order # separately. for n in range(min_len, max_len + 1): # Compute the precision, recall, fscore and support. prec, rec, fscore, tp = chrf_precision_recall_fscore_support( reference, hypothesis, n, beta=beta ) ngram_fscores[n].append(fscore) # how many n-gram sizes num_ngram_sizes = len(ngram_fscores) # sum of f-scores over all sentences for each n-gram order total_scores = [sum(fscores) for n, fscores in ngram_fscores.items()] # macro-average over n-gram orders and over all sentences return (sum(total_scores) / num_ngram_sizes) / num_sents The provided code snippet includes necessary dependencies for implementing the `sentence_chrf` function. Write a Python function `def sentence_chrf( reference, hypothesis, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True )` to solve the following problem: Calculates the sentence level CHRF (Character n-gram F-score) described in - Maja Popovic. 2015. CHRF: Character n-gram F-score for Automatic MT Evaluation. In Proceedings of the 10th Workshop on Machine Translation. https://www.statmt.org/wmt15/pdf/WMT49.pdf - Maja Popovic. 2016. CHRF Deconstructed: β Parameters and n-gram Weights. In Proceedings of the 1st Conference on Machine Translation. https://www.statmt.org/wmt16/pdf/W16-2341.pdf This implementation of CHRF only supports a single reference at the moment. 
For details not reported in the paper, consult Maja Popovic's original implementation: https://github.com/m-popovic/chrF The code should output results equivalent to running CHRF++ with the following options: -nw 0 -b 3 An example from the original BLEU paper https://www.aclweb.org/anthology/P02-1040.pdf >>> ref1 = str('It is a guide to action that ensures that the military ' ... 'will forever heed Party commands').split() >>> hyp1 = str('It is a guide to action which ensures that the military ' ... 'always obeys the commands of the party').split() >>> hyp2 = str('It is to insure the troops forever hearing the activity ' ... 'guidebook that party direct').split() >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS 0.6349... >>> sentence_chrf(ref1, hyp2) # doctest: +ELLIPSIS 0.3330... The infamous "the the the ... " example >>> ref = 'the cat is on the mat'.split() >>> hyp = 'the the the the the the the'.split() >>> sentence_chrf(ref, hyp) # doctest: +ELLIPSIS 0.1468... An example to show that this function allows users to use strings instead of tokens, i.e. list(str) as inputs. >>> ref1 = str('It is a guide to action that ensures that the military ' ... 'will forever heed Party commands') >>> hyp1 = str('It is a guide to action which ensures that the military ' ... 'always obeys the commands of the party') >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS 0.6349... >>> type(ref1) == type(hyp1) == str True >>> sentence_chrf(ref1.split(), hyp1.split()) # doctest: +ELLIPSIS 0.6349... To skip the unigrams and only use 2- to 3-grams: >>> sentence_chrf(ref1, hyp1, min_len=2, max_len=3) # doctest: +ELLIPSIS 0.6617... :param references: reference sentence :type references: list(str) / str :param hypothesis: a hypothesis sentence :type hypothesis: list(str) / str :param min_len: The minimum order of n-gram this function should extract. :type min_len: int :param max_len: The maximum order of n-gram this function should extract. :type max_len: int :param beta: the parameter to assign more importance to recall over precision :type beta: float :param ignore_whitespace: ignore whitespace characters in scoring :type ignore_whitespace: bool :return: the sentence level CHRF score. :rtype: float Here is the function: def sentence_chrf( reference, hypothesis, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True ): """ Calculates the sentence level CHRF (Character n-gram F-score) described in - Maja Popovic. 2015. CHRF: Character n-gram F-score for Automatic MT Evaluation. In Proceedings of the 10th Workshop on Machine Translation. https://www.statmt.org/wmt15/pdf/WMT49.pdf - Maja Popovic. 2016. CHRF Deconstructed: β Parameters and n-gram Weights. In Proceedings of the 1st Conference on Machine Translation. https://www.statmt.org/wmt16/pdf/W16-2341.pdf This implementation of CHRF only supports a single reference at the moment. For details not reported in the paper, consult Maja Popovic's original implementation: https://github.com/m-popovic/chrF The code should output results equivalent to running CHRF++ with the following options: -nw 0 -b 3 An example from the original BLEU paper https://www.aclweb.org/anthology/P02-1040.pdf >>> ref1 = str('It is a guide to action that ensures that the military ' ... 'will forever heed Party commands').split() >>> hyp1 = str('It is a guide to action which ensures that the military ' ... 'always obeys the commands of the party').split() >>> hyp2 = str('It is to insure the troops forever hearing the activity ' ... 
'guidebook that party direct').split() >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS 0.6349... >>> sentence_chrf(ref1, hyp2) # doctest: +ELLIPSIS 0.3330... The infamous "the the the ... " example >>> ref = 'the cat is on the mat'.split() >>> hyp = 'the the the the the the the'.split() >>> sentence_chrf(ref, hyp) # doctest: +ELLIPSIS 0.1468... An example to show that this function allows users to use strings instead of tokens, i.e. list(str) as inputs. >>> ref1 = str('It is a guide to action that ensures that the military ' ... 'will forever heed Party commands') >>> hyp1 = str('It is a guide to action which ensures that the military ' ... 'always obeys the commands of the party') >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS 0.6349... >>> type(ref1) == type(hyp1) == str True >>> sentence_chrf(ref1.split(), hyp1.split()) # doctest: +ELLIPSIS 0.6349... To skip the unigrams and only use 2- to 3-grams: >>> sentence_chrf(ref1, hyp1, min_len=2, max_len=3) # doctest: +ELLIPSIS 0.6617... :param references: reference sentence :type references: list(str) / str :param hypothesis: a hypothesis sentence :type hypothesis: list(str) / str :param min_len: The minimum order of n-gram this function should extract. :type min_len: int :param max_len: The maximum order of n-gram this function should extract. :type max_len: int :param beta: the parameter to assign more importance to recall over precision :type beta: float :param ignore_whitespace: ignore whitespace characters in scoring :type ignore_whitespace: bool :return: the sentence level CHRF score. :rtype: float """ return corpus_chrf( [reference], [hypothesis], min_len, max_len, beta=beta, ignore_whitespace=ignore_whitespace, )
Calculates the sentence level CHRF (Character n-gram F-score) described in - Maja Popovic. 2015. CHRF: Character n-gram F-score for Automatic MT Evaluation. In Proceedings of the 10th Workshop on Machine Translation. https://www.statmt.org/wmt15/pdf/WMT49.pdf - Maja Popovic. 2016. CHRF Deconstructed: β Parameters and n-gram Weights. In Proceedings of the 1st Conference on Machine Translation. https://www.statmt.org/wmt16/pdf/W16-2341.pdf This implementation of CHRF only supports a single reference at the moment. For details not reported in the paper, consult Maja Popovic's original implementation: https://github.com/m-popovic/chrF The code should output results equivalent to running CHRF++ with the following options: -nw 0 -b 3 An example from the original BLEU paper https://www.aclweb.org/anthology/P02-1040.pdf >>> ref1 = str('It is a guide to action that ensures that the military ' ... 'will forever heed Party commands').split() >>> hyp1 = str('It is a guide to action which ensures that the military ' ... 'always obeys the commands of the party').split() >>> hyp2 = str('It is to insure the troops forever hearing the activity ' ... 'guidebook that party direct').split() >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS 0.6349... >>> sentence_chrf(ref1, hyp2) # doctest: +ELLIPSIS 0.3330... The infamous "the the the ... " example >>> ref = 'the cat is on the mat'.split() >>> hyp = 'the the the the the the the'.split() >>> sentence_chrf(ref, hyp) # doctest: +ELLIPSIS 0.1468... An example to show that this function allows users to use strings instead of tokens, i.e. list(str) as inputs. >>> ref1 = str('It is a guide to action that ensures that the military ' ... 'will forever heed Party commands') >>> hyp1 = str('It is a guide to action which ensures that the military ' ... 'always obeys the commands of the party') >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS 0.6349... >>> type(ref1) == type(hyp1) == str True >>> sentence_chrf(ref1.split(), hyp1.split()) # doctest: +ELLIPSIS 0.6349... To skip the unigrams and only use 2- to 3-grams: >>> sentence_chrf(ref1, hyp1, min_len=2, max_len=3) # doctest: +ELLIPSIS 0.6617... :param references: reference sentence :type references: list(str) / str :param hypothesis: a hypothesis sentence :type hypothesis: list(str) / str :param min_len: The minimum order of n-gram this function should extract. :type min_len: int :param max_len: The maximum order of n-gram this function should extract. :type max_len: int :param beta: the parameter to assign more importance to recall over precision :type beta: float :param ignore_whitespace: ignore whitespace characters in scoring :type ignore_whitespace: bool :return: the sentence level CHRF score. :rtype: float
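To make the relationship between the sentence-level and corpus-level scorers concrete, here is a minimal usage sketch. It assumes the two functions are exposed at their usual NLTK location, nltk.translate.chrf_score; the expected values are the ones quoted in the doctests above.

from nltk.translate.chrf_score import corpus_chrf, sentence_chrf

ref1 = str('It is a guide to action that ensures that the military '
           'will forever heed Party commands').split()
hyp1 = str('It is a guide to action which ensures that the military '
           'always obeys the commands of the party').split()
hyp2 = str('It is to insure the troops forever hearing the activity '
           'guidebook that party direct').split()

# Sentence level: character n-gram F-scores for n = 1..6, averaged over n.
print(sentence_chrf(ref1, hyp1))   # ~0.6349, per the doctest above
print(sentence_chrf(ref1, hyp2))   # ~0.3330

# Corpus level: the same per-sentence scores, macro-averaged over all pairs,
# so here it equals the mean of the two sentence-level values.
print(corpus_chrf([ref1, ref1], [hyp1, hyp2]))

Because corpus_chrf macro-averages, its result for a two-sentence corpus is simply the arithmetic mean of the two sentence_chrf scores, unlike BLEU's micro-averaged clipped counts.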
170,618
The provided code snippet includes necessary dependencies for implementing the `alignment_error_rate` function. Write a Python function `def alignment_error_rate(reference, hypothesis, possible=None)` to solve the following problem: Return the Alignment Error Rate (AER) of an alignment with respect to a "gold standard" reference alignment. Return an error rate between 0.0 (perfect alignment) and 1.0 (no alignment). >>> from nltk.translate import Alignment >>> ref = Alignment([(0, 0), (1, 1), (2, 2)]) >>> test = Alignment([(0, 0), (1, 2), (2, 1)]) >>> alignment_error_rate(ref, test) # doctest: +ELLIPSIS 0.6666666666666667 :type reference: Alignment :param reference: A gold standard alignment (sure alignments) :type hypothesis: Alignment :param hypothesis: A hypothesis alignment (aka. candidate alignments) :type possible: Alignment or None :param possible: A gold standard reference of possible alignments (defaults to *reference* if None) :rtype: float or None Here is the function: def alignment_error_rate(reference, hypothesis, possible=None): """ Return the Alignment Error Rate (AER) of an alignment with respect to a "gold standard" reference alignment. Return an error rate between 0.0 (perfect alignment) and 1.0 (no alignment). >>> from nltk.translate import Alignment >>> ref = Alignment([(0, 0), (1, 1), (2, 2)]) >>> test = Alignment([(0, 0), (1, 2), (2, 1)]) >>> alignment_error_rate(ref, test) # doctest: +ELLIPSIS 0.6666666666666667 :type reference: Alignment :param reference: A gold standard alignment (sure alignments) :type hypothesis: Alignment :param hypothesis: A hypothesis alignment (aka. candidate alignments) :type possible: Alignment or None :param possible: A gold standard reference of possible alignments (defaults to *reference* if None) :rtype: float or None """ if possible is None: possible = reference else: assert reference.issubset(possible) # sanity check return 1.0 - (len(hypothesis & reference) + len(hypothesis & possible)) / float( len(hypothesis) + len(reference) )
Return the Alignment Error Rate (AER) of an alignment with respect to a "gold standard" reference alignment. Return an error rate between 0.0 (perfect alignment) and 1.0 (no alignment). >>> from nltk.translate import Alignment >>> ref = Alignment([(0, 0), (1, 1), (2, 2)]) >>> test = Alignment([(0, 0), (1, 2), (2, 1)]) >>> alignment_error_rate(ref, test) # doctest: +ELLIPSIS 0.6666666666666667 :type reference: Alignment :param reference: A gold standard alignment (sure alignments) :type hypothesis: Alignment :param hypothesis: A hypothesis alignment (aka. candidate alignments) :type possible: Alignment or None :param possible: A gold standard reference of possible alignments (defaults to *reference* if None) :rtype: float or None
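The doctest above only exercises the two-argument form, in which the sure and possible sets coincide. A short worked example with a distinct possible set may help; it assumes the standard NLTK locations nltk.translate.Alignment and nltk.translate.metrics.alignment_error_rate.

from nltk.translate import Alignment
from nltk.translate.metrics import alignment_error_rate

sure = Alignment([(0, 0), (1, 1), (2, 2)])               # gold sure links S
possible = Alignment([(0, 0), (1, 1), (2, 2), (2, 3)])   # gold possible links P (a superset of S)
hyp = Alignment([(0, 0), (1, 1), (2, 3)])                # system alignment A

# AER = 1 - (|A & S| + |A & P|) / (|A| + |S|)
#     = 1 - (2 + 3) / (3 + 3) = 0.1666...
print(alignment_error_rate(sure, hyp, possible))

Links that are merely possible (here (2, 3)) are not penalised when the system proposes them, but only sure links count towards the recall-like term in the denominator.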
170,619
from functools import reduce from nltk.parse.api import ParserI from nltk.tree import ProbabilisticTree, Tree class ViterbiParser(ParserI): """ A bottom-up ``PCFG`` parser that uses dynamic programming to find the single most likely parse for a text. The ``ViterbiParser`` parser parses texts by filling in a "most likely constituent table". This table records the most probable tree representation for any given span and node value. In particular, it has an entry for every start index, end index, and node value, recording the most likely subtree that spans from the start index to the end index, and has the given node value. The ``ViterbiParser`` parser fills in this table incrementally. It starts by filling in all entries for constituents that span one element of text (i.e., entries where the end index is one greater than the start index). After it has filled in all table entries for constituents that span one element of text, it fills in the entries for constitutants that span two elements of text. It continues filling in the entries for constituents spanning larger and larger portions of the text, until the entire table has been filled. Finally, it returns the table entry for a constituent spanning the entire text, whose node value is the grammar's start symbol. In order to find the most likely constituent with a given span and node value, the ``ViterbiParser`` parser considers all productions that could produce that node value. For each production, it finds all children that collectively cover the span and have the node values specified by the production's right hand side. If the probability of the tree formed by applying the production to the children is greater than the probability of the current entry in the table, then the table is updated with this new tree. A pseudo-code description of the algorithm used by ``ViterbiParser`` is: | Create an empty most likely constituent table, *MLC*. | For width in 1...len(text): | For start in 1...len(text)-width: | For prod in grammar.productions: | For each sequence of subtrees [t[1], t[2], ..., t[n]] in MLC, | where t[i].label()==prod.rhs[i], | and the sequence covers [start:start+width]: | old_p = MLC[start, start+width, prod.lhs] | new_p = P(t[1])P(t[1])...P(t[n])P(prod) | if new_p > old_p: | new_tree = Tree(prod.lhs, t[1], t[2], ..., t[n]) | MLC[start, start+width, prod.lhs] = new_tree | Return MLC[0, len(text), start_symbol] :type _grammar: PCFG :ivar _grammar: The grammar used to parse sentences. :type _trace: int :ivar _trace: The level of tracing output that should be generated when parsing a text. """ def __init__(self, grammar, trace=0): """ Create a new ``ViterbiParser`` parser, that uses ``grammar`` to parse texts. :type grammar: PCFG :param grammar: The grammar used to parse texts. :type trace: int :param trace: The level of tracing that should be used when parsing a text. ``0`` will generate no tracing output; and higher numbers will produce more verbose tracing output. """ self._grammar = grammar self._trace = trace def grammar(self): return self._grammar def trace(self, trace=2): """ Set the level of tracing output that should be generated when parsing a text. :type trace: int :param trace: The trace level. A trace level of ``0`` will generate no tracing output; and higher trace levels will produce more verbose tracing output. :rtype: None """ self._trace = trace def parse(self, tokens): # Inherit docs from ParserI tokens = list(tokens) self._grammar.check_coverage(tokens) # The most likely constituent table. 
This table specifies the # most likely constituent for a given span and type. # Constituents can be either Trees or tokens. For Trees, # the "type" is the Nonterminal for the tree's root node # value. For Tokens, the "type" is the token's type. # The table is stored as a dictionary, since it is sparse. constituents = {} # Initialize the constituents dictionary with the words from # the text. if self._trace: print("Inserting tokens into the most likely" + " constituents table...") for index in range(len(tokens)): token = tokens[index] constituents[index, index + 1, token] = token if self._trace > 1: self._trace_lexical_insertion(token, index, len(tokens)) # Consider each span of length 1, 2, ..., n; and add any trees # that might cover that span to the constituents dictionary. for length in range(1, len(tokens) + 1): if self._trace: print( "Finding the most likely constituents" + " spanning %d text elements..." % length ) for start in range(len(tokens) - length + 1): span = (start, start + length) self._add_constituents_spanning(span, constituents, tokens) # Return the tree that spans the entire text & have the right cat tree = constituents.get((0, len(tokens), self._grammar.start())) if tree is not None: yield tree def _add_constituents_spanning(self, span, constituents, tokens): """ Find any constituents that might cover ``span``, and add them to the most likely constituents table. :rtype: None :type span: tuple(int, int) :param span: The section of the text for which we are trying to find possible constituents. The span is specified as a pair of integers, where the first integer is the index of the first token that should be included in the constituent; and the second integer is the index of the first token that should not be included in the constituent. I.e., the constituent should cover ``text[span[0]:span[1]]``, where ``text`` is the text that we are parsing. :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) :param constituents: The most likely constituents table. This table records the most probable tree representation for any given span and node value. In particular, ``constituents(s,e,nv)`` is the most likely ``ProbabilisticTree`` that covers ``text[s:e]`` and has a node value ``nv.symbol()``, where ``text`` is the text that we are parsing. When ``_add_constituents_spanning`` is called, ``constituents`` should contain all possible constituents that are shorter than ``span``. :type tokens: list of tokens :param tokens: The text we are parsing. This is only used for trace output. """ # Since some of the grammar productions may be unary, we need to # repeatedly try all of the productions until none of them add any # new constituents. changed = True while changed: changed = False # Find all ways instantiations of the grammar productions that # cover the span. instantiations = self._find_instantiations(span, constituents) # For each production instantiation, add a new # ProbabilisticTree whose probability is the product # of the childrens' probabilities and the production's # probability. for (production, children) in instantiations: subtrees = [c for c in children if isinstance(c, Tree)] p = reduce(lambda pr, t: pr * t.prob(), subtrees, production.prob()) node = production.lhs().symbol() tree = ProbabilisticTree(node, children, prob=p) # If it's new a constituent, then add it to the # constituents dictionary. 
c = constituents.get((span[0], span[1], production.lhs())) if self._trace > 1: if c is None or c != tree: if c is None or c.prob() < tree.prob(): print(" Insert:", end=" ") else: print(" Discard:", end=" ") self._trace_production(production, p, span, len(tokens)) if c is None or c.prob() < tree.prob(): constituents[span[0], span[1], production.lhs()] = tree changed = True def _find_instantiations(self, span, constituents): """ :return: a list of the production instantiations that cover a given span of the text. A "production instantiation" is a tuple containing a production and a list of children, where the production's right hand side matches the list of children; and the children cover ``span``. :rtype: list of ``pair`` of ``Production``, (list of (``ProbabilisticTree`` or token. :type span: tuple(int, int) :param span: The section of the text for which we are trying to find production instantiations. The span is specified as a pair of integers, where the first integer is the index of the first token that should be covered by the production instantiation; and the second integer is the index of the first token that should not be covered by the production instantiation. :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) :param constituents: The most likely constituents table. This table records the most probable tree representation for any given span and node value. See the module documentation for more information. """ rv = [] for production in self._grammar.productions(): childlists = self._match_rhs(production.rhs(), span, constituents) for childlist in childlists: rv.append((production, childlist)) return rv def _match_rhs(self, rhs, span, constituents): """ :return: a set of all the lists of children that cover ``span`` and that match ``rhs``. :rtype: list(list(ProbabilisticTree or token) :type rhs: list(Nonterminal or any) :param rhs: The list specifying what kinds of children need to cover ``span``. Each nonterminal in ``rhs`` specifies that the corresponding child should be a tree whose node value is that nonterminal's symbol. Each terminal in ``rhs`` specifies that the corresponding child should be a token whose type is that terminal. :type span: tuple(int, int) :param span: The section of the text for which we are trying to find child lists. The span is specified as a pair of integers, where the first integer is the index of the first token that should be covered by the child list; and the second integer is the index of the first token that should not be covered by the child list. :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) :param constituents: The most likely constituents table. This table records the most probable tree representation for any given span and node value. See the module documentation for more information. """ (start, end) = span # Base case if start >= end and rhs == (): return [[]] if start >= end or rhs == (): return [] # Find everything that matches the 1st symbol of the RHS childlists = [] for split in range(start, end + 1): l = constituents.get((start, split, rhs[0])) if l is not None: rights = self._match_rhs(rhs[1:], (split, end), constituents) childlists += [[l] + r for r in rights] return childlists def _trace_production(self, production, p, span, width): """ Print trace output indicating that a given production has been applied at a given location. 
:param production: The production that has been applied :type production: Production :param p: The probability of the tree produced by the production. :type p: float :param span: The span of the production :type span: tuple :rtype: None """ str = "|" + "." * span[0] str += "=" * (span[1] - span[0]) str += "." * (width - span[1]) + "| " str += "%s" % production if self._trace > 2: str = f"{str:<40} {p:12.10f} " print(str) def _trace_lexical_insertion(self, token, index, width): str = " Insert: |" + "." * index + "=" + "." * (width - index - 1) + "| " str += f"{token}" print(str) def __repr__(self): return "<ViterbiParser for %r>" % self._grammar def reduce(function: Callable[[_T, _S], _T], sequence: Iterable[_S], initial: _T) -> _T: ... def reduce(function: Callable[[_T, _T], _T], sequence: Iterable[_T]) -> _T: ... import sys if sys.version_info >= (3, 9): from types import GenericAlias if sys.version_info >= (3, 8): def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> List[Tuple[AnyStr, AnyStr]]: ... else: def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> List[Tuple[AnyStr, AnyStr]]: ... class PCFG(CFG): """ A probabilistic context-free grammar. A PCFG consists of a start state and a set of productions with probabilities. The set of terminals and nonterminals is implicitly specified by the productions. PCFG productions use the ``ProbabilisticProduction`` class. ``PCFGs`` impose the constraint that the set of productions with any given left-hand-side must have probabilities that sum to 1 (allowing for a small margin of error). If you need efficient key-based access to productions, you can use a subclass to implement it. :type EPSILON: float :cvar EPSILON: The acceptable margin of error for checking that productions with a given left-hand side have probabilities that sum to 1. """ EPSILON = 0.01 def __init__(self, start, productions, calculate_leftcorners=True): """ Create a new context-free grammar, from the given start state and set of ``ProbabilisticProductions``. :param start: The start symbol :type start: Nonterminal :param productions: The list of productions that defines the grammar :type productions: list(Production) :raise ValueError: if the set of productions with any left-hand-side do not have probabilities that sum to a value within EPSILON of 1. :param calculate_leftcorners: False if we don't want to calculate the leftcorner relation. In that case, some optimized chart parsers won't work. :type calculate_leftcorners: bool """ CFG.__init__(self, start, productions, calculate_leftcorners) # Make sure that the probabilities sum to one. 
probs = {} for production in productions: probs[production.lhs()] = probs.get(production.lhs(), 0) + production.prob() for (lhs, p) in probs.items(): if not ((1 - PCFG.EPSILON) < p < (1 + PCFG.EPSILON)): raise ValueError("Productions for %r do not sum to 1" % lhs) def fromstring(cls, input, encoding=None): """ Return a probabilistic context-free grammar corresponding to the input string(s). :param input: a grammar, either in the form of a string or else as a list of strings. """ start, productions = read_grammar( input, standard_nonterm_parser, probabilistic=True, encoding=encoding ) return cls(start, productions) def draw_trees(*trees): """ Open a new window containing a graphical diagram of the given trees. :rtype: None """ TreeView(*trees).mainloop() return The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem: A demonstration of the probabilistic parsers. The user is prompted to select which demo to run, and how many parses should be found; and then each parser is run on the same demo, and a summary of the results are displayed. Here is the function: def demo(): """ A demonstration of the probabilistic parsers. The user is prompted to select which demo to run, and how many parses should be found; and then each parser is run on the same demo, and a summary of the results are displayed. """ import sys import time from nltk import tokenize from nltk.grammar import PCFG from nltk.parse import ViterbiParser toy_pcfg1 = PCFG.fromstring( """ S -> NP VP [1.0] NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] Det -> 'the' [0.8] | 'my' [0.2] N -> 'man' [0.5] | 'telescope' [0.5] VP -> VP PP [0.1] | V NP [0.7] | V [0.2] V -> 'ate' [0.35] | 'saw' [0.65] PP -> P NP [1.0] P -> 'with' [0.61] | 'under' [0.39] """ ) toy_pcfg2 = PCFG.fromstring( """ S -> NP VP [1.0] VP -> V NP [.59] VP -> V [.40] VP -> VP PP [.01] NP -> Det N [.41] NP -> Name [.28] NP -> NP PP [.31] PP -> P NP [1.0] V -> 'saw' [.21] V -> 'ate' [.51] V -> 'ran' [.28] N -> 'boy' [.11] N -> 'cookie' [.12] N -> 'table' [.13] N -> 'telescope' [.14] N -> 'hill' [.5] Name -> 'Jack' [.52] Name -> 'Bob' [.48] P -> 'with' [.61] P -> 'under' [.39] Det -> 'the' [.41] Det -> 'a' [.31] Det -> 'my' [.28] """ ) # Define two demos. Each demo has a sentence and a grammar. demos = [ ("I saw the man with my telescope", toy_pcfg1), ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2), ] # Ask the user which demo they want to use. print() for i in range(len(demos)): print(f"{i + 1:>3}: {demos[i][0]}") print(" %r" % demos[i][1]) print() print("Which demo (%d-%d)? " % (1, len(demos)), end=" ") try: snum = int(sys.stdin.readline().strip()) - 1 sent, grammar = demos[snum] except: print("Bad sentence number") return # Tokenize the sentence. 
tokens = sent.split() parser = ViterbiParser(grammar) all_parses = {} print(f"\nsent: {sent}\nparser: {parser}\ngrammar: {grammar}") parser.trace(3) t = time.time() parses = parser.parse_all(tokens) time = time.time() - t average = ( reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0 ) num_parses = len(parses) for p in parses: all_parses[p.freeze()] = 1 # Print some summary statistics print() print("Time (secs) # Parses Average P(parse)") print("-----------------------------------------") print("%11.4f%11d%19.14f" % (time, num_parses, average)) parses = all_parses.keys() if parses: p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) else: p = 0 print("------------------------------------------") print("%11s%11d%19.14f" % ("n/a", len(parses), p)) # Ask the user if we should draw the parses. print() print("Draw parses (y/n)? ", end=" ") if sys.stdin.readline().strip().lower().startswith("y"): from nltk.draw.tree import draw_trees print(" please wait...") draw_trees(*parses) # Ask the user if we should print the parses. print() print("Print parses (y/n)? ", end=" ") if sys.stdin.readline().strip().lower().startswith("y"): for parse in parses: print(parse)
A demonstration of the probabilistic parsers. The user is prompted to select which demo to run, and how many parses should be found; and then each parser is run on the same demo, and a summary of the results are displayed.
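The demo() above is interactive (it reads the sentence choice and the draw/print answers from stdin). For reference, a non-interactive sketch of the same workflow, using the first toy grammar from the demo; it assumes the standard imports nltk.PCFG and nltk.parse.ViterbiParser.

from nltk import PCFG
from nltk.parse import ViterbiParser

grammar = PCFG.fromstring("""
    S -> NP VP [1.0]
    NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
    Det -> 'the' [0.8] | 'my' [0.2]
    N -> 'man' [0.5] | 'telescope' [0.5]
    VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
    V -> 'ate' [0.35] | 'saw' [0.65]
    PP -> P NP [1.0]
    P -> 'with' [0.61] | 'under' [0.39]
""")

parser = ViterbiParser(grammar)
tokens = "I saw the man with my telescope".split()

# parse() yields at most one tree: the single most probable parse,
# read off the most-likely-constituent table described earlier.
for tree in parser.parse(tokens):
    print(tree)           # the bracketed parse
    print(tree.prob())    # its probability under the PCFG

Calling parser.trace(2) before parsing reproduces the step-by-step table-filling output that the demo prints for each span length.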
170,620
from nltk.data import load from nltk.grammar import CFG, PCFG, FeatureGrammar from nltk.parse.chart import Chart, ChartParser from nltk.parse.featurechart import FeatureChart, FeatureChartParser from nltk.parse.pchart import InsideChartParser def load( resource_url, format="auto", cache=True, verbose=False, logic_parser=None, fstruct_reader=None, encoding=None, ): """ Load a given resource from the NLTK data package. The following resource formats are currently supported: - ``pickle`` - ``json`` - ``yaml`` - ``cfg`` (context free grammars) - ``pcfg`` (probabilistic CFGs) - ``fcfg`` (feature-based CFGs) - ``fol`` (formulas of First Order Logic) - ``logic`` (Logical formulas to be parsed by the given logic_parser) - ``val`` (valuation of First Order Logic model) - ``text`` (the file contents as a unicode string) - ``raw`` (the raw file contents as a byte string) If no format is specified, ``load()`` will attempt to determine a format based on the resource name's file extension. If that fails, ``load()`` will raise a ``ValueError`` exception. For all text formats (everything except ``pickle``, ``json``, ``yaml`` and ``raw``), it tries to decode the raw contents using UTF-8, and if that doesn't work, it tries with ISO-8859-1 (Latin-1), unless the ``encoding`` is specified. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. The default protocol is "nltk:", which searches for the file in the the NLTK data package. :type cache: bool :param cache: If true, add this resource to a cache. If load() finds a resource in its cache, then it will return it from the cache rather than loading it. :type verbose: bool :param verbose: If true, print a message when loading a resource. Messages are not displayed when a resource is retrieved from the cache. :type logic_parser: LogicParser :param logic_parser: The parser that will be used to parse logical expressions. :type fstruct_reader: FeatStructReader :param fstruct_reader: The parser that will be used to parse the feature structure of an fcfg. :type encoding: str :param encoding: the encoding of the input; only used for text formats. """ resource_url = normalize_resource_url(resource_url) resource_url = add_py3_data(resource_url) # Determine the format of the resource. if format == "auto": resource_url_parts = resource_url.split(".") ext = resource_url_parts[-1] if ext == "gz": ext = resource_url_parts[-2] format = AUTO_FORMATS.get(ext) if format is None: raise ValueError( "Could not determine format for %s based " 'on its file\nextension; use the "format" ' "argument to specify the format explicitly." % resource_url ) if format not in FORMATS: raise ValueError(f"Unknown format type: {format}!") # If we've cached the resource, then just return it. if cache: resource_val = _resource_cache.get((resource_url, format)) if resource_val is not None: if verbose: print(f"<<Using cached copy of {resource_url}>>") return resource_val # Let the user know what's going on. if verbose: print(f"<<Loading {resource_url}>>") # Load the resource. 
opened_resource = _open(resource_url) if format == "raw": resource_val = opened_resource.read() elif format == "pickle": resource_val = pickle.load(opened_resource) elif format == "json": import json from nltk.jsontags import json_tags resource_val = json.load(opened_resource) tag = None if len(resource_val) != 1: tag = next(resource_val.keys()) if tag not in json_tags: raise ValueError("Unknown json tag.") elif format == "yaml": import yaml resource_val = yaml.safe_load(opened_resource) else: # The resource is a text format. binary_data = opened_resource.read() if encoding is not None: string_data = binary_data.decode(encoding) else: try: string_data = binary_data.decode("utf-8") except UnicodeDecodeError: string_data = binary_data.decode("latin-1") if format == "text": resource_val = string_data elif format == "cfg": resource_val = grammar.CFG.fromstring(string_data, encoding=encoding) elif format == "pcfg": resource_val = grammar.PCFG.fromstring(string_data, encoding=encoding) elif format == "fcfg": resource_val = grammar.FeatureGrammar.fromstring( string_data, logic_parser=logic_parser, fstruct_reader=fstruct_reader, encoding=encoding, ) elif format == "fol": resource_val = sem.read_logic( string_data, logic_parser=sem.logic.LogicParser(), encoding=encoding, ) elif format == "logic": resource_val = sem.read_logic( string_data, logic_parser=logic_parser, encoding=encoding ) elif format == "val": resource_val = sem.read_valuation(string_data, encoding=encoding) else: raise AssertionError( "Internal NLTK error: Format %s isn't " "handled by nltk.data.load()" % (format,) ) opened_resource.close() # If requested, add it to the cache. if cache: try: _resource_cache[(resource_url, format)] = resource_val # TODO: add this line # print('<<Caching a copy of %s>>' % (resource_url,)) except TypeError: # We can't create weak references to some object types, like # strings and tuples. For now, just don't cache them. pass return resource_val class CFG: """ A context-free grammar. A grammar consists of a start state and a set of productions. The set of terminals and nonterminals is implicitly specified by the productions. If you need efficient key-based access to productions, you can use a subclass to implement it. """ def __init__(self, start, productions, calculate_leftcorners=True): """ Create a new context-free grammar, from the given start state and set of ``Production`` instances. :param start: The start symbol :type start: Nonterminal :param productions: The list of productions that defines the grammar :type productions: list(Production) :param calculate_leftcorners: False if we don't want to calculate the leftcorner relation. In that case, some optimized chart parsers won't work. :type calculate_leftcorners: bool """ if not is_nonterminal(start): raise TypeError( "start should be a Nonterminal object," " not a %s" % type(start).__name__ ) self._start = start self._productions = productions self._categories = {prod.lhs() for prod in productions} self._calculate_indexes() self._calculate_grammar_forms() if calculate_leftcorners: self._calculate_leftcorners() def _calculate_indexes(self): self._lhs_index = {} self._rhs_index = {} self._empty_index = {} self._lexical_index = {} for prod in self._productions: # Left hand side. lhs = prod._lhs if lhs not in self._lhs_index: self._lhs_index[lhs] = [] self._lhs_index[lhs].append(prod) if prod._rhs: # First item in right hand side. 
rhs0 = prod._rhs[0] if rhs0 not in self._rhs_index: self._rhs_index[rhs0] = [] self._rhs_index[rhs0].append(prod) else: # The right hand side is empty. self._empty_index[prod.lhs()] = prod # Lexical tokens in the right hand side. for token in prod._rhs: if is_terminal(token): self._lexical_index.setdefault(token, set()).add(prod) def _calculate_leftcorners(self): # Calculate leftcorner relations, for use in optimized parsing. self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories} self._immediate_leftcorner_words = {cat: set() for cat in self._categories} for prod in self.productions(): if len(prod) > 0: cat, left = prod.lhs(), prod.rhs()[0] if is_nonterminal(left): self._immediate_leftcorner_categories[cat].add(left) else: self._immediate_leftcorner_words[cat].add(left) lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True) self._leftcorners = lc self._leftcorner_parents = invert_graph(lc) nr_leftcorner_categories = sum( map(len, self._immediate_leftcorner_categories.values()) ) nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values())) if nr_leftcorner_words > nr_leftcorner_categories > 10000: # If the grammar is big, the leftcorner-word dictionary will be too large. # In that case it is better to calculate the relation on demand. self._leftcorner_words = None return self._leftcorner_words = {} for cat in self._leftcorners: lefts = self._leftcorners[cat] lc = self._leftcorner_words[cat] = set() for left in lefts: lc.update(self._immediate_leftcorner_words.get(left, set())) def fromstring(cls, input, encoding=None): """ Return the grammar instance corresponding to the input string(s). :param input: a grammar, either in the form of a string or as a list of strings. """ start, productions = read_grammar( input, standard_nonterm_parser, encoding=encoding ) return cls(start, productions) def start(self): """ Return the start symbol of the grammar :rtype: Nonterminal """ return self._start # tricky to balance readability and efficiency here! # can't use set operations as they don't preserve ordering def productions(self, lhs=None, rhs=None, empty=False): """ Return the grammar productions, filtered by the left-hand side or the first item in the right-hand side. :param lhs: Only return productions with the given left-hand side. :param rhs: Only return productions with the given first item in the right-hand side. :param empty: Only return productions with an empty right-hand side. :return: A list of productions matching the given constraints. :rtype: list(Production) """ if rhs and empty: raise ValueError( "You cannot select empty and non-empty " "productions at the same time." ) # no constraints so return everything if not lhs and not rhs: if not empty: return self._productions else: return self._empty_index.values() # only lhs specified so look up its index elif lhs and not rhs: if not empty: return self._lhs_index.get(lhs, []) elif lhs in self._empty_index: return [self._empty_index[lhs]] else: return [] # only rhs specified so look up its index elif rhs and not lhs: return self._rhs_index.get(rhs, []) # intersect else: return [ prod for prod in self._lhs_index.get(lhs, []) if prod in self._rhs_index.get(rhs, []) ] def leftcorners(self, cat): """ Return the set of all nonterminals that the given nonterminal can start with, including itself. 
This is the reflexive, transitive closure of the immediate leftcorner relation: (A > B) iff (A -> B beta) :param cat: the parent of the leftcorners :type cat: Nonterminal :return: the set of all leftcorners :rtype: set(Nonterminal) """ return self._leftcorners.get(cat, {cat}) def is_leftcorner(self, cat, left): """ True if left is a leftcorner of cat, where left can be a terminal or a nonterminal. :param cat: the parent of the leftcorner :type cat: Nonterminal :param left: the suggested leftcorner :type left: Terminal or Nonterminal :rtype: bool """ if is_nonterminal(left): return left in self.leftcorners(cat) elif self._leftcorner_words: return left in self._leftcorner_words.get(cat, set()) else: return any( left in self._immediate_leftcorner_words.get(parent, set()) for parent in self.leftcorners(cat) ) def leftcorner_parents(self, cat): """ Return the set of all nonterminals for which the given category is a left corner. This is the inverse of the leftcorner relation. :param cat: the suggested leftcorner :type cat: Nonterminal :return: the set of all parents to the leftcorner :rtype: set(Nonterminal) """ return self._leftcorner_parents.get(cat, {cat}) def check_coverage(self, tokens): """ Check whether the grammar rules cover the given list of tokens. If not, then raise an exception. :type tokens: list(str) """ missing = [tok for tok in tokens if not self._lexical_index.get(tok)] if missing: missing = ", ".join(f"{w!r}" for w in missing) raise ValueError( "Grammar does not cover some of the " "input words: %r." % missing ) def _calculate_grammar_forms(self): """ Pre-calculate of which form(s) the grammar is. """ prods = self._productions self._is_lexical = all(p.is_lexical() for p in prods) self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1) self._min_len = min(len(p) for p in prods) self._max_len = max(len(p) for p in prods) self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1) def is_lexical(self): """ Return True if all productions are lexicalised. """ return self._is_lexical def is_nonlexical(self): """ Return True if all lexical rules are "preterminals", that is, unary rules which can be separated in a preprocessing step. This means that all productions are of the forms A -> B1 ... Bn (n>=0), or A -> "s". Note: is_lexical() and is_nonlexical() are not opposites. There are grammars which are neither, and grammars which are both. """ return self._is_nonlexical def min_len(self): """ Return the right-hand side length of the shortest grammar production. """ return self._min_len def max_len(self): """ Return the right-hand side length of the longest grammar production. """ return self._max_len def is_nonempty(self): """ Return True if there are no empty productions. """ return self._min_len > 0 def is_binarised(self): """ Return True if all productions are at most binary. Note that there can still be empty and unary productions. """ return self._max_len <= 2 def is_flexible_chomsky_normal_form(self): """ Return True if all productions are of the forms A -> B C, A -> B, or A -> "s". """ return self.is_nonempty() and self.is_nonlexical() and self.is_binarised() def is_chomsky_normal_form(self): """ Return True if the grammar is of Chomsky Normal Form, i.e. all productions are of the form A -> B C, or A -> "s". 
""" return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical def chomsky_normal_form(self, new_token_padding="@$@", flexible=False): """ Returns a new Grammar that is in chomsky normal :param: new_token_padding Customise new rule formation during binarisation """ if self.is_chomsky_normal_form(): return self if self.productions(empty=True): raise ValueError( "Grammar has Empty rules. " "Cannot deal with them at the moment" ) # check for mixed rules for rule in self.productions(): if rule.is_lexical() and len(rule.rhs()) > 1: raise ValueError( f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}" ) step1 = CFG.eliminate_start(self) step2 = CFG.binarize(step1, new_token_padding) if flexible: return step2 step3 = CFG.remove_unitary_rules(step2) step4 = CFG(step3.start(), list(set(step3.productions()))) return step4 def remove_unitary_rules(cls, grammar): """ Remove nonlexical unitary rules and convert them to lexical """ result = [] unitary = [] for rule in grammar.productions(): if len(rule) == 1 and rule.is_nonlexical(): unitary.append(rule) else: result.append(rule) while unitary: rule = unitary.pop(0) for item in grammar.productions(lhs=rule.rhs()[0]): new_rule = Production(rule.lhs(), item.rhs()) if len(new_rule) != 1 or new_rule.is_lexical(): result.append(new_rule) else: unitary.append(new_rule) n_grammar = CFG(grammar.start(), result) return n_grammar def binarize(cls, grammar, padding="@$@"): """ Convert all non-binary rules into binary by introducing new tokens. Example:: Original: A => B C D After Conversion: A => B A@$@B A@$@B => C D """ result = [] for rule in grammar.productions(): if len(rule.rhs()) > 2: # this rule needs to be broken down left_side = rule.lhs() for k in range(0, len(rule.rhs()) - 2): tsym = rule.rhs()[k] new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol()) new_production = Production(left_side, (tsym, new_sym)) left_side = new_sym result.append(new_production) last_prd = Production(left_side, rule.rhs()[-2:]) result.append(last_prd) else: result.append(rule) n_grammar = CFG(grammar.start(), result) return n_grammar def eliminate_start(cls, grammar): """ Eliminate start rule in case it appears on RHS Example: S -> S0 S1 and S0 -> S1 S Then another rule S0_Sigma -> S is added """ start = grammar.start() result = [] need_to_add = None for rule in grammar.productions(): if start in rule.rhs(): need_to_add = True result.append(rule) if need_to_add: start = Nonterminal("S0_SIGMA") result.append(Production(start, [grammar.start()])) n_grammar = CFG(start, result) return n_grammar return grammar def __repr__(self): return "<Grammar with %d productions>" % len(self._productions) def __str__(self): result = "Grammar with %d productions" % len(self._productions) result += " (start state = %r)" % self._start for production in self._productions: result += "\n %s" % production return result class FeatureGrammar(CFG): """ A feature-based grammar. This is equivalent to a ``CFG`` whose nonterminals are all ``FeatStructNonterminal``. A grammar consists of a start state and a set of productions. The set of terminals and nonterminals is implicitly specified by the productions. """ def __init__(self, start, productions): """ Create a new feature-based grammar, from the given start state and set of ``Productions``. 
:param start: The start symbol :type start: FeatStructNonterminal :param productions: The list of productions that defines the grammar :type productions: list(Production) """ CFG.__init__(self, start, productions) # The difference with CFG is that the productions are # indexed on the TYPE feature of the nonterminals. # This is calculated by the method _get_type_if_possible(). def _calculate_indexes(self): self._lhs_index = {} self._rhs_index = {} self._empty_index = {} self._empty_productions = [] self._lexical_index = {} for prod in self._productions: # Left hand side. lhs = self._get_type_if_possible(prod._lhs) if lhs not in self._lhs_index: self._lhs_index[lhs] = [] self._lhs_index[lhs].append(prod) if prod._rhs: # First item in right hand side. rhs0 = self._get_type_if_possible(prod._rhs[0]) if rhs0 not in self._rhs_index: self._rhs_index[rhs0] = [] self._rhs_index[rhs0].append(prod) else: # The right hand side is empty. if lhs not in self._empty_index: self._empty_index[lhs] = [] self._empty_index[lhs].append(prod) self._empty_productions.append(prod) # Lexical tokens in the right hand side. for token in prod._rhs: if is_terminal(token): self._lexical_index.setdefault(token, set()).add(prod) def fromstring( cls, input, features=None, logic_parser=None, fstruct_reader=None, encoding=None ): """ Return a feature structure based grammar. :param input: a grammar, either in the form of a string or else as a list of strings. :param features: a tuple of features (default: SLASH, TYPE) :param logic_parser: a parser for lambda-expressions, by default, ``LogicParser()`` :param fstruct_reader: a feature structure parser (only if features and logic_parser is None) """ if features is None: features = (SLASH, TYPE) if fstruct_reader is None: fstruct_reader = FeatStructReader( features, FeatStructNonterminal, logic_parser=logic_parser ) elif logic_parser is not None: raise Exception( "'logic_parser' and 'fstruct_reader' must " "not both be set" ) start, productions = read_grammar( input, fstruct_reader.read_partial, encoding=encoding ) return cls(start, productions) def productions(self, lhs=None, rhs=None, empty=False): """ Return the grammar productions, filtered by the left-hand side or the first item in the right-hand side. :param lhs: Only return productions with the given left-hand side. :param rhs: Only return productions with the given first item in the right-hand side. :param empty: Only return productions with an empty right-hand side. :rtype: list(Production) """ if rhs and empty: raise ValueError( "You cannot select empty and non-empty " "productions at the same time." ) # no constraints so return everything if not lhs and not rhs: if empty: return self._empty_productions else: return self._productions # only lhs specified so look up its index elif lhs and not rhs: if empty: return self._empty_index.get(self._get_type_if_possible(lhs), []) else: return self._lhs_index.get(self._get_type_if_possible(lhs), []) # only rhs specified so look up its index elif rhs and not lhs: return self._rhs_index.get(self._get_type_if_possible(rhs), []) # intersect else: return [ prod for prod in self._lhs_index.get(self._get_type_if_possible(lhs), []) if prod in self._rhs_index.get(self._get_type_if_possible(rhs), []) ] def leftcorners(self, cat): """ Return the set of all words that the given category can start with. Also called the "first set" in compiler construction. 
""" raise NotImplementedError("Not implemented yet") def leftcorner_parents(self, cat): """ Return the set of all categories for which the given category is a left corner. """ raise NotImplementedError("Not implemented yet") def _get_type_if_possible(self, item): """ Helper function which returns the ``TYPE`` feature of the ``item``, if it exists, otherwise it returns the ``item`` itself """ if isinstance(item, dict) and TYPE in item: return FeatureValueType(item[TYPE]) else: return item class PCFG(CFG): """ A probabilistic context-free grammar. A PCFG consists of a start state and a set of productions with probabilities. The set of terminals and nonterminals is implicitly specified by the productions. PCFG productions use the ``ProbabilisticProduction`` class. ``PCFGs`` impose the constraint that the set of productions with any given left-hand-side must have probabilities that sum to 1 (allowing for a small margin of error). If you need efficient key-based access to productions, you can use a subclass to implement it. :type EPSILON: float :cvar EPSILON: The acceptable margin of error for checking that productions with a given left-hand side have probabilities that sum to 1. """ EPSILON = 0.01 def __init__(self, start, productions, calculate_leftcorners=True): """ Create a new context-free grammar, from the given start state and set of ``ProbabilisticProductions``. :param start: The start symbol :type start: Nonterminal :param productions: The list of productions that defines the grammar :type productions: list(Production) :raise ValueError: if the set of productions with any left-hand-side do not have probabilities that sum to a value within EPSILON of 1. :param calculate_leftcorners: False if we don't want to calculate the leftcorner relation. In that case, some optimized chart parsers won't work. :type calculate_leftcorners: bool """ CFG.__init__(self, start, productions, calculate_leftcorners) # Make sure that the probabilities sum to one. probs = {} for production in productions: probs[production.lhs()] = probs.get(production.lhs(), 0) + production.prob() for (lhs, p) in probs.items(): if not ((1 - PCFG.EPSILON) < p < (1 + PCFG.EPSILON)): raise ValueError("Productions for %r do not sum to 1" % lhs) def fromstring(cls, input, encoding=None): """ Return a probabilistic context-free grammar corresponding to the input string(s). :param input: a grammar, either in the form of a string or else as a list of strings. """ start, productions = read_grammar( input, standard_nonterm_parser, probabilistic=True, encoding=encoding ) return cls(start, productions) class Chart: """ A blackboard for hypotheses about the syntactic constituents of a sentence. A chart contains a set of edges, and each edge encodes a single hypothesis about the structure of some portion of the sentence. The ``select`` method can be used to select a specific collection of edges. For example ``chart.select(is_complete=True, start=0)`` yields all complete edges whose start indices are 0. To ensure the efficiency of these selection operations, ``Chart`` dynamically creates and maintains an index for each set of attributes that have been selected on. In order to reconstruct the trees that are represented by an edge, the chart associates each edge with a set of child pointer lists. A child pointer list is a list of the edges that license an edge's right-hand side. :ivar _tokens: The sentence that the chart covers. :ivar _num_leaves: The number of tokens. 
:ivar _edges: A list of the edges in the chart :ivar _edge_to_cpls: A dictionary mapping each edge to a set of child pointer lists that are associated with that edge. :ivar _indexes: A dictionary mapping tuples of edge attributes to indices, where each index maps the corresponding edge attribute values to lists of edges. """ def __init__(self, tokens): """ Construct a new chart. The chart is initialized with the leaf edges corresponding to the terminal leaves. :type tokens: list :param tokens: The sentence that this chart will be used to parse. """ # Record the sentence token and the sentence length. self._tokens = tuple(tokens) self._num_leaves = len(self._tokens) # Initialise the chart. self.initialize() def initialize(self): """ Clear the chart. """ # A list of edges contained in this chart. self._edges = [] # The set of child pointer lists associated with each edge. self._edge_to_cpls = {} # Indexes mapping attribute values to lists of edges # (used by select()). self._indexes = {} # //////////////////////////////////////////////////////////// # Sentence Access # //////////////////////////////////////////////////////////// def num_leaves(self): """ Return the number of words in this chart's sentence. :rtype: int """ return self._num_leaves def leaf(self, index): """ Return the leaf value of the word at the given index. :rtype: str """ return self._tokens[index] def leaves(self): """ Return a list of the leaf values of each word in the chart's sentence. :rtype: list(str) """ return self._tokens # //////////////////////////////////////////////////////////// # Edge access # //////////////////////////////////////////////////////////// def edges(self): """ Return a list of all edges in this chart. New edges that are added to the chart after the call to edges() will *not* be contained in this list. :rtype: list(EdgeI) :see: ``iteredges``, ``select`` """ return self._edges[:] def iteredges(self): """ Return an iterator over the edges in this chart. It is not guaranteed that new edges which are added to the chart before the iterator is exhausted will also be generated. :rtype: iter(EdgeI) :see: ``edges``, ``select`` """ return iter(self._edges) # Iterating over the chart yields its edges. __iter__ = iteredges def num_edges(self): """ Return the number of edges contained in this chart. :rtype: int """ return len(self._edge_to_cpls) def select(self, **restrictions): """ Return an iterator over the edges in this chart. Any new edges that are added to the chart before the iterator is exahusted will also be generated. ``restrictions`` can be used to restrict the set of edges that will be generated. :param span: Only generate edges ``e`` where ``e.span()==span`` :param start: Only generate edges ``e`` where ``e.start()==start`` :param end: Only generate edges ``e`` where ``e.end()==end`` :param length: Only generate edges ``e`` where ``e.length()==length`` :param lhs: Only generate edges ``e`` where ``e.lhs()==lhs`` :param rhs: Only generate edges ``e`` where ``e.rhs()==rhs`` :param nextsym: Only generate edges ``e`` where ``e.nextsym()==nextsym`` :param dot: Only generate edges ``e`` where ``e.dot()==dot`` :param is_complete: Only generate edges ``e`` where ``e.is_complete()==is_complete`` :param is_incomplete: Only generate edges ``e`` where ``e.is_incomplete()==is_incomplete`` :rtype: iter(EdgeI) """ # If there are no restrictions, then return all edges. if restrictions == {}: return iter(self._edges) # Find the index corresponding to the given restrictions. 
restr_keys = sorted(restrictions.keys()) restr_keys = tuple(restr_keys) # If it doesn't exist, then create it. if restr_keys not in self._indexes: self._add_index(restr_keys) vals = tuple(restrictions[key] for key in restr_keys) return iter(self._indexes[restr_keys].get(vals, [])) def _add_index(self, restr_keys): """ A helper function for ``select``, which creates a new index for a given set of attributes (aka restriction keys). """ # Make sure it's a valid index. for key in restr_keys: if not hasattr(EdgeI, key): raise ValueError("Bad restriction: %s" % key) # Create the index. index = self._indexes[restr_keys] = {} # Add all existing edges to the index. for edge in self._edges: vals = tuple(getattr(edge, key)() for key in restr_keys) index.setdefault(vals, []).append(edge) def _register_with_indexes(self, edge): """ A helper function for ``insert``, which registers the new edge with all existing indexes. """ for (restr_keys, index) in self._indexes.items(): vals = tuple(getattr(edge, key)() for key in restr_keys) index.setdefault(vals, []).append(edge) # //////////////////////////////////////////////////////////// # Edge Insertion # //////////////////////////////////////////////////////////// def insert_with_backpointer(self, new_edge, previous_edge, child_edge): """ Add a new edge to the chart, using a pointer to the previous edge. """ cpls = self.child_pointer_lists(previous_edge) new_cpls = [cpl + (child_edge,) for cpl in cpls] return self.insert(new_edge, *new_cpls) def insert(self, edge, *child_pointer_lists): """ Add a new edge to the chart, and return True if this operation modified the chart. In particular, return true iff the chart did not already contain ``edge``, or if it did not already associate ``child_pointer_lists`` with ``edge``. :type edge: EdgeI :param edge: The new edge :type child_pointer_lists: sequence of tuple(EdgeI) :param child_pointer_lists: A sequence of lists of the edges that were used to form this edge. This list is used to reconstruct the trees (or partial trees) that are associated with ``edge``. :rtype: bool """ # Is it a new edge? if edge not in self._edge_to_cpls: # Add it to the list of edges. self._append_edge(edge) # Register with indexes. self._register_with_indexes(edge) # Get the set of child pointer lists for this edge. cpls = self._edge_to_cpls.setdefault(edge, OrderedDict()) chart_was_modified = False for child_pointer_list in child_pointer_lists: child_pointer_list = tuple(child_pointer_list) if child_pointer_list not in cpls: # It's a new CPL; register it, and return true. cpls[child_pointer_list] = True chart_was_modified = True return chart_was_modified def _append_edge(self, edge): self._edges.append(edge) # //////////////////////////////////////////////////////////// # Tree extraction & child pointer lists # //////////////////////////////////////////////////////////// def parses(self, root, tree_class=Tree): """ Return an iterator of the complete tree structures that span the entire chart, and whose root node is ``root``. """ for edge in self.select(start=0, end=self._num_leaves, lhs=root): yield from self.trees(edge, tree_class=tree_class, complete=True) def trees(self, edge, tree_class=Tree, complete=False): """ Return an iterator of the tree structures that are associated with ``edge``. If ``edge`` is incomplete, then the unexpanded children will be encoded as childless subtrees, whose node value is the corresponding terminal or nonterminal. 
:rtype: list(Tree) :note: If two trees share a common subtree, then the same Tree may be used to encode that subtree in both trees. If you need to eliminate this subtree sharing, then create a deep copy of each tree. """ return iter(self._trees(edge, complete, memo={}, tree_class=tree_class)) def _trees(self, edge, complete, memo, tree_class): """ A helper function for ``trees``. :param memo: A dictionary used to record the trees that we've generated for each edge, so that when we see an edge more than once, we can reuse the same trees. """ # If we've seen this edge before, then reuse our old answer. if edge in memo: return memo[edge] # when we're reading trees off the chart, don't use incomplete edges if complete and edge.is_incomplete(): return [] # Leaf edges. if isinstance(edge, LeafEdge): leaf = self._tokens[edge.start()] memo[edge] = [leaf] return [leaf] # Until we're done computing the trees for edge, set # memo[edge] to be empty. This has the effect of filtering # out any cyclic trees (i.e., trees that contain themselves as # descendants), because if we reach this edge via a cycle, # then it will appear that the edge doesn't generate any trees. memo[edge] = [] trees = [] lhs = edge.lhs().symbol() # Each child pointer list can be used to form trees. for cpl in self.child_pointer_lists(edge): # Get the set of child choices for each child pointer. # child_choices[i] is the set of choices for the tree's # ith child. child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl] # For each combination of children, add a tree. for children in itertools.product(*child_choices): trees.append(tree_class(lhs, children)) # If the edge is incomplete, then extend it with "partial trees": if edge.is_incomplete(): unexpanded = [tree_class(elt, []) for elt in edge.rhs()[edge.dot() :]] for tree in trees: tree.extend(unexpanded) # Update the memoization dictionary. memo[edge] = trees # Return the list of trees. return trees def child_pointer_lists(self, edge): """ Return the set of child pointer lists for the given edge. Each child pointer list is a list of edges that have been used to form this edge. :rtype: list(list(EdgeI)) """ # Make a copy, in case they modify it. return self._edge_to_cpls.get(edge, {}).keys() # //////////////////////////////////////////////////////////// # Display # //////////////////////////////////////////////////////////// def pretty_format_edge(self, edge, width=None): """ Return a pretty-printed string representation of a given edge in this chart. :rtype: str :param width: The number of characters allotted to each index in the sentence. """ if width is None: width = 50 // (self.num_leaves() + 1) (start, end) = (edge.start(), edge.end()) str = "|" + ("." + " " * (width - 1)) * start # Zero-width edges are "#" if complete, ">" if incomplete if start == end: if edge.is_complete(): str += "#" else: str += ">" # Spanning complete edges are "[===]"; Other edges are # "[---]" if complete, "[--->" if incomplete elif edge.is_complete() and edge.span() == (0, self._num_leaves): str += "[" + ("=" * width) * (end - start - 1) + "=" * (width - 1) + "]" elif edge.is_complete(): str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + "]" else: str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + ">" str += (" " * (width - 1) + ".") * (self._num_leaves - end) return str + "| %s" % edge def pretty_format_leaves(self, width=None): """ Return a pretty-printed string representation of this chart's leaves. 
This string can be used as a header for calls to ``pretty_format_edge``. """ if width is None: width = 50 // (self.num_leaves() + 1) if self._tokens is not None and width > 1: header = "|." for tok in self._tokens: header += tok[: width - 1].center(width - 1) + "." header += "|" else: header = "" return header def pretty_format(self, width=None): """ Return a pretty-printed string representation of this chart. :param width: The number of characters allotted to each index in the sentence. :rtype: str """ if width is None: width = 50 // (self.num_leaves() + 1) # sort edges: primary key=length, secondary key=start index. # (and filter out the token edges) edges = sorted((e.length(), e.start(), e) for e in self) edges = [e for (_, _, e) in edges] return ( self.pretty_format_leaves(width) + "\n" + "\n".join(self.pretty_format_edge(edge, width) for edge in edges) ) # //////////////////////////////////////////////////////////// # Display: Dot (AT&T Graphviz) # //////////////////////////////////////////////////////////// def dot_digraph(self): # Header s = "digraph nltk_chart {\n" # s += ' size="5,5";\n' s += " rankdir=LR;\n" s += " node [height=0.1,width=0.1];\n" s += ' node [style=filled, color="lightgray"];\n' # Set up the nodes for y in range(self.num_edges(), -1, -1): if y == 0: s += ' node [style=filled, color="black"];\n' for x in range(self.num_leaves() + 1): if y == 0 or ( x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end() ): s += ' %04d.%04d [label=""];\n' % (x, y) # Add a spacer s += " x [style=invis]; x->0000.0000 [style=invis];\n" # Declare ranks. for x in range(self.num_leaves() + 1): s += " {rank=same;" for y in range(self.num_edges() + 1): if y == 0 or ( x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end() ): s += " %04d.%04d" % (x, y) s += "}\n" # Add the leaves s += " edge [style=invis, weight=100];\n" s += " node [shape=plaintext]\n" s += " 0000.0000" for x in range(self.num_leaves()): s += "->%s->%04d.0000" % (self.leaf(x), x + 1) s += ";\n\n" # Add the edges s += " edge [style=solid, weight=1];\n" for y, edge in enumerate(self): for x in range(edge.start()): s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % ( x, y + 1, x + 1, y + 1, ) s += ' %04d.%04d -> %04d.%04d [label="%s"];\n' % ( edge.start(), y + 1, edge.end(), y + 1, edge, ) for x in range(edge.end(), self.num_leaves()): s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % ( x, y + 1, x + 1, y + 1, ) s += "}\n" return s class ChartParser(ParserI): """ A generic chart parser. A "strategy", or list of ``ChartRuleI`` instances, is used to decide what edges to add to the chart. In particular, ``ChartParser`` uses the following algorithm to parse texts: | Until no new edges are added: | For each *rule* in *strategy*: | Apply *rule* to any applicable edges in the chart. | Return any complete parses in the chart """ def __init__( self, grammar, strategy=BU_LC_STRATEGY, trace=0, trace_chart_width=50, use_agenda=True, chart_class=Chart, ): """ Create a new chart parser, that uses ``grammar`` to parse texts. :type grammar: CFG :param grammar: The grammar used to parse texts. :type strategy: list(ChartRuleI) :param strategy: A list of rules that should be used to decide what edges to add to the chart (top-down strategy by default). :type trace: int :param trace: The level of tracing that should be used when parsing a text. ``0`` will generate no tracing output; and higher numbers will produce more verbose tracing output. 
:type trace_chart_width: int :param trace_chart_width: The default total width reserved for the chart in trace output. The remainder of each line will be used to display edges. :type use_agenda: bool :param use_agenda: Use an optimized agenda-based algorithm, if possible. :param chart_class: The class that should be used to create the parse charts. """ self._grammar = grammar self._strategy = strategy self._trace = trace self._trace_chart_width = trace_chart_width # If the strategy only consists of axioms (NUM_EDGES==0) and # inference rules (NUM_EDGES==1), we can use an agenda-based algorithm: self._use_agenda = use_agenda self._chart_class = chart_class self._axioms = [] self._inference_rules = [] for rule in strategy: if rule.NUM_EDGES == 0: self._axioms.append(rule) elif rule.NUM_EDGES == 1: self._inference_rules.append(rule) else: self._use_agenda = False def grammar(self): return self._grammar def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width): if not trace: return print_rule_header = trace > 1 for edge in new_edges: if print_rule_header: print("%s:" % rule) print_rule_header = False print(chart.pretty_format_edge(edge, edge_width)) def chart_parse(self, tokens, trace=None): """ Return the final parse ``Chart`` from which all possible parse trees can be extracted. :param tokens: The sentence to be parsed :type tokens: list(str) :rtype: Chart """ if trace is None: trace = self._trace trace_new_edges = self._trace_new_edges tokens = list(tokens) self._grammar.check_coverage(tokens) chart = self._chart_class(tokens) grammar = self._grammar # Width, for printing trace edges. trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1) if trace: print(chart.pretty_format_leaves(trace_edge_width)) if self._use_agenda: # Use an agenda-based algorithm. for axiom in self._axioms: new_edges = list(axiom.apply(chart, grammar)) trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width) inference_rules = self._inference_rules agenda = chart.edges() # We reverse the initial agenda, since it is a stack # but chart.edges() functions as a queue. agenda.reverse() while agenda: edge = agenda.pop() for rule in inference_rules: new_edges = list(rule.apply(chart, grammar, edge)) if trace: trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) agenda += new_edges else: # Do not use an agenda-based algorithm. edges_added = True while edges_added: edges_added = False for rule in self._strategy: new_edges = list(rule.apply_everywhere(chart, grammar)) edges_added = len(new_edges) trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) # Return the final chart. return chart def parse(self, tokens, tree_class=Tree): chart = self.chart_parse(tokens) return iter(chart.parses(self._grammar.start(), tree_class=tree_class)) class FeatureChart(Chart): """ A Chart for feature grammars. :see: ``Chart`` for more information. """ def select(self, **restrictions): """ Returns an iterator over the edges in this chart. See ``Chart.select`` for more information about the ``restrictions`` on the edges. """ # If there are no restrictions, then return all edges. if restrictions == {}: return iter(self._edges) # Find the index corresponding to the given restrictions. restr_keys = sorted(restrictions.keys()) restr_keys = tuple(restr_keys) # If it doesn't exist, then create it. 
if restr_keys not in self._indexes: self._add_index(restr_keys) vals = tuple( self._get_type_if_possible(restrictions[key]) for key in restr_keys ) return iter(self._indexes[restr_keys].get(vals, [])) def _add_index(self, restr_keys): """ A helper function for ``select``, which creates a new index for a given set of attributes (aka restriction keys). """ # Make sure it's a valid index. for key in restr_keys: if not hasattr(EdgeI, key): raise ValueError("Bad restriction: %s" % key) # Create the index. index = self._indexes[restr_keys] = {} # Add all existing edges to the index. for edge in self._edges: vals = tuple( self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys ) index.setdefault(vals, []).append(edge) def _register_with_indexes(self, edge): """ A helper function for ``insert``, which registers the new edge with all existing indexes. """ for (restr_keys, index) in self._indexes.items(): vals = tuple( self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys ) index.setdefault(vals, []).append(edge) def _get_type_if_possible(self, item): """ Helper function which returns the ``TYPE`` feature of the ``item``, if it exists, otherwise it returns the ``item`` itself """ if isinstance(item, dict) and TYPE in item: return item[TYPE] else: return item def parses(self, start, tree_class=Tree): for edge in self.select(start=0, end=self._num_leaves): if ( (isinstance(edge, FeatureTreeEdge)) and (edge.lhs()[TYPE] == start[TYPE]) and (unify(edge.lhs(), start, rename_vars=True)) ): yield from self.trees(edge, complete=True, tree_class=tree_class) class FeatureChartParser(ChartParser): def __init__( self, grammar, strategy=BU_LC_FEATURE_STRATEGY, trace_chart_width=20, chart_class=FeatureChart, **parser_args, ): ChartParser.__init__( self, grammar, strategy=strategy, trace_chart_width=trace_chart_width, chart_class=chart_class, **parser_args, ) class InsideChartParser(BottomUpProbabilisticChartParser): """ A bottom-up parser for ``PCFG`` grammars that tries edges in descending order of the inside probabilities of their trees. The "inside probability" of a tree is simply the probability of the entire tree, ignoring its context. In particular, the inside probability of a tree generated by production *p* with children *c[1], c[2], ..., c[n]* is *P(p)P(c[1])P(c[2])...P(c[n])*; and the inside probability of a token is 1 if it is present in the text, and 0 if it is absent. This sorting order results in a type of lowest-cost-first search strategy. """ # Inherit constructor. def sort_queue(self, queue, chart): """ Sort the given queue of edges, in descending order of the inside probabilities of the edges' trees. :param queue: The queue of ``Edge`` objects to sort. Each edge in this queue is an edge that could be added to the chart by the fundamental rule; but that has not yet been added. :type queue: list(Edge) :param chart: The chart being used to parse the text. This chart can be used to provide extra information for sorting the queue. :type chart: Chart :rtype: None """ queue.sort(key=lambda edge: edge.prob()) The provided code snippet includes necessary dependencies for implementing the `load_parser` function. Write a Python function `def load_parser( grammar_url, trace=0, parser=None, chart_class=None, beam_size=0, **load_args )` to solve the following problem: Load a grammar from a file, and build a parser based on that grammar. The parser depends on the grammar format, and might also depend on properties of the grammar itself. 
The following grammar formats are currently supported: - ``'cfg'`` (CFGs: ``CFG``) - ``'pcfg'`` (probabilistic CFGs: ``PCFG``) - ``'fcfg'`` (feature-based CFGs: ``FeatureGrammar``) :type grammar_url: str :param grammar_url: A URL specifying where the grammar is located. The default protocol is ``"nltk:"``, which searches for the file in the the NLTK data package. :type trace: int :param trace: The level of tracing that should be used when parsing a text. ``0`` will generate no tracing output; and higher numbers will produce more verbose tracing output. :param parser: The class used for parsing; should be ``ChartParser`` or a subclass. If None, the class depends on the grammar format. :param chart_class: The class used for storing the chart; should be ``Chart`` or a subclass. Only used for CFGs and feature CFGs. If None, the chart class depends on the grammar format. :type beam_size: int :param beam_size: The maximum length for the parser's edge queue. Only used for probabilistic CFGs. :param load_args: Keyword parameters used when loading the grammar. See ``data.load`` for more information. Here is the function: def load_parser( grammar_url, trace=0, parser=None, chart_class=None, beam_size=0, **load_args ): """ Load a grammar from a file, and build a parser based on that grammar. The parser depends on the grammar format, and might also depend on properties of the grammar itself. The following grammar formats are currently supported: - ``'cfg'`` (CFGs: ``CFG``) - ``'pcfg'`` (probabilistic CFGs: ``PCFG``) - ``'fcfg'`` (feature-based CFGs: ``FeatureGrammar``) :type grammar_url: str :param grammar_url: A URL specifying where the grammar is located. The default protocol is ``"nltk:"``, which searches for the file in the the NLTK data package. :type trace: int :param trace: The level of tracing that should be used when parsing a text. ``0`` will generate no tracing output; and higher numbers will produce more verbose tracing output. :param parser: The class used for parsing; should be ``ChartParser`` or a subclass. If None, the class depends on the grammar format. :param chart_class: The class used for storing the chart; should be ``Chart`` or a subclass. Only used for CFGs and feature CFGs. If None, the chart class depends on the grammar format. :type beam_size: int :param beam_size: The maximum length for the parser's edge queue. Only used for probabilistic CFGs. :param load_args: Keyword parameters used when loading the grammar. See ``data.load`` for more information. """ grammar = load(grammar_url, **load_args) if not isinstance(grammar, CFG): raise ValueError("The grammar must be a CFG, " "or a subclass thereof.") if isinstance(grammar, PCFG): if parser is None: parser = InsideChartParser return parser(grammar, trace=trace, beam_size=beam_size) elif isinstance(grammar, FeatureGrammar): if parser is None: parser = FeatureChartParser if chart_class is None: chart_class = FeatureChart return parser(grammar, trace=trace, chart_class=chart_class) else: # Plain CFG. if parser is None: parser = ChartParser if chart_class is None: chart_class = Chart return parser(grammar, trace=trace, chart_class=chart_class)
Load a grammar from a file, and build a parser based on that grammar. The parser depends on the grammar format, and might also depend on properties of the grammar itself. The following grammar formats are currently supported: - ``'cfg'`` (CFGs: ``CFG``) - ``'pcfg'`` (probabilistic CFGs: ``PCFG``) - ``'fcfg'`` (feature-based CFGs: ``FeatureGrammar``) :type grammar_url: str :param grammar_url: A URL specifying where the grammar is located. The default protocol is ``"nltk:"``, which searches for the file in the NLTK data package. :type trace: int :param trace: The level of tracing that should be used when parsing a text. ``0`` will generate no tracing output; and higher numbers will produce more verbose tracing output. :param parser: The class used for parsing; should be ``ChartParser`` or a subclass. If None, the class depends on the grammar format. :param chart_class: The class used for storing the chart; should be ``Chart`` or a subclass. Only used for CFGs and feature CFGs. If None, the chart class depends on the grammar format. :type beam_size: int :param beam_size: The maximum length for the parser's edge queue. Only used for probabilistic CFGs. :param load_args: Keyword parameters used when loading the grammar. See ``data.load`` for more information.
170,621
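A short usage sketch of ``load_parser`` may help here. It assumes the definitions above are in scope and that the NLTK data package's book grammars are installed; the grammar path ``grammars/book_grammars/feat0.fcfg`` and the example sentence are assumptions added for illustration, not part of the snippet above. Because the resource is an ``.fcfg``, the ``FeatureGrammar`` branch should return a ``FeatureChartParser`` backed by a ``FeatureChart``:

>>> cp = load_parser('grammars/book_grammars/feat0.fcfg', trace=0)  # assumed grammar URL
>>> tokens = 'Kim likes children'.split()
>>> for tree in cp.parse(tokens):  # doctest: +SKIP
...     print(tree)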
from nltk.data import load from nltk.grammar import CFG, PCFG, FeatureGrammar from nltk.parse.chart import Chart, ChartParser from nltk.parse.featurechart import FeatureChart, FeatureChartParser from nltk.parse.pchart import InsideChartParser def taggedsent_to_conll(sentence): """ A module to convert a single POS tagged sentence into CONLL format. >>> from nltk import word_tokenize, pos_tag >>> text = "This is a foobar sentence." >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE ... print(line, end="") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ :param sentence: A single input sentence to parse :type sentence: list(tuple(str, str)) :rtype: iter(str) :return: a generator yielding a single sentence in CONLL format. """ for (i, (word, tag)) in enumerate(sentence, start=1): input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"] input_str = "\t".join(input_str) + "\n" yield input_str The provided code snippet includes necessary dependencies for implementing the `taggedsents_to_conll` function. Write a Python function `def taggedsents_to_conll(sentences)` to solve the following problem: A module to convert the a POS tagged document stream (i.e. list of list of tuples, a list of sentences) and yield lines in CONLL format. This module yields one line per word and two newlines for end of sentence. >>> from nltk import word_tokenize, sent_tokenize, pos_tag >>> text = "This is a foobar sentence. Is that right?" >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)] >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE ... if line: ... print(line, end="") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> 1 Is _ VBZ VBZ _ 0 a _ _ 2 that _ IN IN _ 0 a _ _ 3 right _ NN NN _ 0 a _ _ 4 ? _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> :param sentences: Input sentences to parse :type sentence: list(list(tuple(str, str))) :rtype: iter(str) :return: a generator yielding sentences in CONLL format. Here is the function: def taggedsents_to_conll(sentences): """ A module to convert the a POS tagged document stream (i.e. list of list of tuples, a list of sentences) and yield lines in CONLL format. This module yields one line per word and two newlines for end of sentence. >>> from nltk import word_tokenize, sent_tokenize, pos_tag >>> text = "This is a foobar sentence. Is that right?" >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)] >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE ... if line: ... print(line, end="") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> 1 Is _ VBZ VBZ _ 0 a _ _ 2 that _ IN IN _ 0 a _ _ 3 right _ NN NN _ 0 a _ _ 4 ? _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> :param sentences: Input sentences to parse :type sentence: list(list(tuple(str, str))) :rtype: iter(str) :return: a generator yielding sentences in CONLL format. """ for sentence in sentences: yield from taggedsent_to_conll(sentence) yield "\n\n"
A module to convert the a POS tagged document stream (i.e. list of list of tuples, a list of sentences) and yield lines in CONLL format. This module yields one line per word and two newlines for end of sentence. >>> from nltk import word_tokenize, sent_tokenize, pos_tag >>> text = "This is a foobar sentence. Is that right?" >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)] >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE ... if line: ... print(line, end="") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> 1 Is _ VBZ VBZ _ 0 a _ _ 2 that _ IN IN _ 0 a _ _ 3 right _ NN NN _ 0 a _ _ 4 ? _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> :param sentences: Input sentences to parse :type sentence: list(list(tuple(str, str))) :rtype: iter(str) :return: a generator yielding sentences in CONLL format.
170,622
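Beyond printing, the generator's output can be written straight to disk, e.g. as input for a CoNLL-consuming dependency parser. A minimal sketch, assuming ``taggedsents_to_conll`` is in scope, NLTK's tokenizer and tagger models are available, and ``input.conll`` is an arbitrary output path:

>>> from nltk import word_tokenize, sent_tokenize, pos_tag
>>> text = "This is a foobar sentence. Is that right?"
>>> tagged_sents = [pos_tag(word_tokenize(s)) for s in sent_tokenize(text)]
>>> with open('input.conll', 'w') as fout:
...     fout.writelines(taggedsents_to_conll(tagged_sents))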
from nltk.data import load from nltk.grammar import CFG, PCFG, FeatureGrammar from nltk.parse.chart import Chart, ChartParser from nltk.parse.featurechart import FeatureChart, FeatureChartParser from nltk.parse.pchart import InsideChartParser The provided code snippet includes necessary dependencies for implementing the `extract_test_sentences` function. Write a Python function `def extract_test_sentences(string, comment_chars="#%;", encoding=None)` to solve the following problem: Parses a string with one test sentence per line. Lines can optionally begin with: - a bool, saying if the sentence is grammatical or not, or - an int, giving the number of parse trees is should have, The result information is followed by a colon, and then the sentence. Empty lines and lines beginning with a comment char are ignored. :return: a list of tuple of sentences and expected results, where a sentence is a list of str, and a result is None, or bool, or int :param comment_chars: ``str`` of possible comment characters. :param encoding: the encoding of the string, if it is binary Here is the function: def extract_test_sentences(string, comment_chars="#%;", encoding=None): """ Parses a string with one test sentence per line. Lines can optionally begin with: - a bool, saying if the sentence is grammatical or not, or - an int, giving the number of parse trees is should have, The result information is followed by a colon, and then the sentence. Empty lines and lines beginning with a comment char are ignored. :return: a list of tuple of sentences and expected results, where a sentence is a list of str, and a result is None, or bool, or int :param comment_chars: ``str`` of possible comment characters. :param encoding: the encoding of the string, if it is binary """ if encoding is not None: string = string.decode(encoding) sentences = [] for sentence in string.split("\n"): if sentence == "" or sentence[0] in comment_chars: continue split_info = sentence.split(":", 1) result = None if len(split_info) == 2: if split_info[0] in ["True", "true", "False", "false"]: result = split_info[0] in ["True", "true"] sentence = split_info[1] else: result = int(split_info[0]) sentence = split_info[1] tokens = sentence.split() if tokens == []: continue sentences += [(tokens, result)] return sentences
Parses a string with one test sentence per line. Lines can optionally begin with: - a bool, saying if the sentence is grammatical or not, or - an int, giving the number of parse trees it should have. The result information is followed by a colon, and then the sentence. Empty lines and lines beginning with a comment char are ignored. :return: a list of tuples of sentences and expected results, where a sentence is a list of str, and a result is None, or bool, or int :param comment_chars: ``str`` of possible comment characters. :param encoding: the encoding of the string, if it is binary
170,623
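The expected line format is easiest to see on a small made-up input (the sentences below are invented for illustration); assuming ``extract_test_sentences`` is in scope:

>>> s = '''
... # comments and blank lines are ignored
... 3: the men saw the dog
... True: the dog barked
... the cat sleeps
... '''
>>> extract_test_sentences(s)
[(['the', 'men', 'saw', 'the', 'dog'], 3), (['the', 'dog', 'barked'], True), (['the', 'cat', 'sleeps'], None)]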
import logging import math from nltk.parse.dependencygraph import DependencyGraph def nonprojective_conll_parse_demo(): from nltk.parse.dependencygraph import conll_data2 graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] npp = ProbabilisticNonprojectiveParser() npp.train(graphs, NaiveBayesDependencyScorer()) for parse_graph in npp.parse( ["Cathy", "zag", "hen", "zwaaien", "."], ["N", "V", "Pron", "Adj", "N", "Punc"] ): print(parse_graph) def rule_based_demo(): from nltk.grammar import DependencyGrammar grammar = DependencyGrammar.fromstring( """ 'taught' -> 'play' | 'man' 'man' -> 'the' | 'in' 'in' -> 'corner' 'corner' -> 'the' 'play' -> 'golf' | 'dachshund' | 'to' 'dachshund' -> 'his' """ ) print(grammar) ndp = NonprojectiveDependencyParser(grammar) graphs = ndp.parse( [ "the", "man", "in", "the", "corner", "taught", "his", "dachshund", "to", "play", "golf", ] ) print("Graphs:") for graph in graphs: print(graph) def demo(): # hall_demo() nonprojective_conll_parse_demo() rule_based_demo()
null
170,624
import logging import math from nltk.parse.dependencygraph import DependencyGraph class DemoScorer(DependencyScorerI): def train(self, graphs): print("Training...") def score(self, graph): # scores for Keith Hall 'K-best Spanning Tree Parsing' paper return [ [[], [5], [1], [1]], [[], [], [11], [4]], [[], [10], [], [5]], [[], [8], [8], []], ] class ProbabilisticNonprojectiveParser: """A probabilistic non-projective dependency parser. Nonprojective dependencies allows for "crossing branches" in the parse tree which is necessary for representing particular linguistic phenomena, or even typical parses in some languages. This parser follows the MST parsing algorithm, outlined in McDonald(2005), which likens the search for the best non-projective parse to finding the maximum spanning tree in a weighted directed graph. >>> class Scorer(DependencyScorerI): ... def train(self, graphs): ... pass ... ... def score(self, graph): ... return [ ... [[], [5], [1], [1]], ... [[], [], [11], [4]], ... [[], [10], [], [5]], ... [[], [8], [8], []], ... ] >>> npp = ProbabilisticNonprojectiveParser() >>> npp.train([], Scorer()) >>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None]) >>> len(list(parses)) 1 Rule based example >>> from nltk.grammar import DependencyGrammar >>> grammar = DependencyGrammar.fromstring(''' ... 'taught' -> 'play' | 'man' ... 'man' -> 'the' | 'in' ... 'in' -> 'corner' ... 'corner' -> 'the' ... 'play' -> 'golf' | 'dachshund' | 'to' ... 'dachshund' -> 'his' ... ''') >>> ndp = NonprojectiveDependencyParser(grammar) >>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf']) >>> len(list(parses)) 4 """ def __init__(self): """ Creates a new non-projective parser. """ logging.debug("initializing prob. nonprojective...") def train(self, graphs, dependency_scorer): """ Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects, and establishes this as the parser's scorer. This is used to initialize the scores on a ``DependencyGraph`` during the parsing procedure. :type graphs: list(DependencyGraph) :param graphs: A list of dependency graphs to train the scorer. :type dependency_scorer: DependencyScorerI :param dependency_scorer: A scorer which implements the ``DependencyScorerI`` interface. """ self._scorer = dependency_scorer self._scorer.train(graphs) def initialize_edge_scores(self, graph): """ Assigns a score to every edge in the ``DependencyGraph`` graph. These scores are generated via the parser's scorer which was assigned during the training process. :type graph: DependencyGraph :param graph: A dependency graph to assign scores to. """ self.scores = self._scorer.score(graph) def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph): """ Takes a list of nodes that have been identified to belong to a cycle, and collapses them into on larger node. The arcs of all nodes in the graph must be updated to account for this. :type new_node: Node. :param new_node: A Node (Dictionary) to collapse the cycle nodes into. :type cycle_path: A list of integers. :param cycle_path: A list of node addresses, each of which is in the cycle. :type g_graph, b_graph, c_graph: DependencyGraph :param g_graph, b_graph, c_graph: Graphs which need to be updated. 
""" logger.debug("Collapsing nodes...") # Collapse all cycle nodes into v_n+1 in G_Graph for cycle_node_index in cycle_path: g_graph.remove_by_address(cycle_node_index) g_graph.add_node(new_node) g_graph.redirect_arcs(cycle_path, new_node["address"]) def update_edge_scores(self, new_node, cycle_path): """ Updates the edge scores to reflect a collapse operation into new_node. :type new_node: A Node. :param new_node: The node which cycle nodes are collapsed into. :type cycle_path: A list of integers. :param cycle_path: A list of node addresses that belong to the cycle. """ logger.debug("cycle %s", cycle_path) cycle_path = self.compute_original_indexes(cycle_path) logger.debug("old cycle %s", cycle_path) logger.debug("Prior to update: %s", self.scores) for i, row in enumerate(self.scores): for j, column in enumerate(self.scores[i]): logger.debug(self.scores[i][j]) if j in cycle_path and i not in cycle_path and self.scores[i][j]: subtract_val = self.compute_max_subtract_score(j, cycle_path) logger.debug("%s - %s", self.scores[i][j], subtract_val) new_vals = [] for cur_val in self.scores[i][j]: new_vals.append(cur_val - subtract_val) self.scores[i][j] = new_vals for i, row in enumerate(self.scores): for j, cell in enumerate(self.scores[i]): if i in cycle_path and j in cycle_path: self.scores[i][j] = [] logger.debug("After update: %s", self.scores) def compute_original_indexes(self, new_indexes): """ As nodes are collapsed into others, they are replaced by the new node in the graph, but it's still necessary to keep track of what these original nodes were. This takes a list of node addresses and replaces any collapsed node addresses with their original addresses. :type new_indexes: A list of integers. :param new_indexes: A list of node addresses to check for subsumed nodes. """ swapped = True while swapped: originals = [] swapped = False for new_index in new_indexes: if new_index in self.inner_nodes: for old_val in self.inner_nodes[new_index]: if old_val not in originals: originals.append(old_val) swapped = True else: originals.append(new_index) new_indexes = originals return new_indexes def compute_max_subtract_score(self, column_index, cycle_indexes): """ When updating scores the score of the highest-weighted incoming arc is subtracted upon collapse. This returns the correct amount to subtract from that edge. :type column_index: integer. :param column_index: A index representing the column of incoming arcs to a particular node being updated :type cycle_indexes: A list of integers. :param cycle_indexes: Only arcs from cycle nodes are considered. This is a list of such nodes addresses. """ max_score = -100000 for row_index in cycle_indexes: for subtract_val in self.scores[row_index][column_index]: if subtract_val > max_score: max_score = subtract_val return max_score def best_incoming_arc(self, node_index): """ Returns the source of the best incoming arc to the node with address: node_index :type node_index: integer. :param node_index: The address of the 'destination' node, the node that is arced to. 
""" originals = self.compute_original_indexes([node_index]) logger.debug("originals: %s", originals) max_arc = None max_score = None for row_index in range(len(self.scores)): for col_index in range(len(self.scores[row_index])): if col_index in originals and ( max_score is None or self.scores[row_index][col_index] > max_score ): max_score = self.scores[row_index][col_index] max_arc = row_index logger.debug("%s, %s", row_index, col_index) logger.debug(max_score) for key in self.inner_nodes: replaced_nodes = self.inner_nodes[key] if max_arc in replaced_nodes: return key return max_arc def original_best_arc(self, node_index): originals = self.compute_original_indexes([node_index]) max_arc = None max_score = None max_orig = None for row_index in range(len(self.scores)): for col_index in range(len(self.scores[row_index])): if col_index in originals and ( max_score is None or self.scores[row_index][col_index] > max_score ): max_score = self.scores[row_index][col_index] max_arc = row_index max_orig = col_index return [max_arc, max_orig] def parse(self, tokens, tags): """ Parses a list of tokens in accordance to the MST parsing algorithm for non-projective dependency parses. Assumes that the tokens to be parsed have already been tagged and those tags are provided. Various scoring methods can be used by implementing the ``DependencyScorerI`` interface and passing it to the training algorithm. :type tokens: list(str) :param tokens: A list of words or punctuation to be parsed. :type tags: list(str) :param tags: A list of tags corresponding by index to the words in the tokens list. :return: An iterator of non-projective parses. :rtype: iter(DependencyGraph) """ self.inner_nodes = {} # Initialize g_graph g_graph = DependencyGraph() for index, token in enumerate(tokens): g_graph.nodes[index + 1].update( {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} ) # Fully connect non-root nodes in g_graph g_graph.connect_graph() original_graph = DependencyGraph() for index, token in enumerate(tokens): original_graph.nodes[index + 1].update( {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} ) b_graph = DependencyGraph() c_graph = DependencyGraph() for index, token in enumerate(tokens): c_graph.nodes[index + 1].update( {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} ) # Assign initial scores to g_graph edges self.initialize_edge_scores(g_graph) logger.debug(self.scores) # Initialize a list of unvisited vertices (by node address) unvisited_vertices = [vertex["address"] for vertex in c_graph.nodes.values()] # Iterate over unvisited vertices nr_vertices = len(tokens) betas = {} while unvisited_vertices: # Mark current node as visited current_vertex = unvisited_vertices.pop(0) logger.debug("current_vertex: %s", current_vertex) # Get corresponding node n_i to vertex v_i current_node = g_graph.get_by_address(current_vertex) logger.debug("current_node: %s", current_node) # Get best in-edge node b for current node best_in_edge = self.best_incoming_arc(current_vertex) betas[current_vertex] = self.original_best_arc(current_vertex) logger.debug("best in arc: %s --> %s", best_in_edge, current_vertex) # b_graph = Union(b_graph, b) for new_vertex in [current_vertex, best_in_edge]: b_graph.nodes[new_vertex].update( {"word": "TEMP", "rel": "NTOP", "address": new_vertex} ) b_graph.add_arc(best_in_edge, current_vertex) # Beta(current node) = b - stored for parse recovery # If b_graph contains a cycle, collapse it cycle_path = b_graph.contains_cycle() if 
cycle_path: # Create a new node v_n+1 with address = len(nodes) + 1 new_node = {"word": "NONE", "rel": "NTOP", "address": nr_vertices + 1} # c_graph = Union(c_graph, v_n+1) c_graph.add_node(new_node) # Collapse all nodes in cycle C into v_n+1 self.update_edge_scores(new_node, cycle_path) self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph) for cycle_index in cycle_path: c_graph.add_arc(new_node["address"], cycle_index) # self.replaced_by[cycle_index] = new_node['address'] self.inner_nodes[new_node["address"]] = cycle_path # Add v_n+1 to list of unvisited vertices unvisited_vertices.insert(0, nr_vertices + 1) # increment # of nodes counter nr_vertices += 1 # Remove cycle nodes from b_graph; B = B - cycle c for cycle_node_address in cycle_path: b_graph.remove_by_address(cycle_node_address) logger.debug("g_graph: %s", g_graph) logger.debug("b_graph: %s", b_graph) logger.debug("c_graph: %s", c_graph) logger.debug("Betas: %s", betas) logger.debug("replaced nodes %s", self.inner_nodes) # Recover parse tree logger.debug("Final scores: %s", self.scores) logger.debug("Recovering parse...") for i in range(len(tokens) + 1, nr_vertices + 1): betas[betas[i][1]] = betas[i] logger.debug("Betas: %s", betas) for node in original_graph.nodes.values(): # TODO: It's dangerous to assume that deps it a dictionary # because it's a default dictionary. Ideally, here we should not # be concerned how dependencies are stored inside of a dependency # graph. node["deps"] = {} for i in range(1, len(tokens) + 1): original_graph.add_arc(betas[i][0], betas[i][1]) logger.debug("Done.") yield original_graph def hall_demo(): npp = ProbabilisticNonprojectiveParser() npp.train([], DemoScorer()) for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]): print(parse_graph)
null
170,625
from nltk.grammar import Nonterminal from nltk.parse.api import ParserI from nltk.tree import ImmutableTree, Tree class RecursiveDescentParser(ParserI): """ A simple top-down CFG parser that parses texts by recursively expanding the fringe of a Tree, and matching it against a text. ``RecursiveDescentParser`` uses a list of tree locations called a "frontier" to remember which subtrees have not yet been expanded and which leaves have not yet been matched against the text. Each tree location consists of a list of child indices specifying the path from the root of the tree to a subtree or a leaf; see the reference documentation for Tree for more information about tree locations. When the parser begins parsing a text, it constructs a tree containing only the start symbol, and a frontier containing the location of the tree's root node. It then extends the tree to cover the text, using the following recursive procedure: - If the frontier is empty, and the text is covered by the tree, then return the tree as a possible parse. - If the frontier is empty, and the text is not covered by the tree, then return no parses. - If the first element of the frontier is a subtree, then use CFG productions to "expand" it. For each applicable production, add the expanded subtree's children to the frontier, and recursively find all parses that can be generated by the new tree and frontier. - If the first element of the frontier is a token, then "match" it against the next token from the text. Remove the token from the frontier, and recursively find all parses that can be generated by the new tree and frontier. :see: ``nltk.grammar`` """ def __init__(self, grammar, trace=0): """ Create a new ``RecursiveDescentParser``, that uses ``grammar`` to parse texts. :type grammar: CFG :param grammar: The grammar used to parse texts. :type trace: int :param trace: The level of tracing that should be used when parsing a text. ``0`` will generate no tracing output; and higher numbers will produce more verbose tracing output. """ self._grammar = grammar self._trace = trace def grammar(self): return self._grammar def parse(self, tokens): # Inherit docs from ParserI tokens = list(tokens) self._grammar.check_coverage(tokens) # Start a recursive descent parse, with an initial tree # containing just the start symbol. start = self._grammar.start().symbol() initial_tree = Tree(start, []) frontier = [()] if self._trace: self._trace_start(initial_tree, frontier, tokens) return self._parse(tokens, initial_tree, frontier) def _parse(self, remaining_text, tree, frontier): """ Recursively expand and match each elements of ``tree`` specified by ``frontier``, to cover ``remaining_text``. Return a list of all parses found. :return: An iterator of all parses that can be generated by matching and expanding the elements of ``tree`` specified by ``frontier``. :rtype: iter(Tree) :type tree: Tree :param tree: A partial structure for the text that is currently being parsed. The elements of ``tree`` that are specified by ``frontier`` have not yet been expanded or matched. :type remaining_text: list(str) :param remaining_text: The portion of the text that is not yet covered by ``tree``. :type frontier: list(tuple(int)) :param frontier: A list of the locations within ``tree`` of all subtrees that have not yet been expanded, and all leaves that have not yet been matched. This list sorted in left-to-right order of location within the tree. 
""" # If the tree covers the text, and there's nothing left to # expand, then we've found a complete parse; return it. if len(remaining_text) == 0 and len(frontier) == 0: if self._trace: self._trace_succeed(tree, frontier) yield tree # If there's still text, but nothing left to expand, we failed. elif len(frontier) == 0: if self._trace: self._trace_backtrack(tree, frontier) # If the next element on the frontier is a tree, expand it. elif isinstance(tree[frontier[0]], Tree): yield from self._expand(remaining_text, tree, frontier) # If the next element on the frontier is a token, match it. else: yield from self._match(remaining_text, tree, frontier) def _match(self, rtext, tree, frontier): """ :rtype: iter(Tree) :return: an iterator of all parses that can be generated by matching the first element of ``frontier`` against the first token in ``rtext``. In particular, if the first element of ``frontier`` has the same type as the first token in ``rtext``, then substitute the token into ``tree``; and return all parses that can be generated by matching and expanding the remaining elements of ``frontier``. If the first element of ``frontier`` does not have the same type as the first token in ``rtext``, then return empty list. :type tree: Tree :param tree: A partial structure for the text that is currently being parsed. The elements of ``tree`` that are specified by ``frontier`` have not yet been expanded or matched. :type rtext: list(str) :param rtext: The portion of the text that is not yet covered by ``tree``. :type frontier: list of tuple of int :param frontier: A list of the locations within ``tree`` of all subtrees that have not yet been expanded, and all leaves that have not yet been matched. """ tree_leaf = tree[frontier[0]] if len(rtext) > 0 and tree_leaf == rtext[0]: # If it's a terminal that matches rtext[0], then substitute # in the token, and continue parsing. newtree = tree.copy(deep=True) newtree[frontier[0]] = rtext[0] if self._trace: self._trace_match(newtree, frontier[1:], rtext[0]) yield from self._parse(rtext[1:], newtree, frontier[1:]) else: # If it's a non-matching terminal, fail. if self._trace: self._trace_backtrack(tree, frontier, rtext[:1]) def _expand(self, remaining_text, tree, frontier, production=None): """ :rtype: iter(Tree) :return: An iterator of all parses that can be generated by expanding the first element of ``frontier`` with ``production``. In particular, if the first element of ``frontier`` is a subtree whose node type is equal to ``production``'s left hand side, then add a child to that subtree for each element of ``production``'s right hand side; and return all parses that can be generated by matching and expanding the remaining elements of ``frontier``. If the first element of ``frontier`` is not a subtree whose node type is equal to ``production``'s left hand side, then return an empty list. If ``production`` is not specified, then return a list of all parses that can be generated by expanding the first element of ``frontier`` with *any* CFG production. :type tree: Tree :param tree: A partial structure for the text that is currently being parsed. The elements of ``tree`` that are specified by ``frontier`` have not yet been expanded or matched. :type remaining_text: list(str) :param remaining_text: The portion of the text that is not yet covered by ``tree``. :type frontier: list(tuple(int)) :param frontier: A list of the locations within ``tree`` of all subtrees that have not yet been expanded, and all leaves that have not yet been matched. 
""" if production is None: productions = self._grammar.productions() else: productions = [production] for production in productions: lhs = production.lhs().symbol() if lhs == tree[frontier[0]].label(): subtree = self._production_to_tree(production) if frontier[0] == (): newtree = subtree else: newtree = tree.copy(deep=True) newtree[frontier[0]] = subtree new_frontier = [ frontier[0] + (i,) for i in range(len(production.rhs())) ] if self._trace: self._trace_expand(newtree, new_frontier, production) yield from self._parse( remaining_text, newtree, new_frontier + frontier[1:] ) def _production_to_tree(self, production): """ :rtype: Tree :return: The Tree that is licensed by ``production``. In particular, given the production ``[lhs -> elt[1] ... elt[n]]`` return a tree that has a node ``lhs.symbol``, and ``n`` children. For each nonterminal element ``elt[i]`` in the production, the tree token has a childless subtree with node value ``elt[i].symbol``; and for each terminal element ``elt[j]``, the tree token has a leaf token with type ``elt[j]``. :param production: The CFG production that licenses the tree token that should be returned. :type production: Production """ children = [] for elt in production.rhs(): if isinstance(elt, Nonterminal): children.append(Tree(elt.symbol(), [])) else: # This will be matched. children.append(elt) return Tree(production.lhs().symbol(), children) def trace(self, trace=2): """ Set the level of tracing output that should be generated when parsing a text. :type trace: int :param trace: The trace level. A trace level of ``0`` will generate no tracing output; and higher trace levels will produce more verbose tracing output. :rtype: None """ self._trace = trace def _trace_fringe(self, tree, treeloc=None): """ Print trace output displaying the fringe of ``tree``. The fringe of ``tree`` consists of all of its leaves and all of its childless subtrees. :rtype: None """ if treeloc == (): print("*", end=" ") if isinstance(tree, Tree): if len(tree) == 0: print(repr(Nonterminal(tree.label())), end=" ") for i in range(len(tree)): if treeloc is not None and i == treeloc[0]: self._trace_fringe(tree[i], treeloc[1:]) else: self._trace_fringe(tree[i]) else: print(repr(tree), end=" ") def _trace_tree(self, tree, frontier, operation): """ Print trace output displaying the parser's current state. :param operation: A character identifying the operation that generated the current state. 
:rtype: None """ if self._trace == 2: print(" %c [" % operation, end=" ") else: print(" [", end=" ") if len(frontier) > 0: self._trace_fringe(tree, frontier[0]) else: self._trace_fringe(tree) print("]") def _trace_start(self, tree, frontier, text): print("Parsing %r" % " ".join(text)) if self._trace > 2: print("Start:") if self._trace > 1: self._trace_tree(tree, frontier, " ") def _trace_expand(self, tree, frontier, production): if self._trace > 2: print("Expand: %s" % production) if self._trace > 1: self._trace_tree(tree, frontier, "E") def _trace_match(self, tree, frontier, tok): if self._trace > 2: print("Match: %r" % tok) if self._trace > 1: self._trace_tree(tree, frontier, "M") def _trace_succeed(self, tree, frontier): if self._trace > 2: print("GOOD PARSE:") if self._trace == 1: print("Found a parse:\n%s" % tree) if self._trace > 1: self._trace_tree(tree, frontier, "+") def _trace_backtrack(self, tree, frontier, toks=None): if self._trace > 2: if toks: print("Backtrack: %r match failed" % toks[0]) else: print("Backtrack") The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem: A demonstration of the recursive descent parser. Here is the function: def demo(): """ A demonstration of the recursive descent parser. """ from nltk import CFG, parse grammar = CFG.fromstring( """ S -> NP VP NP -> Det N | Det N PP VP -> V NP | V NP PP PP -> P NP NP -> 'I' N -> 'man' | 'park' | 'telescope' | 'dog' Det -> 'the' | 'a' P -> 'in' | 'with' V -> 'saw' """ ) for prod in grammar.productions(): print(prod) sent = "I saw a man in the park".split() parser = parse.RecursiveDescentParser(grammar, trace=2) for p in parser.parse(sent): print(p)
A demonstration of the recursive descent parser.
170,626
from time import perf_counter from nltk.featstruct import TYPE, FeatStruct, find_variables, unify from nltk.grammar import ( CFG, FeatStructNonterminal, Nonterminal, Production, is_nonterminal, is_terminal, ) from nltk.parse.chart import ( BottomUpPredictCombineRule, BottomUpPredictRule, CachedTopDownPredictRule, Chart, ChartParser, EdgeI, EmptyPredictRule, FundamentalRule, LeafInitRule, SingleEdgeFundamentalRule, TopDownInitRule, TreeEdge, ) from nltk.sem import logic from nltk.tree import Tree class FeatureChartParser(ChartParser): def __init__( self, grammar, strategy=BU_LC_FEATURE_STRATEGY, trace_chart_width=20, chart_class=FeatureChart, **parser_args, ): ChartParser.__init__( self, grammar, strategy=strategy, trace_chart_width=trace_chart_width, chart_class=chart_class, **parser_args, ) def demo_grammar(): from nltk.grammar import FeatureGrammar return FeatureGrammar.fromstring( """ S -> NP VP PP -> Prep NP NP -> NP PP VP -> VP PP VP -> Verb NP VP -> Verb NP -> Det[pl=?x] Noun[pl=?x] NP -> "John" NP -> "I" Det -> "the" Det -> "my" Det[-pl] -> "a" Noun[-pl] -> "dog" Noun[-pl] -> "cookie" Verb -> "ate" Verb -> "saw" Prep -> "with" Prep -> "under" """ ) import sys if sys.version_info >= (3, 9): from types import GenericAlias if sys.version_info >= (3, 8): def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> List[Tuple[AnyStr, AnyStr]]: ... else: def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> List[Tuple[AnyStr, AnyStr]]: ... def demo( print_times=True, print_grammar=True, print_trees=True, print_sentence=True, trace=1, parser=FeatureChartParser, sent="I saw John with a dog with my cookie", ): import sys import time print() grammar = demo_grammar() if print_grammar: print(grammar) print() print("*", parser.__name__) if print_sentence: print("Sentence:", sent) tokens = sent.split() t = perf_counter() cp = parser(grammar, trace=trace) chart = cp.chart_parse(tokens) trees = list(chart.parses(grammar.start())) if print_times: print("Time: %s" % (perf_counter() - t)) if print_trees: for tree in trees: print(tree) else: print("Nr trees:", len(trees))
null
170,627
from time import perf_counter from nltk.featstruct import TYPE, FeatStruct, find_variables, unify from nltk.grammar import ( CFG, FeatStructNonterminal, Nonterminal, Production, is_nonterminal, is_terminal, ) from nltk.parse.chart import ( BottomUpPredictCombineRule, BottomUpPredictRule, CachedTopDownPredictRule, Chart, ChartParser, EdgeI, EmptyPredictRule, FundamentalRule, LeafInitRule, SingleEdgeFundamentalRule, TopDownInitRule, TreeEdge, ) from nltk.sem import logic from nltk.tree import Tree def run_profile(): import profile profile.run("for i in range(1): demo()", "/tmp/profile.out") import pstats p = pstats.Stats("/tmp/profile.out") p.strip_dirs().sort_stats("time", "cum").print_stats(60) p.strip_dirs().sort_stats("cum", "time").print_stats(60)
null
170,628
from nltk.parse.api import ParserI from nltk.tree import Tree def _ensure_bllip_import_or_error(): pass
null
170,629
from nltk.parse.api import ParserI from nltk.tree import Tree def _ensure_bllip_import_or_error(ie=ie): raise ImportError("Couldn't import bllipparser module: %s" % ie)
null
170,630
from nltk.parse.api import ParserI from nltk.tree import Tree def _ensure_ascii(words): try: for i, word in enumerate(words): word.encode("ascii") except UnicodeEncodeError as e: raise ValueError( f"Token {i} ({word!r}) is non-ASCII. BLLIP Parser " "currently doesn't support non-ASCII inputs." ) from e
null
170,631
from nltk.parse.api import ParserI from nltk.tree import Tree def _scored_parse_to_nltk_tree(scored_parse): return Tree.fromstring(str(scored_parse.ptb_parse))
null
170,632
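``_scored_parse_to_nltk_tree`` simply hands the parse's Penn Treebank-style bracketing to ``Tree.fromstring``; a tiny illustration of that conversion (the bracketed string below is made up for illustration):

>>> from nltk.tree import Tree
>>> t = Tree.fromstring('(S1 (S (NP (DT the) (NN dog)) (VP (VBD barked))))')
>>> t.label(), t.leaves()
('S1', ['the', 'dog', 'barked'])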
from nltk.parse.api import ParserI from nltk.tree import Tree class BllipParser(ParserI): """ Interface for parsing with BLLIP Parser. BllipParser objects can be constructed with the ``BllipParser.from_unified_model_dir`` class method or manually using the ``BllipParser`` constructor. """ def __init__( self, parser_model=None, reranker_features=None, reranker_weights=None, parser_options=None, reranker_options=None, ): """ Load a BLLIP Parser model from scratch. You'll typically want to use the ``from_unified_model_dir()`` class method to construct this object. :param parser_model: Path to parser model directory :type parser_model: str :param reranker_features: Path the reranker model's features file :type reranker_features: str :param reranker_weights: Path the reranker model's weights file :type reranker_weights: str :param parser_options: optional dictionary of parser options, see ``bllipparser.RerankingParser.RerankingParser.load_parser_options()`` for more information. :type parser_options: dict(str) :param reranker_options: optional dictionary of reranker options, see ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()`` for more information. :type reranker_options: dict(str) """ _ensure_bllip_import_or_error() parser_options = parser_options or {} reranker_options = reranker_options or {} self.rrp = RerankingParser() self.rrp.load_parser_model(parser_model, **parser_options) if reranker_features and reranker_weights: self.rrp.load_reranker_model( features_filename=reranker_features, weights_filename=reranker_weights, **reranker_options, ) def parse(self, sentence): """ Use BLLIP Parser to parse a sentence. Takes a sentence as a list of words; it will be automatically tagged with this BLLIP Parser instance's tagger. :return: An iterator that generates parse trees for the sentence from most likely to least likely. :param sentence: The sentence to be parsed :type sentence: list(str) :rtype: iter(Tree) """ _ensure_ascii(sentence) nbest_list = self.rrp.parse(sentence) for scored_parse in nbest_list: yield _scored_parse_to_nltk_tree(scored_parse) def tagged_parse(self, word_and_tag_pairs): """ Use BLLIP to parse a sentence. Takes a sentence as a list of (word, tag) tuples; the sentence must have already been tokenized and tagged. BLLIP will attempt to use the tags provided but may use others if it can't come up with a complete parse subject to those constraints. You may also specify a tag as ``None`` to leave a token's tag unconstrained. :return: An iterator that generates parse trees for the sentence from most likely to least likely. :param sentence: Input sentence to parse as (word, tag) pairs :type sentence: list(tuple(str, str)) :rtype: iter(Tree) """ words = [] tag_map = {} for i, (word, tag) in enumerate(word_and_tag_pairs): words.append(word) if tag is not None: tag_map[i] = tag _ensure_ascii(words) nbest_list = self.rrp.parse_tagged(words, tag_map) for scored_parse in nbest_list: yield _scored_parse_to_nltk_tree(scored_parse) def from_unified_model_dir( cls, model_dir, parser_options=None, reranker_options=None ): """ Create a ``BllipParser`` object from a unified parsing model directory. Unified parsing model directories are a standardized way of storing BLLIP parser and reranker models together on disk. See ``bllipparser.RerankingParser.get_unified_model_parameters()`` for more information about unified model directories. :return: A ``BllipParser`` object using the parser and reranker models in the model directory. 
:param model_dir: Path to the unified model directory. :type model_dir: str :param parser_options: optional dictionary of parser options, see ``bllipparser.RerankingParser.RerankingParser.load_parser_options()`` for more information. :type parser_options: dict(str) :param reranker_options: optional dictionary of reranker options, see ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()`` for more information. :type reranker_options: dict(str) :rtype: BllipParser """ ( parser_model_dir, reranker_features_filename, reranker_weights_filename, ) = get_unified_model_parameters(model_dir) return cls( parser_model_dir, reranker_features_filename, reranker_weights_filename, parser_options, reranker_options, ) def find(resource_name, paths=None): """ Find the given resource by searching through the directories and zip files in paths, where a None or empty string specifies an absolute path. Returns a corresponding path name. If the given resource is not found, raise a ``LookupError``, whose message gives a pointer to the installation instructions for the NLTK downloader. Zip File Handling: - If ``resource_name`` contains a component with a ``.zip`` extension, then it is assumed to be a zipfile; and the remaining path components are used to look inside the zipfile. - If any element of ``nltk.data.path`` has a ``.zip`` extension, then it is assumed to be a zipfile. - If a given resource name that does not contain any zipfile component is not found initially, then ``find()`` will make a second attempt to find that resource, by replacing each component *p* in the path with *p.zip/p*. For example, this allows ``find()`` to map the resource name ``corpora/chat80/cities.pl`` to a zip file path pointer to ``corpora/chat80.zip/chat80/cities.pl``. - When using ``find()`` to locate a directory contained in a zipfile, the resource name must end with the forward slash character. Otherwise, ``find()`` will not locate the directory. :type resource_name: str or unicode :param resource_name: The name of the resource to search for. Resource names are posix-style relative path names, such as ``corpora/brown``. Directory names will be automatically converted to a platform-appropriate path separator. :rtype: str """ resource_name = normalize_resource_name(resource_name, True) # Resolve default paths at runtime in-case the user overrides # nltk.data.path if paths is None: paths = path # Check if the resource name includes a zipfile name m = re.match(r"(.*\.zip)/?(.*)$|", resource_name) zipfile, zipentry = m.groups() # Check each item in our path for path_ in paths: # Is the path item a zipfile? if path_ and (os.path.isfile(path_) and path_.endswith(".zip")): try: return ZipFilePathPointer(path_, resource_name) except OSError: # resource not in zipfile continue # Is the path item a directory or is resource_name an absolute path? elif not path_ or os.path.isdir(path_): if zipfile is None: p = os.path.join(path_, url2pathname(resource_name)) if os.path.exists(p): if p.endswith(".gz"): return GzipFileSystemPathPointer(p) else: return FileSystemPathPointer(p) else: p = os.path.join(path_, url2pathname(zipfile)) if os.path.exists(p): try: return ZipFilePathPointer(p, zipentry) except OSError: # resource not in zipfile continue # Fallback: if the path doesn't include a zip file, then try # again, assuming that one of the path components is inside a # zipfile of the same name. 
if zipfile is None: pieces = resource_name.split("/") for i in range(len(pieces)): modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:]) try: return find(modified_name, paths) except LookupError: pass # Identify the package (i.e. the .zip file) to download. resource_zipname = resource_name.split("/")[1] if resource_zipname.endswith(".zip"): resource_zipname = resource_zipname.rpartition(".")[0] # Display a friendly error message if the resource wasn't found: msg = str( "Resource \33[93m{resource}\033[0m not found.\n" "Please use the NLTK Downloader to obtain the resource:\n\n" "\33[31m" # To display red text in terminal. ">>> import nltk\n" ">>> nltk.download('{resource}')\n" "\033[0m" ).format(resource=resource_zipname) msg = textwrap_indent(msg) msg += "\n For more information see: https://www.nltk.org/data.html\n" msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format( resource_name=resource_name ) msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths) sep = "*" * 70 resource_not_found = f"\n{sep}\n{msg}\n{sep}\n" raise LookupError(resource_not_found) The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem: This assumes the Python module bllipparser is installed. Here is the function: def demo(): """This assumes the Python module bllipparser is installed.""" # download and install a basic unified parsing model (Wall Street Journal) # sudo python -m nltk.downloader bllip_wsj_no_aux from nltk.data import find model_dir = find("models/bllip_wsj_no_aux").path print("Loading BLLIP Parsing models...") # the easiest way to get started is to use a unified model bllip = BllipParser.from_unified_model_dir(model_dir) print("Done.") sentence1 = "British left waffles on Falklands .".split() sentence2 = "I saw the man with the telescope .".split() # this sentence is known to fail under the WSJ parsing model fail1 = "# ! ? : -".split() for sentence in (sentence1, sentence2, fail1): print("Sentence: %r" % " ".join(sentence)) try: tree = next(bllip.parse(sentence)) print(tree) except StopIteration: print("(parse failed)") # n-best parsing demo for i, parse in enumerate(bllip.parse(sentence1)): print("parse %d:\n%s" % (i, parse)) # using external POS tag constraints print( "forcing 'tree' to be 'NN':", next(bllip.tagged_parse([("A", None), ("tree", "NN")])), ) print( "forcing 'A' to be 'DT' and 'tree' to be 'NNP':", next(bllip.tagged_parse([("A", "DT"), ("tree", "NNP")])), ) # constraints don't have to make sense... (though on more complicated # sentences, they may cause the parse to fail) print( "forcing 'A' to be 'NNP':", next(bllip.tagged_parse([("A", "NNP"), ("tree", None)])), )
This assumes the Python module bllipparser is installed.
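A brief usage sketch of the resource lookup described above; it assumes the chat80 corpus has already been fetched with nltk.download('chat80') and shows the zip fallback that find() applies when only the zipped package is on disk.

from nltk import data

# Looks for corpora/chat80/cities.pl on disk first; if only the zip is present,
# find() retries as corpora/chat80.zip/chat80/cities.pl and returns a
# ZipFilePathPointer instead of a FileSystemPathPointer.
pointer = data.find("corpora/chat80/cities.pl")
print(pointer)

# A missing resource raises LookupError with download instructions.
try:
    data.find("corpora/clearly_not_installed")
except LookupError:
    print("resource not found")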
170,633
import subprocess import warnings from collections import defaultdict from itertools import chain from pprint import pformat from nltk.internals import find_binary from nltk.tree import Tree def find_binary( name, path_to_bin=None, env_vars=(), searchpath=(), binary_names=None, url=None, verbose=False, ): return next( find_binary_iter( name, path_to_bin, env_vars, searchpath, binary_names, url, verbose ) ) The provided code snippet includes necessary dependencies for implementing the `dot2img` function. Write a Python function `def dot2img(dot_string, t="svg")` to solve the following problem: Create image representation fom dot_string, using the 'dot' program from the Graphviz package. Use the 't' argument to specify the image file format, for ex. 'jpeg', 'eps', 'json', 'png' or 'webp' (Running 'dot -T:' lists all available formats). Note that the "capture_output" option of subprocess.run() is only available with text formats (like svg), but not with binary image formats (like png). Here is the function: def dot2img(dot_string, t="svg"): """ Create image representation fom dot_string, using the 'dot' program from the Graphviz package. Use the 't' argument to specify the image file format, for ex. 'jpeg', 'eps', 'json', 'png' or 'webp' (Running 'dot -T:' lists all available formats). Note that the "capture_output" option of subprocess.run() is only available with text formats (like svg), but not with binary image formats (like png). """ try: find_binary("dot") try: if t in ["dot", "dot_json", "json", "svg"]: proc = subprocess.run( ["dot", "-T%s" % t], capture_output=True, input=dot_string, text=True, ) else: proc = subprocess.run( ["dot", "-T%s" % t], input=bytes(dot_string, encoding="utf8"), ) return proc.stdout except: raise Exception( "Cannot create image representation by running dot from string: {}" "".format(dot_string) ) except OSError as e: raise Exception("Cannot find the dot binary from Graphviz package") from e
Create an image representation from dot_string, using the 'dot' program from the Graphviz package. Use the 't' argument to specify the image file format, e.g. 'jpeg', 'eps', 'json', 'png' or 'webp' (running 'dot -T:' lists all available formats). Note that the "capture_output" option of subprocess.run() is only used with text formats (like svg) in this implementation, not with binary image formats (like png).
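A minimal usage sketch, assuming the Graphviz dot binary is installed; the DOT source and output file name are illustrative only.

dot_source = 'digraph G { "saw" -> "I" ; "saw" -> "man" ; "man" -> "the" ; }'

# Text formats ('svg', 'dot', 'dot_json', 'json') are captured and returned as a string.
svg_text = dot2img(dot_source)
with open("parse.svg", "w") as out:
    out.write(svg_text)

# Binary formats (e.g. t="png") are run without capture_output in this
# implementation, so nothing useful is returned; prefer a text format when
# the rendered result is needed in memory.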
170,634
import subprocess import warnings from collections import defaultdict from itertools import chain from pprint import pformat from nltk.internals import find_binary from nltk.tree import Tree def malt_demo(nx=False): """ A demonstration of the result of reading a dependency version of the first sentence of the Penn Treebank. """ dg = DependencyGraph( """Pierre NNP 2 NMOD Vinken NNP 8 SUB , , 2 P 61 CD 5 NMOD years NNS 6 AMOD old JJ 2 NMOD , , 2 P will MD 0 ROOT join VB 8 VC the DT 11 NMOD board NN 9 OBJ as IN 9 VMOD a DT 15 NMOD nonexecutive JJ 15 NMOD director NN 12 PMOD Nov. NNP 9 VMOD 29 CD 16 NMOD . . 9 VMOD """ ) tree = dg.tree() tree.pprint() if nx: # currently doesn't work import networkx from matplotlib import pylab g = dg.nx_graph() g.info() pos = networkx.spring_layout(g, dim=1) networkx.draw_networkx_nodes(g, pos, node_size=50) # networkx.draw_networkx_edges(g, pos, edge_color='k', width=8) networkx.draw_networkx_labels(g, pos, dg.nx_labels) pylab.xticks([]) pylab.yticks([]) pylab.savefig("tree.png") pylab.show() def conll_demo(): """ A demonstration of how to read a string representation of a CoNLL format dependency tree. """ dg = DependencyGraph(conll_data1) tree = dg.tree() tree.pprint() print(dg) print(dg.to_conll(4)) def conll_file_demo(): print("Mass conll_read demo...") graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] for graph in graphs: tree = graph.tree() print("\n") tree.pprint() def cycle_finding_demo(): dg = DependencyGraph(treebank_data) print(dg.contains_cycle()) cyclic_dg = DependencyGraph() cyclic_dg.add_node({"word": None, "deps": [1], "rel": "TOP", "address": 0}) cyclic_dg.add_node({"word": None, "deps": [2], "rel": "NTOP", "address": 1}) cyclic_dg.add_node({"word": None, "deps": [4], "rel": "NTOP", "address": 2}) cyclic_dg.add_node({"word": None, "deps": [1], "rel": "NTOP", "address": 3}) cyclic_dg.add_node({"word": None, "deps": [3], "rel": "NTOP", "address": 4}) print(cyclic_dg.contains_cycle()) def demo(): malt_demo() conll_demo() conll_file_demo() cycle_finding_demo()
null
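For reference, a small self-contained sketch in the same whitespace-separated Malt-TAB style used by malt_demo above (word, POS, head index, relation per line); the sentence and relation labels are invented for illustration.

from nltk.parse.dependencygraph import DependencyGraph

dg = DependencyGraph(
    """the DT 2 DET
dog NN 3 SUB
barked VBD 0 ROOT
. . 3 PU
"""
)
dg.tree().pprint()        # (barked (dog the) .)
print(dg.to_conll(4))     # re-serialise as 4-column CoNLL-style lines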
170,635
import random from functools import reduce from nltk.grammar import PCFG, Nonterminal from nltk.parse.api import ParserI from nltk.parse.chart import AbstractChartRule, Chart, LeafEdge, TreeEdge from nltk.tree import ProbabilisticTree, Tree class InsideChartParser(BottomUpProbabilisticChartParser): """ A bottom-up parser for ``PCFG`` grammars that tries edges in descending order of the inside probabilities of their trees. The "inside probability" of a tree is simply the probability of the entire tree, ignoring its context. In particular, the inside probability of a tree generated by production *p* with children *c[1], c[2], ..., c[n]* is *P(p)P(c[1])P(c[2])...P(c[n])*; and the inside probability of a token is 1 if it is present in the text, and 0 if it is absent. This sorting order results in a type of lowest-cost-first search strategy. """ # Inherit constructor. def sort_queue(self, queue, chart): """ Sort the given queue of edges, in descending order of the inside probabilities of the edges' trees. :param queue: The queue of ``Edge`` objects to sort. Each edge in this queue is an edge that could be added to the chart by the fundamental rule; but that has not yet been added. :type queue: list(Edge) :param chart: The chart being used to parse the text. This chart can be used to provide extra information for sorting the queue. :type chart: Chart :rtype: None """ queue.sort(key=lambda edge: edge.prob()) class RandomChartParser(BottomUpProbabilisticChartParser): """ A bottom-up parser for ``PCFG`` grammars that tries edges in random order. This sorting order results in a random search strategy. """ # Inherit constructor def sort_queue(self, queue, chart): i = random.randint(0, len(queue) - 1) (queue[-1], queue[i]) = (queue[i], queue[-1]) class UnsortedChartParser(BottomUpProbabilisticChartParser): """ A bottom-up parser for ``PCFG`` grammars that tries edges in whatever order. """ # Inherit constructor def sort_queue(self, queue, chart): return class LongestChartParser(BottomUpProbabilisticChartParser): """ A bottom-up parser for ``PCFG`` grammars that tries longer edges before shorter ones. This sorting order results in a type of best-first search strategy. """ # Inherit constructor def sort_queue(self, queue, chart): queue.sort(key=lambda edge: edge.length()) def reduce(function: Callable[[_T, _S], _T], sequence: Iterable[_S], initial: _T) -> _T: ... def reduce(function: Callable[[_T, _T], _T], sequence: Iterable[_T]) -> _T: ... class PCFG(CFG): """ A probabilistic context-free grammar. A PCFG consists of a start state and a set of productions with probabilities. The set of terminals and nonterminals is implicitly specified by the productions. PCFG productions use the ``ProbabilisticProduction`` class. ``PCFGs`` impose the constraint that the set of productions with any given left-hand-side must have probabilities that sum to 1 (allowing for a small margin of error). If you need efficient key-based access to productions, you can use a subclass to implement it. :type EPSILON: float :cvar EPSILON: The acceptable margin of error for checking that productions with a given left-hand side have probabilities that sum to 1. """ EPSILON = 0.01 def __init__(self, start, productions, calculate_leftcorners=True): """ Create a new context-free grammar, from the given start state and set of ``ProbabilisticProductions``. 
:param start: The start symbol :type start: Nonterminal :param productions: The list of productions that defines the grammar :type productions: list(Production) :raise ValueError: if the set of productions with any left-hand-side do not have probabilities that sum to a value within EPSILON of 1. :param calculate_leftcorners: False if we don't want to calculate the leftcorner relation. In that case, some optimized chart parsers won't work. :type calculate_leftcorners: bool """ CFG.__init__(self, start, productions, calculate_leftcorners) # Make sure that the probabilities sum to one. probs = {} for production in productions: probs[production.lhs()] = probs.get(production.lhs(), 0) + production.prob() for (lhs, p) in probs.items(): if not ((1 - PCFG.EPSILON) < p < (1 + PCFG.EPSILON)): raise ValueError("Productions for %r do not sum to 1" % lhs) def fromstring(cls, input, encoding=None): """ Return a probabilistic context-free grammar corresponding to the input string(s). :param input: a grammar, either in the form of a string or else as a list of strings. """ start, productions = read_grammar( input, standard_nonterm_parser, probabilistic=True, encoding=encoding ) return cls(start, productions) import sys if sys.version_info >= (3, 9): from types import GenericAlias if sys.version_info >= (3, 8): def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> List[Tuple[AnyStr, AnyStr]]: ... else: def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> List[Tuple[AnyStr, AnyStr]]: ... def draw_trees(*trees): """ Open a new window containing a graphical diagram of the given trees. :rtype: None """ TreeView(*trees).mainloop() return The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo(choice=None, draw_parses=None, print_parses=None)` to solve the following problem: A demonstration of the probabilistic parsers. The user is prompted to select which demo to run, and how many parses should be found; and then each parser is run on the same demo, and a summary of the results are displayed. Here is the function: def demo(choice=None, draw_parses=None, print_parses=None): """ A demonstration of the probabilistic parsers. The user is prompted to select which demo to run, and how many parses should be found; and then each parser is run on the same demo, and a summary of the results are displayed. """ import sys import time from nltk import tokenize from nltk.parse import pchart # Define two demos. Each demo has a sentence and a grammar. 
toy_pcfg1 = PCFG.fromstring( """ S -> NP VP [1.0] NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] Det -> 'the' [0.8] | 'my' [0.2] N -> 'man' [0.5] | 'telescope' [0.5] VP -> VP PP [0.1] | V NP [0.7] | V [0.2] V -> 'ate' [0.35] | 'saw' [0.65] PP -> P NP [1.0] P -> 'with' [0.61] | 'under' [0.39] """ ) toy_pcfg2 = PCFG.fromstring( """ S -> NP VP [1.0] VP -> V NP [.59] VP -> V [.40] VP -> VP PP [.01] NP -> Det N [.41] NP -> Name [.28] NP -> NP PP [.31] PP -> P NP [1.0] V -> 'saw' [.21] V -> 'ate' [.51] V -> 'ran' [.28] N -> 'boy' [.11] N -> 'cookie' [.12] N -> 'table' [.13] N -> 'telescope' [.14] N -> 'hill' [.5] Name -> 'Jack' [.52] Name -> 'Bob' [.48] P -> 'with' [.61] P -> 'under' [.39] Det -> 'the' [.41] Det -> 'a' [.31] Det -> 'my' [.28] """ ) demos = [ ("I saw John with my telescope", toy_pcfg1), ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2), ] if choice is None: # Ask the user which demo they want to use. print() for i in range(len(demos)): print(f"{i + 1:>3}: {demos[i][0]}") print(" %r" % demos[i][1]) print() print("Which demo (%d-%d)? " % (1, len(demos)), end=" ") choice = int(sys.stdin.readline().strip()) - 1 try: sent, grammar = demos[choice] except: print("Bad sentence number") return # Tokenize the sentence. tokens = sent.split() # Define a list of parsers. We'll use all parsers. parsers = [ pchart.InsideChartParser(grammar), pchart.RandomChartParser(grammar), pchart.UnsortedChartParser(grammar), pchart.LongestChartParser(grammar), pchart.InsideChartParser(grammar, beam_size=len(tokens) + 1), # was BeamParser ] # Run the parsers on the tokenized sentence. times = [] average_p = [] num_parses = [] all_parses = {} for parser in parsers: print(f"\ns: {sent}\nparser: {parser}\ngrammar: {grammar}") parser.trace(3) t = time.time() parses = list(parser.parse(tokens)) times.append(time.time() - t) p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0 average_p.append(p) num_parses.append(len(parses)) for p in parses: all_parses[p.freeze()] = 1 # Print some summary statistics print() print(" Parser Beam | Time (secs) # Parses Average P(parse)") print("------------------------+------------------------------------------") for i in range(len(parsers)): print( "%18s %4d |%11.4f%11d%19.14f" % ( parsers[i].__class__.__name__, parsers[i].beam_size, times[i], num_parses[i], average_p[i], ) ) parses = all_parses.keys() if parses: p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) else: p = 0 print("------------------------+------------------------------------------") print("%18s |%11s%11d%19.14f" % ("(All Parses)", "n/a", len(parses), p)) if draw_parses is None: # Ask the user if we should draw the parses. print() print("Draw parses (y/n)? ", end=" ") draw_parses = sys.stdin.readline().strip().lower().startswith("y") if draw_parses: from nltk.draw.tree import draw_trees print(" please wait...") draw_trees(*parses) if print_parses is None: # Ask the user if we should print the parses. print() print("Print parses (y/n)? ", end=" ") print_parses = sys.stdin.readline().strip().lower().startswith("y") if print_parses: for parse in parses: print(parse)
A demonstration of the probabilistic parsers. The user is prompted to select which demo to run, and how many parses should be found; then each parser is run on the same demo, and a summary of the results is displayed.
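A compact sketch of calling one of the parsers above directly, bypassing the interactive prompt; it reuses the demo's first toy grammar, so nothing here is new data.

from nltk import PCFG
from nltk.parse import pchart

toy_pcfg1 = PCFG.fromstring(
    """
    S -> NP VP [1.0]
    NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
    Det -> 'the' [0.8] | 'my' [0.2]
    N -> 'man' [0.5] | 'telescope' [0.5]
    VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
    V -> 'ate' [0.35] | 'saw' [0.65]
    PP -> P NP [1.0]
    P -> 'with' [0.61] | 'under' [0.39]
    """
)

parser = pchart.InsideChartParser(toy_pcfg1)
for tree in parser.parse("I saw John with my telescope".split()):
    print(tree.prob(), tree)   # each tree carries its inside probability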
170,636
import pickle import tempfile from copy import deepcopy from operator import itemgetter from os import remove from nltk.parse import DependencyEvaluator, DependencyGraph, ParserI The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem: >>> from nltk.parse import DependencyGraph, DependencyEvaluator >>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition >>> gold_sent = DependencyGraph(\""" ... Economic JJ 2 ATT ... news NN 3 SBJ ... has VBD 0 ROOT ... little JJ 5 ATT ... effect NN 3 OBJ ... on IN 5 ATT ... financial JJ 8 ATT ... markets NNS 6 PC ... . . 3 PU ... \""") >>> conf = Configuration(gold_sent) ###################### Check the Initial Feature ######################## >>> print(', '.join(conf.extract_features())) STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ ###################### Check The Transition ####################### Check the Initialized Configuration >>> print(conf) Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : [] A. Do some transition checks for ARC-STANDARD >>> operation = Transition('arc-standard') >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") >>> operation.shift(conf) >>> operation.left_arc(conf,"SBJ") >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") Middle Configuration and Features Check >>> print(conf) Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)] >>> print(', '.join(conf.extract_features())) STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT >>> operation.right_arc(conf, "PC") >>> operation.right_arc(conf, "ATT") >>> operation.right_arc(conf, "OBJ") >>> operation.shift(conf) >>> operation.right_arc(conf, "PU") >>> operation.right_arc(conf, "ROOT") >>> operation.shift(conf) Terminated Configuration Check >>> print(conf) Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)] B. Do some transition checks for ARC-EAGER >>> conf = Configuration(gold_sent) >>> operation = Transition('arc-eager') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.shift(conf) >>> operation.left_arc(conf,'SBJ') >>> operation.right_arc(conf,'ROOT') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.right_arc(conf,'OBJ') >>> operation.right_arc(conf,'ATT') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.right_arc(conf,'PC') >>> operation.reduce(conf) >>> operation.reduce(conf) >>> operation.reduce(conf) >>> operation.right_arc(conf,'PU') >>> print(conf) Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)] ###################### Check The Training Function ####################### A. 
Check the ARC-STANDARD training >>> import tempfile >>> import os >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False) >>> parser_std = TransitionParser('arc-standard') >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file))) Number of training examples : 1 Number of valid (projective) examples : 1 SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT >>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False) Number of training examples : 1 Number of valid (projective) examples : 1 >>> input_file.close() >>> remove(input_file.name) B. Check the ARC-EAGER training >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False) >>> parser_eager = TransitionParser('arc-eager') >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file))) Number of training examples : 1 Number of valid (projective) examples : 1 SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU >>> parser_eager.train([gold_sent],'temp.arceager.model', verbose=False) Number of training examples : 1 Number of valid (projective) examples : 1 >>> input_file.close() >>> remove(input_file.name) ###################### Check The Parsing Function ######################## A. Check the ARC-STANDARD parser >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model') >>> de = DependencyEvaluator(result, [gold_sent]) >>> de.eval() >= (0, 0) True B. Check the ARC-EAGER parser >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model') >>> de = DependencyEvaluator(result, [gold_sent]) >>> de.eval() >= (0, 0) True Remove test temporary files >>> remove('temp.arceager.model') >>> remove('temp.arcstd.model') Note that result is very poor because of only one training example. Here is the function: def demo(): """ >>> from nltk.parse import DependencyGraph, DependencyEvaluator >>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition >>> gold_sent = DependencyGraph(\""" ... Economic JJ 2 ATT ... news NN 3 SBJ ... has VBD 0 ROOT ... little JJ 5 ATT ... effect NN 3 OBJ ... on IN 5 ATT ... financial JJ 8 ATT ... markets NNS 6 PC ... . . 3 PU ... \""") >>> conf = Configuration(gold_sent) ###################### Check the Initial Feature ######################## >>> print(', '.join(conf.extract_features())) STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ ###################### Check The Transition ####################### Check the Initialized Configuration >>> print(conf) Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : [] A. 
Do some transition checks for ARC-STANDARD >>> operation = Transition('arc-standard') >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") >>> operation.shift(conf) >>> operation.left_arc(conf,"SBJ") >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") Middle Configuration and Features Check >>> print(conf) Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)] >>> print(', '.join(conf.extract_features())) STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT >>> operation.right_arc(conf, "PC") >>> operation.right_arc(conf, "ATT") >>> operation.right_arc(conf, "OBJ") >>> operation.shift(conf) >>> operation.right_arc(conf, "PU") >>> operation.right_arc(conf, "ROOT") >>> operation.shift(conf) Terminated Configuration Check >>> print(conf) Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)] B. Do some transition checks for ARC-EAGER >>> conf = Configuration(gold_sent) >>> operation = Transition('arc-eager') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.shift(conf) >>> operation.left_arc(conf,'SBJ') >>> operation.right_arc(conf,'ROOT') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.right_arc(conf,'OBJ') >>> operation.right_arc(conf,'ATT') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.right_arc(conf,'PC') >>> operation.reduce(conf) >>> operation.reduce(conf) >>> operation.reduce(conf) >>> operation.right_arc(conf,'PU') >>> print(conf) Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)] ###################### Check The Training Function ####################### A. Check the ARC-STANDARD training >>> import tempfile >>> import os >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False) >>> parser_std = TransitionParser('arc-standard') >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file))) Number of training examples : 1 Number of valid (projective) examples : 1 SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT >>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False) Number of training examples : 1 Number of valid (projective) examples : 1 >>> input_file.close() >>> remove(input_file.name) B. 
Check the ARC-EAGER training >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False) >>> parser_eager = TransitionParser('arc-eager') >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file))) Number of training examples : 1 Number of valid (projective) examples : 1 SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU >>> parser_eager.train([gold_sent],'temp.arceager.model', verbose=False) Number of training examples : 1 Number of valid (projective) examples : 1 >>> input_file.close() >>> remove(input_file.name) ###################### Check The Parsing Function ######################## A. Check the ARC-STANDARD parser >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model') >>> de = DependencyEvaluator(result, [gold_sent]) >>> de.eval() >= (0, 0) True B. Check the ARC-EAGER parser >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model') >>> de = DependencyEvaluator(result, [gold_sent]) >>> de.eval() >= (0, 0) True Remove test temporary files >>> remove('temp.arceager.model') >>> remove('temp.arcstd.model') Note that result is very poor because of only one training example. """
>>> from nltk.parse import DependencyGraph, DependencyEvaluator >>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition >>> gold_sent = DependencyGraph(\""" ... Economic JJ 2 ATT ... news NN 3 SBJ ... has VBD 0 ROOT ... little JJ 5 ATT ... effect NN 3 OBJ ... on IN 5 ATT ... financial JJ 8 ATT ... markets NNS 6 PC ... . . 3 PU ... \""") >>> conf = Configuration(gold_sent) ###################### Check the Initial Feature ######################## >>> print(', '.join(conf.extract_features())) STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ ###################### Check The Transition ####################### Check the Initialized Configuration >>> print(conf) Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : [] A. Do some transition checks for ARC-STANDARD >>> operation = Transition('arc-standard') >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") >>> operation.shift(conf) >>> operation.left_arc(conf,"SBJ") >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") Middle Configuration and Features Check >>> print(conf) Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)] >>> print(', '.join(conf.extract_features())) STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT >>> operation.right_arc(conf, "PC") >>> operation.right_arc(conf, "ATT") >>> operation.right_arc(conf, "OBJ") >>> operation.shift(conf) >>> operation.right_arc(conf, "PU") >>> operation.right_arc(conf, "ROOT") >>> operation.shift(conf) Terminated Configuration Check >>> print(conf) Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)] B. Do some transition checks for ARC-EAGER >>> conf = Configuration(gold_sent) >>> operation = Transition('arc-eager') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.shift(conf) >>> operation.left_arc(conf,'SBJ') >>> operation.right_arc(conf,'ROOT') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.right_arc(conf,'OBJ') >>> operation.right_arc(conf,'ATT') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.right_arc(conf,'PC') >>> operation.reduce(conf) >>> operation.reduce(conf) >>> operation.reduce(conf) >>> operation.right_arc(conf,'PU') >>> print(conf) Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)] ###################### Check The Training Function ####################### A. 
Check the ARC-STANDARD training >>> import tempfile >>> import os >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False) >>> parser_std = TransitionParser('arc-standard') >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file))) Number of training examples : 1 Number of valid (projective) examples : 1 SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT >>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False) Number of training examples : 1 Number of valid (projective) examples : 1 >>> input_file.close() >>> remove(input_file.name) B. Check the ARC-EAGER training >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False) >>> parser_eager = TransitionParser('arc-eager') >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file))) Number of training examples : 1 Number of valid (projective) examples : 1 SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU >>> parser_eager.train([gold_sent],'temp.arceager.model', verbose=False) Number of training examples : 1 Number of valid (projective) examples : 1 >>> input_file.close() >>> remove(input_file.name) ###################### Check The Parsing Function ######################## A. Check the ARC-STANDARD parser >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model') >>> de = DependencyEvaluator(result, [gold_sent]) >>> de.eval() >= (0, 0) True B. Check the ARC-EAGER parser >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model') >>> de = DependencyEvaluator(result, [gold_sent]) >>> de.eval() >= (0, 0) True Remove test temporary files >>> remove('temp.arceager.model') >>> remove('temp.arcstd.model') Note that result is very poor because of only one training example.
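The doctest above condenses into the following sketch; it assumes scikit-learn (plus NumPy/SciPy) is installed, since TransitionParser delegates classification to an SVM, and the single-sentence "corpus" is only there to keep the example short.

from os import remove
from nltk.parse import DependencyGraph, DependencyEvaluator
from nltk.parse.transitionparser import TransitionParser

gold_sent = DependencyGraph(
    """Economic JJ 2 ATT
news NN 3 SBJ
has VBD 0 ROOT
little JJ 5 ATT
effect NN 3 OBJ
on IN 5 ATT
financial JJ 8 ATT
markets NNS 6 PC
. . 3 PU
"""
)

parser = TransitionParser("arc-eager")
parser.train([gold_sent], "temp.arceager.model", verbose=False)
result = parser.parse([gold_sent], "temp.arceager.model")
print(DependencyEvaluator(result, [gold_sent]).eval())  # (LAS, UAS) -- poor with one training sentence
remove("temp.arceager.model")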
170,637
from collections import defaultdict from functools import total_ordering from itertools import chain from nltk.grammar import ( DependencyGrammar, DependencyProduction, ProbabilisticDependencyGrammar, ) from nltk.internals import raise_unorderable_types from nltk.parse.dependencygraph import DependencyGraph def projective_rule_parse_demo(): """ A demonstration showing the creation and use of a ``DependencyGrammar`` to perform a projective dependency parse. """ grammar = DependencyGrammar.fromstring( """ 'scratch' -> 'cats' | 'walls' 'walls' -> 'the' 'cats' -> 'the' """ ) print(grammar) pdp = ProjectiveDependencyParser(grammar) trees = pdp.parse(["the", "cats", "scratch", "the", "walls"]) for tree in trees: print(tree) def projective_prob_parse_demo(): """ A demo showing the training and use of a projective dependency parser. """ from nltk.parse.dependencygraph import conll_data2 graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] ppdp = ProbabilisticProjectiveDependencyParser() print("Training Probabilistic Projective Dependency Parser...") ppdp.train(graphs) sent = ["Cathy", "zag", "hen", "wild", "zwaaien", "."] print("Parsing '", " ".join(sent), "'...") print("Parse:") for tree in ppdp.parse(sent): print(tree) def demo(): projective_rule_parse_demo() # arity_parse_demo() projective_prob_parse_demo()
null
170,638
from collections import defaultdict from functools import total_ordering from itertools import chain from nltk.grammar import ( DependencyGrammar, DependencyProduction, ProbabilisticDependencyGrammar, ) from nltk.internals import raise_unorderable_types from nltk.parse.dependencygraph import DependencyGraph class ProjectiveDependencyParser: """ A projective, rule-based, dependency parser. A ProjectiveDependencyParser is created with a DependencyGrammar, a set of productions specifying word-to-word dependency relations. The parse() method will then return the set of all parses, in tree representation, for a given input sequence of tokens. Each parse must meet the requirements of the both the grammar and the projectivity constraint which specifies that the branches of the dependency tree are not allowed to cross. Alternatively, this can be understood as stating that each parent node and its children in the parse tree form a continuous substring of the input sequence. """ def __init__(self, dependency_grammar): """ Create a new ProjectiveDependencyParser, from a word-to-word dependency grammar ``DependencyGrammar``. :param dependency_grammar: A word-to-word relation dependencygrammar. :type dependency_grammar: DependencyGrammar """ self._grammar = dependency_grammar def parse(self, tokens): """ Performs a projective dependency parse on the list of tokens using a chart-based, span-concatenation algorithm similar to Eisner (1996). :param tokens: The list of input tokens. :type tokens: list(str) :return: An iterator over parse trees. :rtype: iter(Tree) """ self._tokens = list(tokens) chart = [] for i in range(0, len(self._tokens) + 1): chart.append([]) for j in range(0, len(self._tokens) + 1): chart[i].append(ChartCell(i, j)) if i == j + 1: chart[i][j].add(DependencySpan(i - 1, i, i - 1, [-1], ["null"])) for i in range(1, len(self._tokens) + 1): for j in range(i - 2, -1, -1): for k in range(i - 1, j, -1): for span1 in chart[k][j]._entries: for span2 in chart[i][k]._entries: for newspan in self.concatenate(span1, span2): chart[i][j].add(newspan) for parse in chart[len(self._tokens)][0]._entries: conll_format = "" # malt_format = "" for i in range(len(tokens)): # malt_format += '%s\t%s\t%d\t%s\n' % (tokens[i], 'null', parse._arcs[i] + 1, 'null') # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], 'null', 'null', 'null', parse._arcs[i] + 1, 'null', '-', '-') # Modify to comply with the new Dependency Graph requirement (at least must have an root elements) conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % ( i + 1, tokens[i], tokens[i], "null", "null", "null", parse._arcs[i] + 1, "ROOT", "-", "-", ) dg = DependencyGraph(conll_format) # if self.meets_arity(dg): yield dg.tree() def concatenate(self, span1, span2): """ Concatenates the two spans in whichever way possible. This includes rightward concatenation (from the leftmost word of the leftmost span to the rightmost word of the rightmost span) and leftward concatenation (vice-versa) between adjacent spans. Unlike Eisner's presentation of span concatenation, these spans do not share or pivot on a particular word/word-index. :return: A list of new spans formed through concatenation. 
:rtype: list(DependencySpan) """ spans = [] if span1._start_index == span2._start_index: print("Error: Mismatched spans - replace this with thrown error") if span1._start_index > span2._start_index: temp_span = span1 span1 = span2 span2 = temp_span # adjacent rightward covered concatenation new_arcs = span1._arcs + span2._arcs new_tags = span1._tags + span2._tags if self._grammar.contains( self._tokens[span1._head_index], self._tokens[span2._head_index] ): # print('Performing rightward cover %d to %d' % (span1._head_index, span2._head_index)) new_arcs[span2._head_index - span1._start_index] = span1._head_index spans.append( DependencySpan( span1._start_index, span2._end_index, span1._head_index, new_arcs, new_tags, ) ) # adjacent leftward covered concatenation new_arcs = span1._arcs + span2._arcs if self._grammar.contains( self._tokens[span2._head_index], self._tokens[span1._head_index] ): # print('performing leftward cover %d to %d' % (span2._head_index, span1._head_index)) new_arcs[span1._head_index - span1._start_index] = span2._head_index spans.append( DependencySpan( span1._start_index, span2._end_index, span2._head_index, new_arcs, new_tags, ) ) return spans class DependencyGrammar: """ A dependency grammar. A DependencyGrammar consists of a set of productions. Each production specifies a head/modifier relationship between a pair of words. """ def __init__(self, productions): """ Create a new dependency grammar, from the set of ``Productions``. :param productions: The list of productions that defines the grammar :type productions: list(Production) """ self._productions = productions def fromstring(cls, input): productions = [] for linenum, line in enumerate(input.split("\n")): line = line.strip() if line.startswith("#") or line == "": continue try: productions += _read_dependency_production(line) except ValueError as e: raise ValueError(f"Unable to parse line {linenum}: {line}") from e if len(productions) == 0: raise ValueError("No productions found!") return cls(productions) def contains(self, head, mod): """ :param head: A head word. :type head: str :param mod: A mod word, to test as a modifier of 'head'. :type mod: str :return: true if this ``DependencyGrammar`` contains a ``DependencyProduction`` mapping 'head' to 'mod'. :rtype: bool """ for production in self._productions: for possibleMod in production._rhs: if production._lhs == head and possibleMod == mod: return True return False def __contains__(self, head_mod): """ Return True if this ``DependencyGrammar`` contains a ``DependencyProduction`` mapping 'head' to 'mod'. :param head_mod: A tuple of a head word and a mod word, to test as a modifier of 'head'. :type head: Tuple[str, str] :rtype: bool """ try: head, mod = head_mod except ValueError as e: raise ValueError( "Must use a tuple of strings, e.g. 
`('price', 'of') in grammar`" ) from e return self.contains(head, mod) # # should be rewritten, the set comp won't work in all comparisons # def contains_exactly(self, head, modlist): # for production in self._productions: # if(len(production._rhs) == len(modlist)): # if(production._lhs == head): # set1 = Set(production._rhs) # set2 = Set(modlist) # if(set1 == set2): # return True # return False def __str__(self): """ Return a verbose string representation of the ``DependencyGrammar`` :rtype: str """ str = "Dependency grammar with %d productions" % len(self._productions) for production in self._productions: str += "\n %s" % production return str def __repr__(self): """ Return a concise string representation of the ``DependencyGrammar`` """ return "Dependency grammar with %d productions" % len(self._productions) The provided code snippet includes necessary dependencies for implementing the `arity_parse_demo` function. Write a Python function `def arity_parse_demo()` to solve the following problem: A demonstration showing the creation of a ``DependencyGrammar`` in which a specific number of modifiers is listed for a given head. This can further constrain the number of possible parses created by a ``ProjectiveDependencyParser``. Here is the function: def arity_parse_demo(): """ A demonstration showing the creation of a ``DependencyGrammar`` in which a specific number of modifiers is listed for a given head. This can further constrain the number of possible parses created by a ``ProjectiveDependencyParser``. """ print() print("A grammar with no arity constraints. Each DependencyProduction") print("specifies a relationship between one head word and only one") print("modifier word.") grammar = DependencyGrammar.fromstring( """ 'fell' -> 'price' | 'stock' 'price' -> 'of' | 'the' 'of' -> 'stock' 'stock' -> 'the' """ ) print(grammar) print() print("For the sentence 'The price of the stock fell', this grammar") print("will produce the following three parses:") pdp = ProjectiveDependencyParser(grammar) trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"]) for tree in trees: print(tree) print() print("By contrast, the following grammar contains a ") print("DependencyProduction that specifies a relationship") print("between a single head word, 'price', and two modifier") print("words, 'of' and 'the'.") grammar = DependencyGrammar.fromstring( """ 'fell' -> 'price' | 'stock' 'price' -> 'of' 'the' 'of' -> 'stock' 'stock' -> 'the' """ ) print(grammar) print() print( "This constrains the number of possible parses to just one:" ) # unimplemented, soon to replace pdp = ProjectiveDependencyParser(grammar) trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"]) for tree in trees: print(tree)
A demonstration showing the creation of a ``DependencyGrammar`` in which a specific number of modifiers is listed for a given head. This can further constrain the number of possible parses created by a ``ProjectiveDependencyParser``.
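A small sketch of the two membership checks on DependencyGrammar, using the arity-constrained grammar from the demo; the printed values follow directly from the contains() logic shown above.

grammar = DependencyGrammar.fromstring(
    """
    'fell' -> 'price' | 'stock'
    'price' -> 'of' 'the'
    'of' -> 'stock'
    'stock' -> 'the'
    """
)

print(grammar.contains("price", "of"))   # True  -- 'of' modifies 'price'
print(("price", "of") in grammar)        # True  -- tuple form of the same check
print(("of", "price") in grammar)        # False -- direction matters (head -> modifier)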
170,639
import inspect import os import subprocess import sys import tempfile from nltk.data import ZipFilePathPointer from nltk.internals import find_dir, find_file, find_jars_within_path from nltk.parse.api import ParserI from nltk.parse.dependencygraph import DependencyGraph from nltk.parse.util import taggedsents_to_conll def malt_regex_tagger(): from nltk.tag import RegexpTagger _tagger = RegexpTagger( [ (r"\.$", "."), (r"\,$", ","), (r"\?$", "?"), # fullstop, comma, Qmark (r"\($", "("), (r"\)$", ")"), # round brackets (r"\[$", "["), (r"\]$", "]"), # square brackets (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers (r"(The|the|A|a|An|an)$", "DT"), # articles (r"(He|he|She|she|It|it|I|me|Me|You|you)$", "PRP"), # pronouns (r"(His|his|Her|her|Its|its)$", "PRP$"), # possessive (r"(my|Your|your|Yours|yours)$", "PRP$"), # possessive (r"(on|On|in|In|at|At|since|Since)$", "IN"), # time prepopsitions (r"(for|For|ago|Ago|before|Before)$", "IN"), # time prepopsitions (r"(till|Till|until|Until)$", "IN"), # time prepopsitions (r"(by|By|beside|Beside)$", "IN"), # space prepopsitions (r"(under|Under|below|Below)$", "IN"), # space prepopsitions (r"(over|Over|above|Above)$", "IN"), # space prepopsitions (r"(across|Across|through|Through)$", "IN"), # space prepopsitions (r"(into|Into|towards|Towards)$", "IN"), # space prepopsitions (r"(onto|Onto|from|From)$", "IN"), # space prepopsitions (r".*able$", "JJ"), # adjectives (r".*ness$", "NN"), # nouns formed from adjectives (r".*ly$", "RB"), # adverbs (r".*s$", "NNS"), # plural nouns (r".*ing$", "VBG"), # gerunds (r".*ed$", "VBD"), # past tense verbs (r".*", "NN"), # nouns (default) ] ) return _tagger.tag
null
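A quick sketch of the helper above: malt_regex_tagger() returns the bound tag() method of a RegexpTagger, so it can be applied directly to a token list. The example sentence is illustrative.

tag = malt_regex_tagger()
print(tag("the dog walked in the park .".split()))
# [('the', 'DT'), ('dog', 'NN'), ('walked', 'VBD'), ('in', 'IN'),
#  ('the', 'DT'), ('park', 'NN'), ('.', '.')]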
170,640
import inspect import os import subprocess import sys import tempfile from nltk.data import ZipFilePathPointer from nltk.internals import find_dir, find_file, find_jars_within_path from nltk.parse.api import ParserI from nltk.parse.dependencygraph import DependencyGraph from nltk.parse.util import taggedsents_to_conll def find_dir( filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False ): return next( find_file_iter( filename, env_vars, searchpath, file_names, url, verbose, finding_dir=True ) ) def find_jars_within_path(path_to_jars): return [ os.path.join(root, filename) for root, dirnames, filenames in os.walk(path_to_jars) for filename in fnmatch.filter(filenames, "*.jar") ] The provided code snippet includes necessary dependencies for implementing the `find_maltparser` function. Write a Python function `def find_maltparser(parser_dirname)` to solve the following problem: A module to find MaltParser .jar file and its dependencies. Here is the function: def find_maltparser(parser_dirname): """ A module to find MaltParser .jar file and its dependencies. """ if os.path.exists(parser_dirname): # If a full path is given. _malt_dir = parser_dirname else: # Try to find path to maltparser directory in environment variables. _malt_dir = find_dir(parser_dirname, env_vars=("MALT_PARSER",)) # Checks that that the found directory contains all the necessary .jar malt_dependencies = ["", "", ""] _malt_jars = set(find_jars_within_path(_malt_dir)) _jars = {os.path.split(jar)[1] for jar in _malt_jars} malt_dependencies = {"log4j.jar", "libsvm.jar", "liblinear-1.8.jar"} assert malt_dependencies.issubset(_jars) assert any( filter(lambda i: i.startswith("maltparser-") and i.endswith(".jar"), _jars) ) return list(_malt_jars)
A helper function to find the MaltParser .jar file and its dependencies.
170,641
import inspect import os import subprocess import sys import tempfile from nltk.data import ZipFilePathPointer from nltk.internals import find_dir, find_file, find_jars_within_path from nltk.parse.api import ParserI from nltk.parse.dependencygraph import DependencyGraph from nltk.parse.util import taggedsents_to_conll def find_file( filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False ): return next( find_file_iter(filename, env_vars, searchpath, file_names, url, verbose) ) The provided code snippet includes necessary dependencies for implementing the `find_malt_model` function. Write a Python function `def find_malt_model(model_filename)` to solve the following problem: A module to find pre-trained MaltParser model. Here is the function: def find_malt_model(model_filename): """ A module to find pre-trained MaltParser model. """ if model_filename is None: return "malt_temp.mco" elif os.path.exists(model_filename): # If a full path is given. return model_filename else: # Try to find path to malt model in environment variables. return find_file(model_filename, env_vars=("MALT_MODEL",), verbose=False)
A helper function to find a pre-trained MaltParser model.
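A hedged sketch of how find_maltparser and find_malt_model are usually pointed at an installation through the MALT_PARSER and MALT_MODEL environment variables; every path below is a placeholder, not a real default, and find_maltparser additionally asserts that log4j.jar, libsvm.jar and liblinear-1.8.jar sit alongside the maltparser jar.

import os

os.environ["MALT_PARSER"] = "/opt/maltparser-1.9.2"                    # placeholder install dir
os.environ["MALT_MODEL"] = "/opt/malt-models/engmalt.linear-1.7.mco"   # placeholder model path

jars = find_maltparser("maltparser-1.9.2")          # list of jar paths, resolved via MALT_PARSER
model = find_malt_model("engmalt.linear-1.7.mco")   # resolved via MALT_MODEL if not a local file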
170,642
import json import os import re import socket import time from typing import List, Tuple from nltk.internals import _java_options, config_java, find_jar_iter, java from nltk.parse.api import ParserI from nltk.parse.dependencygraph import DependencyGraph from nltk.tag.api import TaggerI from nltk.tokenize.api import TokenizerI from nltk.tree import Tree def try_port(port=0): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(("", port)) p = sock.getsockname()[1] sock.close() return p
null
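A two-line usage sketch: try_port() with no argument asks the OS for any free ephemeral port (useful when starting a local CoreNLP server), while passing an explicit port simply checks that it can still be bound.

free_port = try_port()   # e.g. 54321 -- whatever free port the OS hands back
try_port(9000)           # raises OSError if something is already listening on 9000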
170,643
import json import os import re import socket import time from typing import List, Tuple from nltk.internals import _java_options, config_java, find_jar_iter, java from nltk.parse.api import ParserI from nltk.parse.dependencygraph import DependencyGraph from nltk.tag.api import TaggerI from nltk.tokenize.api import TokenizerI from nltk.tree import Tree def transform(sentence): for dependency in sentence["basicDependencies"]: dependent_index = dependency["dependent"] token = sentence["tokens"][dependent_index - 1] # Return values that we don't know as '_'. Also, consider tag and ctag # to be equal. yield ( dependent_index, "_", token["word"], token["lemma"], token["pos"], token["pos"], "_", str(dependency["governor"]), dependency["dep"], "_", "_", )
null
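A sketch of what transform() yields, fed with a hand-built fragment that imitates the shape of CoreNLP's JSON output (only the keys the function actually reads are included); the sentence itself is invented.

sentence = {
    "basicDependencies": [
        {"dep": "ROOT", "governor": 0, "dependent": 2},
        {"dep": "nsubj", "governor": 2, "dependent": 1},
    ],
    "tokens": [
        {"word": "Dogs", "lemma": "dog", "pos": "NNS"},
        {"word": "bark", "lemma": "bark", "pos": "VBP"},
    ],
}

for row in transform(sentence):
    print(row)
# (2, '_', 'bark', 'bark', 'VBP', 'VBP', '_', '0', 'ROOT', '_', '_')
# (1, '_', 'Dogs', 'dog', 'NNS', 'NNS', '_', '2', 'nsubj', '_', '_')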
170,644
import itertools import sys from nltk.grammar import Nonterminal def generate(grammar, start=None, depth=None, n=None): demo_grammar = """ S -> NP VP NP -> Det N PP -> P NP VP -> 'slept' | 'saw' NP | 'walked' PP Det -> 'the' | 'a' N -> 'man' | 'park' | 'dog' P -> 'in' | 'with' """ class CFG: def __init__(self, start, productions, calculate_leftcorners=True): def _calculate_indexes(self): def _calculate_leftcorners(self): def fromstring(cls, input, encoding=None): def start(self): def productions(self, lhs=None, rhs=None, empty=False): def leftcorners(self, cat): def is_leftcorner(self, cat, left): def leftcorner_parents(self, cat): def check_coverage(self, tokens): def _calculate_grammar_forms(self): def is_lexical(self): def is_nonlexical(self): def min_len(self): def max_len(self): def is_nonempty(self): def is_binarised(self): def is_flexible_chomsky_normal_form(self): def is_chomsky_normal_form(self): def chomsky_normal_form(self, new_token_padding="@$@", flexible=False): def remove_unitary_rules(cls, grammar): def binarize(cls, grammar, padding="@$@"): def eliminate_start(cls, grammar): def __repr__(self): def __str__(self): def demo(N=23): from nltk.grammar import CFG print("Generating the first %d sentences for demo grammar:" % (N,)) print(demo_grammar) grammar = CFG.fromstring(demo_grammar) for n, sent in enumerate(generate(grammar, n=N), 1): print("%3d. %s" % (n, " ".join(sent)))
null
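A short sketch of generate() driven by the demo grammar defined above; because expansion is depth-first over the productions in the order they are written, the first sentences all start with "the man".

from nltk import CFG
from nltk.parse.generate import generate, demo_grammar

grammar = CFG.fromstring(demo_grammar)
for sent in generate(grammar, n=5):
    print(" ".join(sent))
# the man slept
# the man saw the man
# ...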
170,645
from nltk.grammar import Nonterminal from nltk.parse.api import ParserI from nltk.tree import Tree class ShiftReduceParser(ParserI): """ A simple bottom-up CFG parser that uses two operations, "shift" and "reduce", to find a single parse for a text. ``ShiftReduceParser`` maintains a stack, which records the structure of a portion of the text. This stack is a list of strings and Trees that collectively cover a portion of the text. For example, while parsing the sentence "the dog saw the man" with a typical grammar, ``ShiftReduceParser`` will produce the following stack, which covers "the dog saw":: [(NP: (Det: 'the') (N: 'dog')), (V: 'saw')] ``ShiftReduceParser`` attempts to extend the stack to cover the entire text, and to combine the stack elements into a single tree, producing a complete parse for the sentence. Initially, the stack is empty. It is extended to cover the text, from left to right, by repeatedly applying two operations: - "shift" moves a token from the beginning of the text to the end of the stack. - "reduce" uses a CFG production to combine the rightmost stack elements into a single Tree. Often, more than one operation can be performed on a given stack. In this case, ``ShiftReduceParser`` uses the following heuristics to decide which operation to perform: - Only shift if no reductions are available. - If multiple reductions are available, then apply the reduction whose CFG production is listed earliest in the grammar. Note that these heuristics are not guaranteed to choose an operation that leads to a parse of the text. Also, if multiple parses exists, ``ShiftReduceParser`` will return at most one of them. :see: ``nltk.grammar`` """ def __init__(self, grammar, trace=0): """ Create a new ``ShiftReduceParser``, that uses ``grammar`` to parse texts. :type grammar: Grammar :param grammar: The grammar used to parse texts. :type trace: int :param trace: The level of tracing that should be used when parsing a text. ``0`` will generate no tracing output; and higher numbers will produce more verbose tracing output. """ self._grammar = grammar self._trace = trace self._check_grammar() def grammar(self): return self._grammar def parse(self, tokens): tokens = list(tokens) self._grammar.check_coverage(tokens) # initialize the stack. stack = [] remaining_text = tokens # Trace output. if self._trace: print("Parsing %r" % " ".join(tokens)) self._trace_stack(stack, remaining_text) # iterate through the text, pushing the token onto # the stack, then reducing the stack. while len(remaining_text) > 0: self._shift(stack, remaining_text) while self._reduce(stack, remaining_text): pass # Did we reduce everything? if len(stack) == 1: # Did we end up with the right category? if stack[0].label() == self._grammar.start().symbol(): yield stack[0] def _shift(self, stack, remaining_text): """ Move a token from the beginning of ``remaining_text`` to the end of ``stack``. :type stack: list(str and Tree) :param stack: A list of strings and Trees, encoding the structure of the text that has been parsed so far. :type remaining_text: list(str) :param remaining_text: The portion of the text that is not yet covered by ``stack``. :rtype: None """ stack.append(remaining_text[0]) remaining_text.remove(remaining_text[0]) if self._trace: self._trace_shift(stack, remaining_text) def _match_rhs(self, rhs, rightmost_stack): """ :rtype: bool :return: true if the right hand side of a CFG production matches the rightmost elements of the stack. 
``rhs`` matches ``rightmost_stack`` if they are the same length, and each element of ``rhs`` matches the corresponding element of ``rightmost_stack``. A nonterminal element of ``rhs`` matches any Tree whose node value is equal to the nonterminal's symbol. A terminal element of ``rhs`` matches any string whose type is equal to the terminal. :type rhs: list(terminal and Nonterminal) :param rhs: The right hand side of a CFG production. :type rightmost_stack: list(string and Tree) :param rightmost_stack: The rightmost elements of the parser's stack. """ if len(rightmost_stack) != len(rhs): return False for i in range(len(rightmost_stack)): if isinstance(rightmost_stack[i], Tree): if not isinstance(rhs[i], Nonterminal): return False if rightmost_stack[i].label() != rhs[i].symbol(): return False else: if isinstance(rhs[i], Nonterminal): return False if rightmost_stack[i] != rhs[i]: return False return True def _reduce(self, stack, remaining_text, production=None): """ Find a CFG production whose right hand side matches the rightmost stack elements; and combine those stack elements into a single Tree, with the node specified by the production's left-hand side. If more than one CFG production matches the stack, then use the production that is listed earliest in the grammar. The new Tree replaces the elements in the stack. :rtype: Production or None :return: If a reduction is performed, then return the CFG production that the reduction is based on; otherwise, return false. :type stack: list(string and Tree) :param stack: A list of strings and Trees, encoding the structure of the text that has been parsed so far. :type remaining_text: list(str) :param remaining_text: The portion of the text that is not yet covered by ``stack``. """ if production is None: productions = self._grammar.productions() else: productions = [production] # Try each production, in order. for production in productions: rhslen = len(production.rhs()) # check if the RHS of a production matches the top of the stack if self._match_rhs(production.rhs(), stack[-rhslen:]): # combine the tree to reflect the reduction tree = Tree(production.lhs().symbol(), stack[-rhslen:]) stack[-rhslen:] = [tree] # We reduced something if self._trace: self._trace_reduce(stack, production, remaining_text) return production # We didn't reduce anything return None def trace(self, trace=2): """ Set the level of tracing output that should be generated when parsing a text. :type trace: int :param trace: The trace level. A trace level of ``0`` will generate no tracing output; and higher trace levels will produce more verbose tracing output. :rtype: None """ # 1: just show shifts. # 2: show shifts & reduces # 3: display which tokens & productions are shifed/reduced self._trace = trace def _trace_stack(self, stack, remaining_text, marker=" "): """ Print trace output displaying the given stack and text. :rtype: None :param marker: A character that is printed to the left of the stack. This is used with trace level 2 to print 'S' before shifted stacks and 'R' before reduced stacks. """ s = " " + marker + " [ " for elt in stack: if isinstance(elt, Tree): s += repr(Nonterminal(elt.label())) + " " else: s += repr(elt) + " " s += "* " + " ".join(remaining_text) + "]" print(s) def _trace_shift(self, stack, remaining_text): """ Print trace output displaying that a token has been shifted. 
:rtype: None """ if self._trace > 2: print("Shift %r:" % stack[-1]) if self._trace == 2: self._trace_stack(stack, remaining_text, "S") elif self._trace > 0: self._trace_stack(stack, remaining_text) def _trace_reduce(self, stack, production, remaining_text): """ Print trace output displaying that ``production`` was used to reduce ``stack``. :rtype: None """ if self._trace > 2: rhs = " ".join(production.rhs()) print(f"Reduce {production.lhs()!r} <- {rhs}") if self._trace == 2: self._trace_stack(stack, remaining_text, "R") elif self._trace > 1: self._trace_stack(stack, remaining_text) def _check_grammar(self): """ Check to make sure that all of the CFG productions are potentially useful. If any productions can never be used, then print a warning. :rtype: None """ productions = self._grammar.productions() # Any production whose RHS is an extension of another production's RHS # will never be used. for i in range(len(productions)): for j in range(i + 1, len(productions)): rhs1 = productions[i].rhs() rhs2 = productions[j].rhs() if rhs1[: len(rhs2)] == rhs2: print("Warning: %r will never be used" % productions[i]) The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem: A demonstration of the shift-reduce parser. Here is the function: def demo(): """ A demonstration of the shift-reduce parser. """ from nltk import CFG, parse grammar = CFG.fromstring( """ S -> NP VP NP -> Det N | Det N PP VP -> V NP | V NP PP PP -> P NP NP -> 'I' N -> 'man' | 'park' | 'telescope' | 'dog' Det -> 'the' | 'a' P -> 'in' | 'with' V -> 'saw' """ ) sent = "I saw a man in the park".split() parser = parse.ShiftReduceParser(grammar, trace=2) for p in parser.parse(sent): print(p)
A demonstration of the shift-reduce parser.
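A minimal, non-interactive usage sketch for the parser above. The tiny grammar and sentence are illustrative only; note that ShiftReduceParser.parse yields at most one tree, and can yield none even for grammatical input because of its greedy reduce heuristic.

from nltk import CFG
from nltk.parse import ShiftReduceParser

# A toy grammar; any CFG whose terminals cover the input tokens will do.
grammar = CFG.fromstring("""
S -> NP VP
NP -> Det N
VP -> V NP
Det -> 'the'
N -> 'dog' | 'cat'
V -> 'saw'
""")

parser = ShiftReduceParser(grammar, trace=0)
for tree in parser.parse("the dog saw the cat".split()):
    print(tree)  # prints the single parse found, if any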
170,646
import itertools import re import warnings from functools import total_ordering from nltk.grammar import PCFG, is_nonterminal, is_terminal from nltk.internals import raise_unorderable_types from nltk.parse.api import ParserI from nltk.tree import Tree from nltk.util import OrderedDict def is_terminal(item): """ Return True if the item is a terminal, which currently is if it is hashable and not a ``Nonterminal``. :rtype: bool """ return hasattr(item, "__hash__") and not isinstance(item, Nonterminal) def _bottomup_filter(grammar, nexttoken, rhs, dot=0): if len(rhs) <= dot + 1: return True _next = rhs[dot + 1] if is_terminal(_next): return nexttoken == _next else: return grammar.is_leftcorner(_next, nexttoken)
null
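A quick illustration of the distinction that the module-local ``is_terminal`` helper above draws (it mirrors ``nltk.grammar.is_terminal``); the values passed in are illustrative.

from nltk.grammar import Nonterminal

print(is_terminal("dog"))               # True  -- hashable and not a Nonterminal
print(is_terminal(Nonterminal("NP")))   # False -- wrapped node values are nonterminals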
170,647
import itertools import re import warnings from functools import total_ordering from nltk.grammar import PCFG, is_nonterminal, is_terminal from nltk.internals import raise_unorderable_types from nltk.parse.api import ParserI from nltk.tree import Tree from nltk.util import OrderedDict TD_STRATEGY = [ LeafInitRule(), TopDownInitRule(), CachedTopDownPredictRule(), SingleEdgeFundamentalRule(), ] BU_STRATEGY = [ LeafInitRule(), EmptyPredictRule(), BottomUpPredictRule(), SingleEdgeFundamentalRule(), ] BU_LC_STRATEGY = [ LeafInitRule(), EmptyPredictRule(), BottomUpPredictCombineRule(), SingleEdgeFundamentalRule(), ] LC_STRATEGY = [ LeafInitRule(), FilteredBottomUpPredictCombineRule(), FilteredSingleEdgeFundamentalRule(), ] class ChartParser(ParserI): """ A generic chart parser. A "strategy", or list of ``ChartRuleI`` instances, is used to decide what edges to add to the chart. In particular, ``ChartParser`` uses the following algorithm to parse texts: | Until no new edges are added: | For each *rule* in *strategy*: | Apply *rule* to any applicable edges in the chart. | Return any complete parses in the chart """ def __init__( self, grammar, strategy=BU_LC_STRATEGY, trace=0, trace_chart_width=50, use_agenda=True, chart_class=Chart, ): """ Create a new chart parser, that uses ``grammar`` to parse texts. :type grammar: CFG :param grammar: The grammar used to parse texts. :type strategy: list(ChartRuleI) :param strategy: A list of rules that should be used to decide what edges to add to the chart (top-down strategy by default). :type trace: int :param trace: The level of tracing that should be used when parsing a text. ``0`` will generate no tracing output; and higher numbers will produce more verbose tracing output. :type trace_chart_width: int :param trace_chart_width: The default total width reserved for the chart in trace output. The remainder of each line will be used to display edges. :type use_agenda: bool :param use_agenda: Use an optimized agenda-based algorithm, if possible. :param chart_class: The class that should be used to create the parse charts. """ self._grammar = grammar self._strategy = strategy self._trace = trace self._trace_chart_width = trace_chart_width # If the strategy only consists of axioms (NUM_EDGES==0) and # inference rules (NUM_EDGES==1), we can use an agenda-based algorithm: self._use_agenda = use_agenda self._chart_class = chart_class self._axioms = [] self._inference_rules = [] for rule in strategy: if rule.NUM_EDGES == 0: self._axioms.append(rule) elif rule.NUM_EDGES == 1: self._inference_rules.append(rule) else: self._use_agenda = False def grammar(self): return self._grammar def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width): if not trace: return print_rule_header = trace > 1 for edge in new_edges: if print_rule_header: print("%s:" % rule) print_rule_header = False print(chart.pretty_format_edge(edge, edge_width)) def chart_parse(self, tokens, trace=None): """ Return the final parse ``Chart`` from which all possible parse trees can be extracted. :param tokens: The sentence to be parsed :type tokens: list(str) :rtype: Chart """ if trace is None: trace = self._trace trace_new_edges = self._trace_new_edges tokens = list(tokens) self._grammar.check_coverage(tokens) chart = self._chart_class(tokens) grammar = self._grammar # Width, for printing trace edges. trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1) if trace: print(chart.pretty_format_leaves(trace_edge_width)) if self._use_agenda: # Use an agenda-based algorithm. 
for axiom in self._axioms: new_edges = list(axiom.apply(chart, grammar)) trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width) inference_rules = self._inference_rules agenda = chart.edges() # We reverse the initial agenda, since it is a stack # but chart.edges() functions as a queue. agenda.reverse() while agenda: edge = agenda.pop() for rule in inference_rules: new_edges = list(rule.apply(chart, grammar, edge)) if trace: trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) agenda += new_edges else: # Do not use an agenda-based algorithm. edges_added = True while edges_added: edges_added = False for rule in self._strategy: new_edges = list(rule.apply_everywhere(chart, grammar)) edges_added = len(new_edges) trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) # Return the final chart. return chart def parse(self, tokens, tree_class=Tree): chart = self.chart_parse(tokens) return iter(chart.parses(self._grammar.start(), tree_class=tree_class)) class SteppingChartParser(ChartParser): """ A ``ChartParser`` that allows you to step through the parsing process, adding a single edge at a time. It also allows you to change the parser's strategy or grammar midway through parsing a text. The ``initialize`` method is used to start parsing a text. ``step`` adds a single edge to the chart. ``set_strategy`` changes the strategy used by the chart parser. ``parses`` returns the set of parses that has been found by the chart parser. :ivar _restart: Records whether the parser's strategy, grammar, or chart has been changed. If so, then ``step`` must restart the parsing algorithm. """ def __init__(self, grammar, strategy=[], trace=0): self._chart = None self._current_chartrule = None self._restart = False ChartParser.__init__(self, grammar, strategy, trace) # //////////////////////////////////////////////////////////// # Initialization # //////////////////////////////////////////////////////////// def initialize(self, tokens): "Begin parsing the given tokens." self._chart = Chart(list(tokens)) self._restart = True # //////////////////////////////////////////////////////////// # Stepping # //////////////////////////////////////////////////////////// def step(self): """ Return a generator that adds edges to the chart, one at a time. Each time the generator is resumed, it adds a single edge and yields that edge. If no more edges can be added, then it yields None. If the parser's strategy, grammar, or chart is changed, then the generator will continue adding edges using the new strategy, grammar, or chart. Note that this generator never terminates, since the grammar or strategy might be changed to values that would add new edges. Instead, it yields None when no more edges can be added with the current strategy and grammar. """ if self._chart is None: raise ValueError("Parser must be initialized first") while True: self._restart = False w = 50 // (self._chart.num_leaves() + 1) for e in self._parse(): if self._trace > 1: print(self._current_chartrule) if self._trace > 0: print(self._chart.pretty_format_edge(e, w)) yield e if self._restart: break else: yield None # No more edges. def _parse(self): """ A generator that implements the actual parsing algorithm. ``step`` iterates through this generator, and restarts it whenever the parser's strategy, grammar, or chart is modified. 
""" chart = self._chart grammar = self._grammar edges_added = 1 while edges_added > 0: edges_added = 0 for rule in self._strategy: self._current_chartrule = rule for e in rule.apply_everywhere(chart, grammar): edges_added += 1 yield e # //////////////////////////////////////////////////////////// # Accessors # //////////////////////////////////////////////////////////// def strategy(self): "Return the strategy used by this parser." return self._strategy def grammar(self): "Return the grammar used by this parser." return self._grammar def chart(self): "Return the chart that is used by this parser." return self._chart def current_chartrule(self): "Return the chart rule used to generate the most recent edge." return self._current_chartrule def parses(self, tree_class=Tree): "Return the parse trees currently contained in the chart." return self._chart.parses(self._grammar.start(), tree_class) # //////////////////////////////////////////////////////////// # Parser modification # //////////////////////////////////////////////////////////// def set_strategy(self, strategy): """ Change the strategy that the parser uses to decide which edges to add to the chart. :type strategy: list(ChartRuleI) :param strategy: A list of rules that should be used to decide what edges to add to the chart. """ if strategy == self._strategy: return self._strategy = strategy[:] # Make a copy. self._restart = True def set_grammar(self, grammar): "Change the grammar used by the parser." if grammar is self._grammar: return self._grammar = grammar self._restart = True def set_chart(self, chart): "Load a given chart into the chart parser." if chart is self._chart: return self._chart = chart self._restart = True # //////////////////////////////////////////////////////////// # Standard parser methods # //////////////////////////////////////////////////////////// def parse(self, tokens, tree_class=Tree): tokens = list(tokens) self._grammar.check_coverage(tokens) # Initialize ourselves. self.initialize(tokens) # Step until no more edges are generated. for e in self.step(): if e is None: break # Return an iterator of complete parses. return self.parses(tree_class=tree_class) def demo_grammar(): from nltk.grammar import CFG return CFG.fromstring( """ S -> NP VP PP -> "with" NP NP -> NP PP VP -> VP PP VP -> Verb NP VP -> Verb NP -> Det Noun NP -> "John" NP -> "I" Det -> "the" Det -> "my" Det -> "a" Noun -> "dog" Noun -> "cookie" Verb -> "ate" Verb -> "saw" Prep -> "with" Prep -> "under" """ ) class CFG: """ A context-free grammar. A grammar consists of a start state and a set of productions. The set of terminals and nonterminals is implicitly specified by the productions. If you need efficient key-based access to productions, you can use a subclass to implement it. """ def __init__(self, start, productions, calculate_leftcorners=True): """ Create a new context-free grammar, from the given start state and set of ``Production`` instances. :param start: The start symbol :type start: Nonterminal :param productions: The list of productions that defines the grammar :type productions: list(Production) :param calculate_leftcorners: False if we don't want to calculate the leftcorner relation. In that case, some optimized chart parsers won't work. 
:type calculate_leftcorners: bool """ if not is_nonterminal(start): raise TypeError( "start should be a Nonterminal object," " not a %s" % type(start).__name__ ) self._start = start self._productions = productions self._categories = {prod.lhs() for prod in productions} self._calculate_indexes() self._calculate_grammar_forms() if calculate_leftcorners: self._calculate_leftcorners() def _calculate_indexes(self): self._lhs_index = {} self._rhs_index = {} self._empty_index = {} self._lexical_index = {} for prod in self._productions: # Left hand side. lhs = prod._lhs if lhs not in self._lhs_index: self._lhs_index[lhs] = [] self._lhs_index[lhs].append(prod) if prod._rhs: # First item in right hand side. rhs0 = prod._rhs[0] if rhs0 not in self._rhs_index: self._rhs_index[rhs0] = [] self._rhs_index[rhs0].append(prod) else: # The right hand side is empty. self._empty_index[prod.lhs()] = prod # Lexical tokens in the right hand side. for token in prod._rhs: if is_terminal(token): self._lexical_index.setdefault(token, set()).add(prod) def _calculate_leftcorners(self): # Calculate leftcorner relations, for use in optimized parsing. self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories} self._immediate_leftcorner_words = {cat: set() for cat in self._categories} for prod in self.productions(): if len(prod) > 0: cat, left = prod.lhs(), prod.rhs()[0] if is_nonterminal(left): self._immediate_leftcorner_categories[cat].add(left) else: self._immediate_leftcorner_words[cat].add(left) lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True) self._leftcorners = lc self._leftcorner_parents = invert_graph(lc) nr_leftcorner_categories = sum( map(len, self._immediate_leftcorner_categories.values()) ) nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values())) if nr_leftcorner_words > nr_leftcorner_categories > 10000: # If the grammar is big, the leftcorner-word dictionary will be too large. # In that case it is better to calculate the relation on demand. self._leftcorner_words = None return self._leftcorner_words = {} for cat in self._leftcorners: lefts = self._leftcorners[cat] lc = self._leftcorner_words[cat] = set() for left in lefts: lc.update(self._immediate_leftcorner_words.get(left, set())) def fromstring(cls, input, encoding=None): """ Return the grammar instance corresponding to the input string(s). :param input: a grammar, either in the form of a string or as a list of strings. """ start, productions = read_grammar( input, standard_nonterm_parser, encoding=encoding ) return cls(start, productions) def start(self): """ Return the start symbol of the grammar :rtype: Nonterminal """ return self._start # tricky to balance readability and efficiency here! # can't use set operations as they don't preserve ordering def productions(self, lhs=None, rhs=None, empty=False): """ Return the grammar productions, filtered by the left-hand side or the first item in the right-hand side. :param lhs: Only return productions with the given left-hand side. :param rhs: Only return productions with the given first item in the right-hand side. :param empty: Only return productions with an empty right-hand side. :return: A list of productions matching the given constraints. :rtype: list(Production) """ if rhs and empty: raise ValueError( "You cannot select empty and non-empty " "productions at the same time." 
) # no constraints so return everything if not lhs and not rhs: if not empty: return self._productions else: return self._empty_index.values() # only lhs specified so look up its index elif lhs and not rhs: if not empty: return self._lhs_index.get(lhs, []) elif lhs in self._empty_index: return [self._empty_index[lhs]] else: return [] # only rhs specified so look up its index elif rhs and not lhs: return self._rhs_index.get(rhs, []) # intersect else: return [ prod for prod in self._lhs_index.get(lhs, []) if prod in self._rhs_index.get(rhs, []) ] def leftcorners(self, cat): """ Return the set of all nonterminals that the given nonterminal can start with, including itself. This is the reflexive, transitive closure of the immediate leftcorner relation: (A > B) iff (A -> B beta) :param cat: the parent of the leftcorners :type cat: Nonterminal :return: the set of all leftcorners :rtype: set(Nonterminal) """ return self._leftcorners.get(cat, {cat}) def is_leftcorner(self, cat, left): """ True if left is a leftcorner of cat, where left can be a terminal or a nonterminal. :param cat: the parent of the leftcorner :type cat: Nonterminal :param left: the suggested leftcorner :type left: Terminal or Nonterminal :rtype: bool """ if is_nonterminal(left): return left in self.leftcorners(cat) elif self._leftcorner_words: return left in self._leftcorner_words.get(cat, set()) else: return any( left in self._immediate_leftcorner_words.get(parent, set()) for parent in self.leftcorners(cat) ) def leftcorner_parents(self, cat): """ Return the set of all nonterminals for which the given category is a left corner. This is the inverse of the leftcorner relation. :param cat: the suggested leftcorner :type cat: Nonterminal :return: the set of all parents to the leftcorner :rtype: set(Nonterminal) """ return self._leftcorner_parents.get(cat, {cat}) def check_coverage(self, tokens): """ Check whether the grammar rules cover the given list of tokens. If not, then raise an exception. :type tokens: list(str) """ missing = [tok for tok in tokens if not self._lexical_index.get(tok)] if missing: missing = ", ".join(f"{w!r}" for w in missing) raise ValueError( "Grammar does not cover some of the " "input words: %r." % missing ) def _calculate_grammar_forms(self): """ Pre-calculate of which form(s) the grammar is. """ prods = self._productions self._is_lexical = all(p.is_lexical() for p in prods) self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1) self._min_len = min(len(p) for p in prods) self._max_len = max(len(p) for p in prods) self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1) def is_lexical(self): """ Return True if all productions are lexicalised. """ return self._is_lexical def is_nonlexical(self): """ Return True if all lexical rules are "preterminals", that is, unary rules which can be separated in a preprocessing step. This means that all productions are of the forms A -> B1 ... Bn (n>=0), or A -> "s". Note: is_lexical() and is_nonlexical() are not opposites. There are grammars which are neither, and grammars which are both. """ return self._is_nonlexical def min_len(self): """ Return the right-hand side length of the shortest grammar production. """ return self._min_len def max_len(self): """ Return the right-hand side length of the longest grammar production. """ return self._max_len def is_nonempty(self): """ Return True if there are no empty productions. """ return self._min_len > 0 def is_binarised(self): """ Return True if all productions are at most binary. 
Note that there can still be empty and unary productions. """ return self._max_len <= 2 def is_flexible_chomsky_normal_form(self): """ Return True if all productions are of the forms A -> B C, A -> B, or A -> "s". """ return self.is_nonempty() and self.is_nonlexical() and self.is_binarised() def is_chomsky_normal_form(self): """ Return True if the grammar is of Chomsky Normal Form, i.e. all productions are of the form A -> B C, or A -> "s". """ return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical def chomsky_normal_form(self, new_token_padding="@$@", flexible=False): """ Returns a new Grammar that is in chomsky normal :param: new_token_padding Customise new rule formation during binarisation """ if self.is_chomsky_normal_form(): return self if self.productions(empty=True): raise ValueError( "Grammar has Empty rules. " "Cannot deal with them at the moment" ) # check for mixed rules for rule in self.productions(): if rule.is_lexical() and len(rule.rhs()) > 1: raise ValueError( f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}" ) step1 = CFG.eliminate_start(self) step2 = CFG.binarize(step1, new_token_padding) if flexible: return step2 step3 = CFG.remove_unitary_rules(step2) step4 = CFG(step3.start(), list(set(step3.productions()))) return step4 def remove_unitary_rules(cls, grammar): """ Remove nonlexical unitary rules and convert them to lexical """ result = [] unitary = [] for rule in grammar.productions(): if len(rule) == 1 and rule.is_nonlexical(): unitary.append(rule) else: result.append(rule) while unitary: rule = unitary.pop(0) for item in grammar.productions(lhs=rule.rhs()[0]): new_rule = Production(rule.lhs(), item.rhs()) if len(new_rule) != 1 or new_rule.is_lexical(): result.append(new_rule) else: unitary.append(new_rule) n_grammar = CFG(grammar.start(), result) return n_grammar def binarize(cls, grammar, padding="@$@"): """ Convert all non-binary rules into binary by introducing new tokens. 
Example:: Original: A => B C D After Conversion: A => B A@$@B A@$@B => C D """ result = [] for rule in grammar.productions(): if len(rule.rhs()) > 2: # this rule needs to be broken down left_side = rule.lhs() for k in range(0, len(rule.rhs()) - 2): tsym = rule.rhs()[k] new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol()) new_production = Production(left_side, (tsym, new_sym)) left_side = new_sym result.append(new_production) last_prd = Production(left_side, rule.rhs()[-2:]) result.append(last_prd) else: result.append(rule) n_grammar = CFG(grammar.start(), result) return n_grammar def eliminate_start(cls, grammar): """ Eliminate start rule in case it appears on RHS Example: S -> S0 S1 and S0 -> S1 S Then another rule S0_Sigma -> S is added """ start = grammar.start() result = [] need_to_add = None for rule in grammar.productions(): if start in rule.rhs(): need_to_add = True result.append(rule) if need_to_add: start = Nonterminal("S0_SIGMA") result.append(Production(start, [grammar.start()])) n_grammar = CFG(start, result) return n_grammar return grammar def __repr__(self): return "<Grammar with %d productions>" % len(self._productions) def __str__(self): result = "Grammar with %d productions" % len(self._productions) result += " (start state = %r)" % self._start for production in self._productions: result += "\n %s" % production return result import sys if sys.version_info >= (3, 9): from types import GenericAlias if sys.version_info >= (3, 8): def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> List[Tuple[AnyStr, AnyStr]]: ... else: def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> List[Tuple[AnyStr, AnyStr]]: ... The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo( choice=None, print_times=True, print_grammar=False, print_trees=True, trace=2, sent="I saw John with a dog with my cookie", numparses=5, )` to solve the following problem: A demonstration of the chart parsers. Here is the function: def demo( choice=None, print_times=True, print_grammar=False, print_trees=True, trace=2, sent="I saw John with a dog with my cookie", numparses=5, ): """ A demonstration of the chart parsers. """ import sys import time from nltk import CFG, Production, nonterminals # The grammar for ChartParser and SteppingChartParser: grammar = demo_grammar() if print_grammar: print("* Grammar") print(grammar) # Tokenize the sample sentence. print("* Sentence:") print(sent) tokens = sent.split() print(tokens) print() # Ask the user which parser to test, # if the parser wasn't provided as an argument if choice is None: print(" 1: Top-down chart parser") print(" 2: Bottom-up chart parser") print(" 3: Bottom-up left-corner chart parser") print(" 4: Left-corner chart parser with bottom-up filter") print(" 5: Stepping chart parser (alternating top-down & bottom-up)") print(" 6: All parsers") print("\nWhich parser (1-6)? 
", end=" ") choice = sys.stdin.readline().strip() print() choice = str(choice) if choice not in "123456": print("Bad parser number") return # Keep track of how long each parser takes. times = {} strategies = { "1": ("Top-down", TD_STRATEGY), "2": ("Bottom-up", BU_STRATEGY), "3": ("Bottom-up left-corner", BU_LC_STRATEGY), "4": ("Filtered left-corner", LC_STRATEGY), } choices = [] if choice in strategies: choices = [choice] if choice == "6": choices = "1234" # Run the requested chart parser(s), except the stepping parser. for strategy in choices: print("* Strategy: " + strategies[strategy][0]) print() cp = ChartParser(grammar, strategies[strategy][1], trace=trace) t = time.time() chart = cp.chart_parse(tokens) parses = list(chart.parses(grammar.start())) times[strategies[strategy][0]] = time.time() - t print("Nr edges in chart:", len(chart.edges())) if numparses: assert len(parses) == numparses, "Not all parses found" if print_trees: for tree in parses: print(tree) else: print("Nr trees:", len(parses)) print() # Run the stepping parser, if requested. if choice in "56": print("* Strategy: Stepping (top-down vs bottom-up)") print() t = time.time() cp = SteppingChartParser(grammar, trace=trace) cp.initialize(tokens) for i in range(5): print("*** SWITCH TO TOP DOWN") cp.set_strategy(TD_STRATEGY) for j, e in enumerate(cp.step()): if j > 20 or e is None: break print("*** SWITCH TO BOTTOM UP") cp.set_strategy(BU_STRATEGY) for j, e in enumerate(cp.step()): if j > 20 or e is None: break times["Stepping"] = time.time() - t print("Nr edges in chart:", len(cp.chart().edges())) if numparses: assert len(list(cp.parses())) == numparses, "Not all parses found" if print_trees: for tree in cp.parses(): print(tree) else: print("Nr trees:", len(list(cp.parses()))) print() # Print the times of all parsers: if not (print_times and times): return print("* Parsing times") print() maxlen = max(len(key) for key in times) format = "%" + repr(maxlen) + "s parser: %6.3fsec" times_items = times.items() for (parser, t) in sorted(times_items, key=lambda a: a[1]): print(format % (parser, t))
A demonstration of the chart parsers.
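For reference, a minimal programmatic sketch of ``ChartParser`` with the bottom-up left-corner strategy (the class default); the grammar and sentence are illustrative only.

from nltk import CFG
from nltk.parse.chart import ChartParser, BU_LC_STRATEGY

grammar = CFG.fromstring("""
S -> NP VP
NP -> 'I' | Det N
VP -> V NP
Det -> 'a'
N -> 'dog'
V -> 'saw'
""")

# Build the chart with the chosen rule strategy and read out complete parses.
parser = ChartParser(grammar, BU_LC_STRATEGY, trace=0)
for tree in parser.parse("I saw a dog".split()):
    print(tree)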
170,648
from time import perf_counter from nltk.parse.chart import ( BottomUpPredictCombineRule, BottomUpPredictRule, CachedTopDownPredictRule, Chart, ChartParser, EdgeI, EmptyPredictRule, FilteredBottomUpPredictCombineRule, FilteredSingleEdgeFundamentalRule, LeafEdge, LeafInitRule, SingleEdgeFundamentalRule, TopDownInitRule, ) from nltk.parse.featurechart import ( FeatureBottomUpPredictCombineRule, FeatureBottomUpPredictRule, FeatureChart, FeatureChartParser, FeatureEmptyPredictRule, FeatureSingleEdgeFundamentalRule, FeatureTopDownInitRule, FeatureTopDownPredictRule, ) class EarleyChartParser(IncrementalChartParser): def __init__(self, grammar, **parser_args): IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args) import sys if sys.version_info >= (3, 9): from types import GenericAlias if sys.version_info >= (3, 8): def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ..., max_num_fields: Optional[int] = ..., ) -> List[Tuple[AnyStr, AnyStr]]: ... else: def parse_qs( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> Dict[AnyStr, List[AnyStr]]: ... def parse_qsl( qs: Optional[AnyStr], keep_blank_values: bool = ..., strict_parsing: bool = ..., encoding: str = ..., errors: str = ... ) -> List[Tuple[AnyStr, AnyStr]]: ... def demo_grammar(): from nltk.grammar import CFG return CFG.fromstring( """ S -> NP VP PP -> "with" NP NP -> NP PP VP -> VP PP VP -> Verb NP VP -> Verb NP -> Det Noun NP -> "John" NP -> "I" Det -> "the" Det -> "my" Det -> "a" Noun -> "dog" Noun -> "cookie" Verb -> "ate" Verb -> "saw" Prep -> "with" Prep -> "under" """ ) The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo( print_times=True, print_grammar=False, print_trees=True, trace=2, sent="I saw John with a dog with my cookie", numparses=5, )` to solve the following problem: A demonstration of the Earley parsers. Here is the function: def demo( print_times=True, print_grammar=False, print_trees=True, trace=2, sent="I saw John with a dog with my cookie", numparses=5, ): """ A demonstration of the Earley parsers. """ import sys import time from nltk.parse.chart import demo_grammar # The grammar for ChartParser and SteppingChartParser: grammar = demo_grammar() if print_grammar: print("* Grammar") print(grammar) # Tokenize the sample sentence. print("* Sentence:") print(sent) tokens = sent.split() print(tokens) print() # Do the parsing. earley = EarleyChartParser(grammar, trace=trace) t = perf_counter() chart = earley.chart_parse(tokens) parses = list(chart.parses(grammar.start())) t = perf_counter() - t # Print results. if numparses: assert len(parses) == numparses, "Not all parses found" if print_trees: for tree in parses: print(tree) else: print("Nr trees:", len(parses)) if print_times: print("Time:", t)
A demonstration of the Earley parsers.
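A small sketch of the same parser used outside the demo, reusing the shared ``demo_grammar`` (``EarleyChartParser`` lives in ``nltk.parse.earleychart``); the sentence is illustrative.

from nltk.parse.chart import demo_grammar
from nltk.parse.earleychart import EarleyChartParser

grammar = demo_grammar()
parser = EarleyChartParser(grammar, trace=0)
for tree in parser.parse("I saw John with a dog".split()):
    print(tree)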
170,649
import re from functools import total_ordering from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader from nltk.internals import raise_unorderable_types from nltk.probability import ImmutableProbabilisticMixIn from nltk.util import invert_graph, transitive_closure class Nonterminal: """ A non-terminal symbol for a context free grammar. ``Nonterminal`` is a wrapper class for node values; it is used by ``Production`` objects to distinguish node values from leaf values. The node value that is wrapped by a ``Nonterminal`` is known as its "symbol". Symbols are typically strings representing phrasal categories (such as ``"NP"`` or ``"VP"``). However, more complex symbol types are sometimes used (e.g., for lexicalized grammars). Since symbols are node values, they must be immutable and hashable. Two ``Nonterminals`` are considered equal if their symbols are equal. :see: ``CFG``, ``Production`` :type _symbol: any :ivar _symbol: The node value corresponding to this ``Nonterminal``. This value must be immutable and hashable. """ def __init__(self, symbol): """ Construct a new non-terminal from the given symbol. :type symbol: any :param symbol: The node value corresponding to this ``Nonterminal``. This value must be immutable and hashable. """ self._symbol = symbol def symbol(self): """ Return the node value corresponding to this ``Nonterminal``. :rtype: (any) """ return self._symbol def __eq__(self, other): """ Return True if this non-terminal is equal to ``other``. In particular, return True if ``other`` is a ``Nonterminal`` and this non-terminal's symbol is equal to ``other`` 's symbol. :rtype: bool """ return type(self) == type(other) and self._symbol == other._symbol def __ne__(self, other): return not self == other def __lt__(self, other): if not isinstance(other, Nonterminal): raise_unorderable_types("<", self, other) return self._symbol < other._symbol def __hash__(self): return hash(self._symbol) def __repr__(self): """ Return a string representation for this ``Nonterminal``. :rtype: str """ if isinstance(self._symbol, str): return "%s" % self._symbol else: return "%s" % repr(self._symbol) def __str__(self): """ Return a string representation for this ``Nonterminal``. :rtype: str """ if isinstance(self._symbol, str): return "%s" % self._symbol else: return "%s" % repr(self._symbol) def __div__(self, rhs): """ Return a new nonterminal whose symbol is ``A/B``, where ``A`` is the symbol for this nonterminal, and ``B`` is the symbol for rhs. :param rhs: The nonterminal used to form the right hand side of the new nonterminal. :type rhs: Nonterminal :rtype: Nonterminal """ return Nonterminal(f"{self._symbol}/{rhs._symbol}") def __truediv__(self, rhs): """ Return a new nonterminal whose symbol is ``A/B``, where ``A`` is the symbol for this nonterminal, and ``B`` is the symbol for rhs. This function allows use of the slash ``/`` operator with the future import of division. :param rhs: The nonterminal used to form the right hand side of the new nonterminal. :type rhs: Nonterminal :rtype: Nonterminal """ return self.__div__(rhs) The provided code snippet includes necessary dependencies for implementing the `is_nonterminal` function. Write a Python function `def is_nonterminal(item)` to solve the following problem: :return: True if the item is a ``Nonterminal``. :rtype: bool Here is the function: def is_nonterminal(item): """ :return: True if the item is a ``Nonterminal``. :rtype: bool """ return isinstance(item, Nonterminal)
:return: True if the item is a ``Nonterminal``. :rtype: bool
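A two-line illustration using the public ``nltk.grammar`` names:

from nltk.grammar import Nonterminal, is_nonterminal

print(is_nonterminal(Nonterminal("NP")))  # True
print(is_nonterminal("dog"))              # False -- plain strings are terminals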
170,650
import re from functools import total_ordering from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader from nltk.internals import raise_unorderable_types from nltk.probability import ImmutableProbabilisticMixIn from nltk.util import invert_graph, transitive_closure def _read_production(line, nonterm_parser, probabilistic=False): """ Parse a grammar rule, given as a string, and return a list of productions. """ pos = 0 # Parse the left-hand side. lhs, pos = nonterm_parser(line, pos) # Skip over the arrow. m = _ARROW_RE.match(line, pos) if not m: raise ValueError("Expected an arrow") pos = m.end() # Parse the right hand side. probabilities = [0.0] rhsides = [[]] while pos < len(line): # Probability. m = _PROBABILITY_RE.match(line, pos) if probabilistic and m: pos = m.end() probabilities[-1] = float(m.group(1)[1:-1]) if probabilities[-1] > 1.0: raise ValueError( "Production probability %f, " "should not be greater than 1.0" % (probabilities[-1],) ) # String -- add terminal. elif line[pos] in "'\"": m = _TERMINAL_RE.match(line, pos) if not m: raise ValueError("Unterminated string") rhsides[-1].append(m.group(1)[1:-1]) pos = m.end() # Vertical bar -- start new rhside. elif line[pos] == "|": m = _DISJUNCTION_RE.match(line, pos) probabilities.append(0.0) rhsides.append([]) pos = m.end() # Anything else -- nonterminal. else: nonterm, pos = nonterm_parser(line, pos) rhsides[-1].append(nonterm) if probabilistic: return [ ProbabilisticProduction(lhs, rhs, prob=probability) for (rhs, probability) in zip(rhsides, probabilities) ] else: return [Production(lhs, rhs) for rhs in rhsides] def standard_nonterm_parser(string, pos): m = _STANDARD_NONTERM_RE.match(string, pos) if not m: raise ValueError("Expected a nonterminal, found: " + string[pos:]) return (Nonterminal(m.group(1)), m.end()) The provided code snippet includes necessary dependencies for implementing the `_read_cfg_production` function. Write a Python function `def _read_cfg_production(input)` to solve the following problem: Return a list of context-free ``Productions``. Here is the function: def _read_cfg_production(input): """ Return a list of context-free ``Productions``. """ return _read_production(input, standard_nonterm_parser)
Return a list of context-free ``Productions``.
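A sketch of what the helper returns when the right-hand side contains disjunctions. This assumes the module-level regexes referenced by ``_read_production`` (``_ARROW_RE``, ``_TERMINAL_RE``, and so on) are in scope, as they are inside ``nltk.grammar``; the rule string is illustrative.

# One rule string with alternatives expands into several Productions.
for prod in _read_cfg_production("NP -> Det N | 'John' | 'I'"):
    print(prod)
# Expected output, roughly:
# NP -> Det N
# NP -> 'John'
# NP -> 'I'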
170,651
import re from functools import total_ordering from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader from nltk.internals import raise_unorderable_types from nltk.probability import ImmutableProbabilisticMixIn from nltk.util import invert_graph, transitive_closure def _read_production(line, nonterm_parser, probabilistic=False): """ Parse a grammar rule, given as a string, and return a list of productions. """ pos = 0 # Parse the left-hand side. lhs, pos = nonterm_parser(line, pos) # Skip over the arrow. m = _ARROW_RE.match(line, pos) if not m: raise ValueError("Expected an arrow") pos = m.end() # Parse the right hand side. probabilities = [0.0] rhsides = [[]] while pos < len(line): # Probability. m = _PROBABILITY_RE.match(line, pos) if probabilistic and m: pos = m.end() probabilities[-1] = float(m.group(1)[1:-1]) if probabilities[-1] > 1.0: raise ValueError( "Production probability %f, " "should not be greater than 1.0" % (probabilities[-1],) ) # String -- add terminal. elif line[pos] in "'\"": m = _TERMINAL_RE.match(line, pos) if not m: raise ValueError("Unterminated string") rhsides[-1].append(m.group(1)[1:-1]) pos = m.end() # Vertical bar -- start new rhside. elif line[pos] == "|": m = _DISJUNCTION_RE.match(line, pos) probabilities.append(0.0) rhsides.append([]) pos = m.end() # Anything else -- nonterminal. else: nonterm, pos = nonterm_parser(line, pos) rhsides[-1].append(nonterm) if probabilistic: return [ ProbabilisticProduction(lhs, rhs, prob=probability) for (rhs, probability) in zip(rhsides, probabilities) ] else: return [Production(lhs, rhs) for rhs in rhsides] def standard_nonterm_parser(string, pos): m = _STANDARD_NONTERM_RE.match(string, pos) if not m: raise ValueError("Expected a nonterminal, found: " + string[pos:]) return (Nonterminal(m.group(1)), m.end()) The provided code snippet includes necessary dependencies for implementing the `_read_pcfg_production` function. Write a Python function `def _read_pcfg_production(input)` to solve the following problem: Return a list of PCFG ``ProbabilisticProductions``. Here is the function: def _read_pcfg_production(input): """ Return a list of PCFG ``ProbabilisticProductions``. """ return _read_production(input, standard_nonterm_parser, probabilistic=True)
Return a list of PCFG ``ProbabilisticProductions``.
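An analogous sketch for the probabilistic variant, under the same assumption about the private module-level regexes; each alternative carries its own probability.

for prod in _read_pcfg_production("Det -> 'the' [0.8] | 'my' [0.2]"):
    print(prod)
# Expected output, roughly:
# Det -> 'the' [0.8]
# Det -> 'my' [0.2]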
170,652
import re from functools import total_ordering from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader from nltk.internals import raise_unorderable_types from nltk.probability import ImmutableProbabilisticMixIn from nltk.util import invert_graph, transitive_closure def _read_production(line, nonterm_parser, probabilistic=False): """ Parse a grammar rule, given as a string, and return a list of productions. """ pos = 0 # Parse the left-hand side. lhs, pos = nonterm_parser(line, pos) # Skip over the arrow. m = _ARROW_RE.match(line, pos) if not m: raise ValueError("Expected an arrow") pos = m.end() # Parse the right hand side. probabilities = [0.0] rhsides = [[]] while pos < len(line): # Probability. m = _PROBABILITY_RE.match(line, pos) if probabilistic and m: pos = m.end() probabilities[-1] = float(m.group(1)[1:-1]) if probabilities[-1] > 1.0: raise ValueError( "Production probability %f, " "should not be greater than 1.0" % (probabilities[-1],) ) # String -- add terminal. elif line[pos] in "'\"": m = _TERMINAL_RE.match(line, pos) if not m: raise ValueError("Unterminated string") rhsides[-1].append(m.group(1)[1:-1]) pos = m.end() # Vertical bar -- start new rhside. elif line[pos] == "|": m = _DISJUNCTION_RE.match(line, pos) probabilities.append(0.0) rhsides.append([]) pos = m.end() # Anything else -- nonterminal. else: nonterm, pos = nonterm_parser(line, pos) rhsides[-1].append(nonterm) if probabilistic: return [ ProbabilisticProduction(lhs, rhs, prob=probability) for (rhs, probability) in zip(rhsides, probabilities) ] else: return [Production(lhs, rhs) for rhs in rhsides] The provided code snippet includes necessary dependencies for implementing the `_read_fcfg_production` function. Write a Python function `def _read_fcfg_production(input, fstruct_reader)` to solve the following problem: Return a list of feature-based ``Productions``. Here is the function: def _read_fcfg_production(input, fstruct_reader): """ Return a list of feature-based ``Productions``. """ return _read_production(input, fstruct_reader)
Return a list of feature-based ``Productions``.
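Because ``_read_fcfg_production`` needs an already-configured ``FeatStructReader``, the easiest way to exercise it is through the public entry point that ultimately relies on it. The sketch below uses ``FeatureGrammar.fromstring`` rather than calling the private helper directly; the grammar content is illustrative.

from nltk.grammar import FeatureGrammar

fg = FeatureGrammar.fromstring("""
% start S
S -> NP[NUM=?n] VP[NUM=?n]
NP[NUM=sg] -> 'Kim'
VP[NUM=sg] -> 'sleeps'
""")
print(fg.productions())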
170,653
import re from functools import total_ordering from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader from nltk.internals import raise_unorderable_types from nltk.probability import ImmutableProbabilisticMixIn from nltk.util import invert_graph, transitive_closure def _read_production(line, nonterm_parser, probabilistic=False): """ Parse a grammar rule, given as a string, and return a list of productions. """ pos = 0 # Parse the left-hand side. lhs, pos = nonterm_parser(line, pos) # Skip over the arrow. m = _ARROW_RE.match(line, pos) if not m: raise ValueError("Expected an arrow") pos = m.end() # Parse the right hand side. probabilities = [0.0] rhsides = [[]] while pos < len(line): # Probability. m = _PROBABILITY_RE.match(line, pos) if probabilistic and m: pos = m.end() probabilities[-1] = float(m.group(1)[1:-1]) if probabilities[-1] > 1.0: raise ValueError( "Production probability %f, " "should not be greater than 1.0" % (probabilities[-1],) ) # String -- add terminal. elif line[pos] in "'\"": m = _TERMINAL_RE.match(line, pos) if not m: raise ValueError("Unterminated string") rhsides[-1].append(m.group(1)[1:-1]) pos = m.end() # Vertical bar -- start new rhside. elif line[pos] == "|": m = _DISJUNCTION_RE.match(line, pos) probabilities.append(0.0) rhsides.append([]) pos = m.end() # Anything else -- nonterminal. else: nonterm, pos = nonterm_parser(line, pos) rhsides[-1].append(nonterm) if probabilistic: return [ ProbabilisticProduction(lhs, rhs, prob=probability) for (rhs, probability) in zip(rhsides, probabilities) ] else: return [Production(lhs, rhs) for rhs in rhsides] The provided code snippet includes necessary dependencies for implementing the `read_grammar` function. Write a Python function `def read_grammar(input, nonterm_parser, probabilistic=False, encoding=None)` to solve the following problem: Return a pair consisting of a starting category and a list of ``Productions``. :param input: a grammar, either in the form of a string or else as a list of strings. :param nonterm_parser: a function for parsing nonterminals. It should take a ``(string, position)`` as argument and return a ``(nonterminal, position)`` as result. :param probabilistic: are the grammar rules probabilistic? :type probabilistic: bool :param encoding: the encoding of the grammar, if it is a binary string :type encoding: str Here is the function: def read_grammar(input, nonterm_parser, probabilistic=False, encoding=None): """ Return a pair consisting of a starting category and a list of ``Productions``. :param input: a grammar, either in the form of a string or else as a list of strings. :param nonterm_parser: a function for parsing nonterminals. It should take a ``(string, position)`` as argument and return a ``(nonterminal, position)`` as result. :param probabilistic: are the grammar rules probabilistic? 
:type probabilistic: bool :param encoding: the encoding of the grammar, if it is a binary string :type encoding: str """ if encoding is not None: input = input.decode(encoding) if isinstance(input, str): lines = input.split("\n") else: lines = input start = None productions = [] continue_line = "" for linenum, line in enumerate(lines): line = continue_line + line.strip() if line.startswith("#") or line == "": continue if line.endswith("\\"): continue_line = line[:-1].rstrip() + " " continue continue_line = "" try: if line[0] == "%": directive, args = line[1:].split(None, 1) if directive == "start": start, pos = nonterm_parser(args, 0) if pos != len(args): raise ValueError("Bad argument to start directive") else: raise ValueError("Bad directive") else: # expand out the disjunctions on the RHS productions += _read_production(line, nonterm_parser, probabilistic) except ValueError as e: raise ValueError(f"Unable to parse line {linenum + 1}: {line}\n{e}") from e if not productions: raise ValueError("No productions found!") if not start: start = productions[0].lhs() return (start, productions)
Return a pair consisting of a starting category and a list of ``Productions``. :param input: a grammar, either in the form of a string or else as a list of strings. :param nonterm_parser: a function for parsing nonterminals. It should take a ``(string, position)`` as argument and return a ``(nonterminal, position)`` as result. :param probabilistic: are the grammar rules probabilistic? :type probabilistic: bool :param encoding: the encoding of the grammar, if it is a binary string :type encoding: str
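A direct call using the standard nonterminal parser; both names mirror the helpers shown above and are importable from ``nltk.grammar``. The tiny grammar is illustrative.

from nltk.grammar import read_grammar, standard_nonterm_parser

start, productions = read_grammar(
    """
    % start S
    S -> NP VP
    NP -> 'I'
    VP -> 'sleep'
    """,
    standard_nonterm_parser,
)
print(start)             # S
print(len(productions))  # 3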
170,654
import re from functools import total_ordering from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader from nltk.internals import raise_unorderable_types from nltk.probability import ImmutableProbabilisticMixIn from nltk.util import invert_graph, transitive_closure class DependencyProduction(Production): """ A dependency grammar production. Each production maps a single head word to an unordered list of one or more modifier words. """ def __str__(self): """ Return a verbose string representation of the ``DependencyProduction``. :rtype: str """ result = f"'{self._lhs}' ->" for elt in self._rhs: result += f" '{elt}'" return result _READ_DG_RE = re.compile( r"""^\s* # leading whitespace ('[^']+')\s* # single-quoted lhs (?:[-=]+>)\s* # arrow (?:( # rhs: "[^"]+" # doubled-quoted terminal | '[^']+' # single-quoted terminal | \| # disjunction ) \s*) # trailing space *$""", # zero or more copies re.VERBOSE, ) _SPLIT_DG_RE = re.compile(r"""('[^']'|[-=]+>|"[^"]+"|'[^']+'|\|)""") def _read_dependency_production(s): if not _READ_DG_RE.match(s): raise ValueError("Bad production string") pieces = _SPLIT_DG_RE.split(s) pieces = [p for i, p in enumerate(pieces) if i % 2 == 1] lhside = pieces[0].strip("'\"") rhsides = [[]] for piece in pieces[2:]: if piece == "|": rhsides.append([]) else: rhsides[-1].append(piece.strip("'\"")) return [DependencyProduction(lhside, rhside) for rhside in rhsides]
null
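A short sketch of the private reader above: disjunctions on the right-hand side are split into separate ``DependencyProduction`` objects. It assumes the module-level regexes defined alongside it are in scope; the rule string is illustrative.

for prod in _read_dependency_production("'scratch' -> 'cats' | 'walls'"):
    print(prod)
# Expected output, roughly:
# 'scratch' -> 'cats'
# 'scratch' -> 'walls'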
170,655
import re from functools import total_ordering from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader from nltk.internals import raise_unorderable_types from nltk.probability import ImmutableProbabilisticMixIn from nltk.util import invert_graph, transitive_closure def cfg_demo(): """ A demonstration showing how ``CFGs`` can be created and used. """ from nltk import CFG, Production, nonterminals # Create some nonterminals S, NP, VP, PP = nonterminals("S, NP, VP, PP") N, V, P, Det = nonterminals("N, V, P, Det") VP_slash_NP = VP / NP print("Some nonterminals:", [S, NP, VP, PP, N, V, P, Det, VP / NP]) print(" S.symbol() =>", repr(S.symbol())) print() print(Production(S, [NP])) # Create some Grammar Productions grammar = CFG.fromstring( """ S -> NP VP PP -> P NP NP -> Det N | NP PP VP -> V NP | VP PP Det -> 'a' | 'the' N -> 'dog' | 'cat' V -> 'chased' | 'sat' P -> 'on' | 'in' """ ) print("A Grammar:", repr(grammar)) print(" grammar.start() =>", repr(grammar.start())) print(" grammar.productions() =>", end=" ") # Use string.replace(...) is to line-wrap the output. print(repr(grammar.productions()).replace(",", ",\n" + " " * 25)) print() def pcfg_demo(): """ A demonstration showing how a ``PCFG`` can be created and used. """ from nltk import induce_pcfg, treetransforms from nltk.corpus import treebank from nltk.parse import pchart toy_pcfg1 = PCFG.fromstring( """ S -> NP VP [1.0] NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] Det -> 'the' [0.8] | 'my' [0.2] N -> 'man' [0.5] | 'telescope' [0.5] VP -> VP PP [0.1] | V NP [0.7] | V [0.2] V -> 'ate' [0.35] | 'saw' [0.65] PP -> P NP [1.0] P -> 'with' [0.61] | 'under' [0.39] """ ) toy_pcfg2 = PCFG.fromstring( """ S -> NP VP [1.0] VP -> V NP [.59] VP -> V [.40] VP -> VP PP [.01] NP -> Det N [.41] NP -> Name [.28] NP -> NP PP [.31] PP -> P NP [1.0] V -> 'saw' [.21] V -> 'ate' [.51] V -> 'ran' [.28] N -> 'boy' [.11] N -> 'cookie' [.12] N -> 'table' [.13] N -> 'telescope' [.14] N -> 'hill' [.5] Name -> 'Jack' [.52] Name -> 'Bob' [.48] P -> 'with' [.61] P -> 'under' [.39] Det -> 'the' [.41] Det -> 'a' [.31] Det -> 'my' [.28] """ ) pcfg_prods = toy_pcfg1.productions() pcfg_prod = pcfg_prods[2] print("A PCFG production:", repr(pcfg_prod)) print(" pcfg_prod.lhs() =>", repr(pcfg_prod.lhs())) print(" pcfg_prod.rhs() =>", repr(pcfg_prod.rhs())) print(" pcfg_prod.prob() =>", repr(pcfg_prod.prob())) print() grammar = toy_pcfg2 print("A PCFG grammar:", repr(grammar)) print(" grammar.start() =>", repr(grammar.start())) print(" grammar.productions() =>", end=" ") # Use .replace(...) is to line-wrap the output. 
print(repr(grammar.productions()).replace(",", ",\n" + " " * 26)) print() # extract productions from three trees and induce the PCFG print("Induce PCFG grammar from treebank data:") productions = [] item = treebank._fileids[0] for tree in treebank.parsed_sents(item)[:3]: # perform optional tree transformations, e.g.: tree.collapse_unary(collapsePOS=False) tree.chomsky_normal_form(horzMarkov=2) productions += tree.productions() S = Nonterminal("S") grammar = induce_pcfg(S, productions) print(grammar) print() print("Parse sentence using induced grammar:") parser = pchart.InsideChartParser(grammar) parser.trace(3) # doesn't work as tokens are different: # sent = treebank.tokenized('wsj_0001.mrg')[0] sent = treebank.parsed_sents(item)[0].leaves() print(sent) for parse in parser.parse(sent): print(parse) def fcfg_demo(): import nltk.data g = nltk.data.load("grammars/book_grammars/feat0.fcfg") print(g) print() def dg_demo(): """ A demonstration showing the creation and inspection of a ``DependencyGrammar``. """ grammar = DependencyGrammar.fromstring( """ 'scratch' -> 'cats' | 'walls' 'walls' -> 'the' 'cats' -> 'the' """ ) print(grammar) def sdg_demo(): """ A demonstration of how to read a string representation of a CoNLL format dependency tree. """ from nltk.parse import DependencyGraph dg = DependencyGraph( """ 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _ 2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _ 3 met met Prep Prep voor 8 mod _ _ 4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _ 5 moeder moeder N N soort|ev|neut 3 obj1 _ _ 6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _ 7 gaan ga V V hulp|inf 6 vc _ _ 8 winkelen winkel V V intrans|inf 11 cnj _ _ 9 , , Punc Punc komma 8 punct _ _ 10 zwemmen zwem V V intrans|inf 11 cnj _ _ 11 of of Conj Conj neven 7 vc _ _ 12 terrassen terras N N soort|mv|neut 11 cnj _ _ 13 . . Punc Punc punt 12 punct _ _ """ ) tree = dg.tree() print(tree.pprint()) def demo(): cfg_demo() pcfg_demo() fcfg_demo() dg_demo() sdg_demo()
null
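The treebank-based induction step in ``pcfg_demo`` can be reproduced on a couple of hand-written trees without downloading a corpus; a minimal sketch, with the trees chosen purely for illustration.

from nltk import Nonterminal, induce_pcfg
from nltk.tree import Tree

trees = [
    Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))"),
    Tree.fromstring("(S (NP I) (VP (V ate)))"),
]

# Collect the CFG productions observed in the trees, then estimate rule
# probabilities by relative frequency.
productions = []
for t in trees:
    productions += t.productions()

grammar = induce_pcfg(Nonterminal("S"), productions)
print(grammar)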
170,656
from nltk.tree.tree import Tree def chomsky_normal_form( tree, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^" ): # assume all subtrees have homogeneous children # assume all terminals have no siblings # A semi-hack to have elegant looking code below. As a result, # any subtree with a branching factor greater than 999 will be incorrectly truncated. if horzMarkov is None: horzMarkov = 999 # Traverse the tree depth-first keeping a list of ancestor nodes to the root. # I chose not to use the tree.treepositions() method since it requires # two traversals of the tree (one to get the positions, one to iterate # over them) and node access time is proportional to the height of the node. # This method is 7x faster which helps when parsing 40,000 sentences. nodeList = [(tree, [tree.label()])] while nodeList != []: node, parent = nodeList.pop() if isinstance(node, Tree): # parent annotation parentString = "" originalNode = node.label() if vertMarkov != 0 and node != tree and isinstance(node[0], Tree): parentString = "{}<{}>".format(parentChar, "-".join(parent)) node.set_label(node.label() + parentString) parent = [originalNode] + parent[: vertMarkov - 1] # add children to the agenda before we mess with them for child in node: nodeList.append((child, parent)) # chomsky normal form factorization if len(node) > 2: childNodes = [child.label() for child in node] nodeCopy = node.copy() node[0:] = [] # delete the children curNode = node numChildren = len(nodeCopy) for i in range(1, numChildren - 1): if factor == "right": newHead = "{}{}<{}>{}".format( originalNode, childChar, "-".join( childNodes[i : min([i + horzMarkov, numChildren])] ), parentString, ) # create new head newNode = Tree(newHead, []) curNode[0:] = [nodeCopy.pop(0), newNode] else: newHead = "{}{}<{}>{}".format( originalNode, childChar, "-".join( childNodes[max([numChildren - i - horzMarkov, 0]) : -i] ), parentString, ) newNode = Tree(newHead, []) curNode[0:] = [newNode, nodeCopy.pop()] curNode = newNode curNode[0:] = [child for child in nodeCopy] def un_chomsky_normal_form( tree, expandUnary=True, childChar="|", parentChar="^", unaryChar="+" ): # Traverse the tree-depth first keeping a pointer to the parent for modification purposes. nodeList = [(tree, [])] while nodeList != []: node, parent = nodeList.pop() if isinstance(node, Tree): # if the node contains the 'childChar' character it means that # it is an artificial node and can be removed, although we still need # to move its children to its parent childIndex = node.label().find(childChar) if childIndex != -1: nodeIndex = parent.index(node) parent.remove(parent[nodeIndex]) # Generated node was on the left if the nodeIndex is 0 which # means the grammar was left factored. 
We must insert the children # at the beginning of the parent's children if nodeIndex == 0: parent.insert(0, node[0]) parent.insert(1, node[1]) else: parent.extend([node[0], node[1]]) # parent is now the current node so the children of parent will be added to the agenda node = parent else: parentIndex = node.label().find(parentChar) if parentIndex != -1: # strip the node name of the parent annotation node.set_label(node.label()[:parentIndex]) # expand collapsed unary productions if expandUnary == True: unaryIndex = node.label().find(unaryChar) if unaryIndex != -1: newNode = Tree( node.label()[unaryIndex + 1 :], [i for i in node] ) node.set_label(node.label()[:unaryIndex]) node[0:] = [newNode] for child in node: nodeList.append((child, node)) def collapse_unary(tree, collapsePOS=False, collapseRoot=False, joinChar="+"): """ Collapse subtrees with a single child (ie. unary productions) into a new non-terminal (Tree node) joined by 'joinChar'. This is useful when working with algorithms that do not allow unary productions, and completely removing the unary productions would require loss of useful information. The Tree is modified directly (since it is passed by reference) and no value is returned. :param tree: The Tree to be collapsed :type tree: Tree :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. Part-of-Speech tags) since they are always unary productions :type collapsePOS: bool :param collapseRoot: 'False' (default) will not modify the root production if it is unary. For the Penn WSJ treebank corpus, this corresponds to the TOP -> productions. :type collapseRoot: bool :param joinChar: A string used to connect collapsed node values (default = "+") :type joinChar: str """ if collapseRoot == False and isinstance(tree, Tree) and len(tree) == 1: nodeList = [tree[0]] else: nodeList = [tree] # depth-first traversal of tree while nodeList != []: node = nodeList.pop() if isinstance(node, Tree): if ( len(node) == 1 and isinstance(node[0], Tree) and (collapsePOS == True or isinstance(node[0, 0], Tree)) ): node.set_label(node.label() + joinChar + node[0].label()) node[0:] = [child for child in node[0]] # since we assigned the child's children to the current node, # evaluate the current node again nodeList.append(node) else: for child in node: nodeList.append(child) class Tree(list): r""" A Tree represents a hierarchical grouping of leaves and subtrees. For example, each constituent in a syntax tree is represented by a single Tree. A tree's children are encoded as a list of leaves and subtrees, where a leaf is a basic (non-tree) value; and a subtree is a nested Tree. >>> from nltk.tree import Tree >>> print(Tree(1, [2, Tree(3, [4]), 5])) (1 2 (3 4) 5) >>> vp = Tree('VP', [Tree('V', ['saw']), ... Tree('NP', ['him'])]) >>> s = Tree('S', [Tree('NP', ['I']), vp]) >>> print(s) (S (NP I) (VP (V saw) (NP him))) >>> print(s[1]) (VP (V saw) (NP him)) >>> print(s[1,1]) (NP him) >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))") >>> s == t True >>> t[1][1].set_label('X') >>> t[1][1].label() 'X' >>> print(t) (S (NP I) (VP (V saw) (X him))) >>> t[0], t[1,1] = t[1,1], t[0] >>> print(t) (S (X him) (VP (V saw) (NP I))) The length of a tree is the number of children it has. >>> len(t) 2 The set_label() and label() methods allow individual constituents to be labeled. For example, syntax trees use this label to specify phrase tags, such as "NP" and "VP". Several Tree methods use "tree positions" to specify children or descendants of a tree. 
Tree positions are defined as follows: - The tree position *i* specifies a Tree's *i*\ th child. - The tree position ``()`` specifies the Tree itself. - If *p* is the tree position of descendant *d*, then *p+i* specifies the *i*\ th child of *d*. I.e., every tree position is either a single index *i*, specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*, specifying ``tree[i1][i2]...[iN]``. Construct a new tree. This constructor can be called in one of two ways: - ``Tree(label, children)`` constructs a new tree with the specified label and list of children. - ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``. """ def __init__(self, node, children=None): if children is None: raise TypeError( "%s: Expected a node value and child list " % type(self).__name__ ) elif isinstance(children, str): raise TypeError( "%s() argument 2 should be a list, not a " "string" % type(self).__name__ ) else: list.__init__(self, children) self._label = node # //////////////////////////////////////////////////////////// # Comparison operators # //////////////////////////////////////////////////////////// def __eq__(self, other): return self.__class__ is other.__class__ and (self._label, list(self)) == ( other._label, list(other), ) def __lt__(self, other): if not isinstance(other, Tree): # raise_unorderable_types("<", self, other) # Sometimes children can be pure strings, # so we need to be able to compare with non-trees: return self.__class__.__name__ < other.__class__.__name__ elif self.__class__ is other.__class__: return (self._label, list(self)) < (other._label, list(other)) else: return self.__class__.__name__ < other.__class__.__name__ # @total_ordering doesn't work here, since the class inherits from a builtin class __ne__ = lambda self, other: not self == other __gt__ = lambda self, other: not (self < other or self == other) __le__ = lambda self, other: self < other or self == other __ge__ = lambda self, other: not self < other # //////////////////////////////////////////////////////////// # Disabled list operations # //////////////////////////////////////////////////////////// def __mul__(self, v): raise TypeError("Tree does not support multiplication") def __rmul__(self, v): raise TypeError("Tree does not support multiplication") def __add__(self, v): raise TypeError("Tree does not support addition") def __radd__(self, v): raise TypeError("Tree does not support addition") # //////////////////////////////////////////////////////////// # Indexing (with support for tree positions) # //////////////////////////////////////////////////////////// def __getitem__(self, index): if isinstance(index, (int, slice)): return list.__getitem__(self, index) elif isinstance(index, (list, tuple)): if len(index) == 0: return self elif len(index) == 1: return self[index[0]] else: return self[index[0]][index[1:]] else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) def __setitem__(self, index, value): if isinstance(index, (int, slice)): return list.__setitem__(self, index, value) elif isinstance(index, (list, tuple)): if len(index) == 0: raise IndexError("The tree position () may not be " "assigned to.") elif len(index) == 1: self[index[0]] = value else: self[index[0]][index[1:]] = value else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) def __delitem__(self, index): if isinstance(index, (int, slice)): return list.__delitem__(self, index) elif isinstance(index, (list, tuple)): if 
len(index) == 0: raise IndexError("The tree position () may not be deleted.") elif len(index) == 1: del self[index[0]] else: del self[index[0]][index[1:]] else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) # //////////////////////////////////////////////////////////// # Basic tree operations # //////////////////////////////////////////////////////////// def _get_node(self): """Outdated method to access the node value; use the label() method instead.""" def _set_node(self, value): """Outdated method to set the node value; use the set_label() method instead.""" node = property(_get_node, _set_node) def label(self): """ Return the node label of the tree. >>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))') >>> t.label() 'S' :return: the node label (typically a string) :rtype: any """ return self._label def set_label(self, label): """ Set the node label of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.set_label("T") >>> print(t) (T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat)))) :param label: the node label (typically a string) :type label: any """ self._label = label def leaves(self): """ Return the leaves of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.leaves() ['the', 'dog', 'chased', 'the', 'cat'] :return: a list containing this tree's leaves. The order reflects the order of the leaves in the tree's hierarchical structure. :rtype: list """ leaves = [] for child in self: if isinstance(child, Tree): leaves.extend(child.leaves()) else: leaves.append(child) return leaves def flatten(self): """ Return a flat version of the tree, with all non-root non-terminals removed. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> print(t.flatten()) (S the dog chased the cat) :return: a tree consisting of this tree's root connected directly to its leaves, omitting all intervening non-terminal nodes. :rtype: Tree """ return Tree(self.label(), self.leaves()) def height(self): """ Return the height of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.height() 5 >>> print(t[0,0]) (D the) >>> t[0,0].height() 2 :return: The height of this tree. The height of a tree containing no children is 1; the height of a tree containing only leaves is 2; and the height of any other tree is one plus the maximum of its children's heights. :rtype: int """ max_child_height = 0 for child in self: if isinstance(child, Tree): max_child_height = max(max_child_height, child.height()) else: max_child_height = max(max_child_height, 1) return 1 + max_child_height def treepositions(self, order="preorder"): """ >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.treepositions() # doctest: +ELLIPSIS [(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...] >>> for pos in t.treepositions('leaves'): ... t[pos] = t[pos][::-1].upper() >>> print(t) (S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC)))) :param order: One of: ``preorder``, ``postorder``, ``bothorder``, ``leaves``. 
""" positions = [] if order in ("preorder", "bothorder"): positions.append(()) for i, child in enumerate(self): if isinstance(child, Tree): childpos = child.treepositions(order) positions.extend((i,) + p for p in childpos) else: positions.append((i,)) if order in ("postorder", "bothorder"): positions.append(()) return positions def subtrees(self, filter=None): """ Generate all the subtrees of this tree, optionally restricted to trees matching the filter function. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> for s in t.subtrees(lambda t: t.height() == 2): ... print(s) (D the) (N dog) (V chased) (D the) (N cat) :type filter: function :param filter: the function to filter all local trees """ if not filter or filter(self): yield self for child in self: if isinstance(child, Tree): yield from child.subtrees(filter) def productions(self): """ Generate the productions that correspond to the non-terminal nodes of the tree. For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the form P -> C1 C2 ... Cn. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.productions() # doctest: +NORMALIZE_WHITESPACE [S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased', NP -> D N, D -> 'the', N -> 'cat'] :rtype: list(Production) """ if not isinstance(self._label, str): raise TypeError( "Productions can only be generated from trees having node labels that are strings" ) prods = [Production(Nonterminal(self._label), _child_names(self))] for child in self: if isinstance(child, Tree): prods += child.productions() return prods def pos(self): """ Return a sequence of pos-tagged words extracted from the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.pos() [('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')] :return: a list of tuples containing leaves and pre-terminals (part-of-speech tags). The order reflects the order of the leaves in the tree's hierarchical structure. :rtype: list(tuple) """ pos = [] for child in self: if isinstance(child, Tree): pos.extend(child.pos()) else: pos.append((child, self._label)) return pos def leaf_treeposition(self, index): """ :return: The tree position of the ``index``-th leaf in this tree. I.e., if ``tp=self.leaf_treeposition(i)``, then ``self[tp]==self.leaves()[i]``. :raise IndexError: If this tree contains fewer than ``index+1`` leaves, or if ``index<0``. """ if index < 0: raise IndexError("index must be non-negative") stack = [(self, ())] while stack: value, treepos = stack.pop() if not isinstance(value, Tree): if index == 0: return treepos else: index -= 1 else: for i in range(len(value) - 1, -1, -1): stack.append((value[i], treepos + (i,))) raise IndexError("index must be less than or equal to len(self)") def treeposition_spanning_leaves(self, start, end): """ :return: The tree position of the lowest descendant of this tree that dominates ``self.leaves()[start:end]``. :raise ValueError: if ``end <= start`` """ if end <= start: raise ValueError("end must be greater than start") # Find the tree positions of the start & end leaves, and # take the longest common subsequence. 
start_treepos = self.leaf_treeposition(start) end_treepos = self.leaf_treeposition(end - 1) # Find the first index where they mismatch: for i in range(len(start_treepos)): if i == len(end_treepos) or start_treepos[i] != end_treepos[i]: return start_treepos[:i] return start_treepos # //////////////////////////////////////////////////////////// # Transforms # //////////////////////////////////////////////////////////// def chomsky_normal_form( self, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^", ): """ This method can modify a tree in three ways: 1. Convert a tree into its Chomsky Normal Form (CNF) equivalent -- Every subtree has either two non-terminals or one terminal as its children. This process requires the creation of more"artificial" non-terminal nodes. 2. Markov (vertical) smoothing of children in new artificial nodes 3. Horizontal (parent) annotation of nodes :param factor: Right or left factoring method (default = "right") :type factor: str = [left|right] :param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings) :type horzMarkov: int | None :param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation) :type vertMarkov: int | None :param childChar: A string used in construction of the artificial nodes, separating the head of the original subtree from the child nodes that have yet to be expanded (default = "|") :type childChar: str :param parentChar: A string used to separate the node representation from its vertical annotation :type parentChar: str """ from nltk.tree.transforms import chomsky_normal_form chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar) def un_chomsky_normal_form( self, expandUnary=True, childChar="|", parentChar="^", unaryChar="+" ): """ This method modifies the tree in three ways: 1. Transforms a tree in Chomsky Normal Form back to its original structure (branching greater than two) 2. Removes any parent annotation (if it exists) 3. (optional) expands unary subtrees (if previously collapsed with collapseUnary(...) ) :param expandUnary: Flag to expand unary or not (default = True) :type expandUnary: bool :param childChar: A string separating the head node from its children in an artificial node (default = "|") :type childChar: str :param parentChar: A string separating the node label from its parent annotation (default = "^") :type parentChar: str :param unaryChar: A string joining two non-terminals in a unary production (default = "+") :type unaryChar: str """ from nltk.tree.transforms import un_chomsky_normal_form un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar) def collapse_unary(self, collapsePOS=False, collapseRoot=False, joinChar="+"): """ Collapse subtrees with a single child (ie. unary productions) into a new non-terminal (Tree node) joined by 'joinChar'. This is useful when working with algorithms that do not allow unary productions, and completely removing the unary productions would require loss of useful information. The Tree is modified directly (since it is passed by reference) and no value is returned. :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. Part-of-Speech tags) since they are always unary productions :type collapsePOS: bool :param collapseRoot: 'False' (default) will not modify the root production if it is unary. For the Penn WSJ treebank corpus, this corresponds to the TOP -> productions. 
:type collapseRoot: bool :param joinChar: A string used to connect collapsed node values (default = "+") :type joinChar: str """ from nltk.tree.transforms import collapse_unary collapse_unary(self, collapsePOS, collapseRoot, joinChar) # //////////////////////////////////////////////////////////// # Convert, copy # //////////////////////////////////////////////////////////// def convert(cls, tree): """ Convert a tree between different subtypes of Tree. ``cls`` determines which class will be used to encode the new tree. :type tree: Tree :param tree: The tree that should be converted. :return: The new Tree. """ if isinstance(tree, Tree): children = [cls.convert(child) for child in tree] return cls(tree._label, children) else: return tree def __copy__(self): return self.copy() def __deepcopy__(self, memo): return self.copy(deep=True) def copy(self, deep=False): if not deep: return type(self)(self._label, self) else: return type(self).convert(self) def _frozen_class(self): from nltk.tree.immutable import ImmutableTree return ImmutableTree def freeze(self, leaf_freezer=None): frozen_class = self._frozen_class() if leaf_freezer is None: newcopy = frozen_class.convert(self) else: newcopy = self.copy(deep=True) for pos in newcopy.treepositions("leaves"): newcopy[pos] = leaf_freezer(newcopy[pos]) newcopy = frozen_class.convert(newcopy) hash(newcopy) # Make sure the leaves are hashable. return newcopy # //////////////////////////////////////////////////////////// # Parsing # //////////////////////////////////////////////////////////// def fromstring( cls, s, brackets="()", read_node=None, read_leaf=None, node_pattern=None, leaf_pattern=None, remove_empty_top_bracketing=False, ): """ Read a bracketed tree string and return the resulting tree. Trees are represented as nested brackettings, such as:: (S (NP (NNP John)) (VP (V runs))) :type s: str :param s: The string to read :type brackets: str (length=2) :param brackets: The bracket characters used to mark the beginning and end of trees and subtrees. :type read_node: function :type read_leaf: function :param read_node, read_leaf: If specified, these functions are applied to the substrings of ``s`` corresponding to nodes and leaves (respectively) to obtain the values for those nodes and leaves. They should have the following signature: read_node(str) -> value For example, these functions could be used to process nodes and leaves whose values should be some type other than string (such as ``FeatStruct``). Note that by default, node strings and leaf strings are delimited by whitespace and brackets; to override this default, use the ``node_pattern`` and ``leaf_pattern`` arguments. :type node_pattern: str :type leaf_pattern: str :param node_pattern, leaf_pattern: Regular expression patterns used to find node and leaf substrings in ``s``. By default, both nodes patterns are defined to match any sequence of non-whitespace non-bracket characters. :type remove_empty_top_bracketing: bool :param remove_empty_top_bracketing: If the resulting tree has an empty node label, and is length one, then return its single child instead. This is useful for treebank trees, which sometimes contain an extra level of bracketing. :return: A tree corresponding to the string representation ``s``. If this class method is called using a subclass of Tree, then it will return a tree of that type. 
:rtype: Tree """ if not isinstance(brackets, str) or len(brackets) != 2: raise TypeError("brackets must be a length-2 string") if re.search(r"\s", brackets): raise TypeError("whitespace brackets not allowed") # Construct a regexp that will tokenize the string. open_b, close_b = brackets open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b)) if node_pattern is None: node_pattern = rf"[^\s{open_pattern}{close_pattern}]+" if leaf_pattern is None: leaf_pattern = rf"[^\s{open_pattern}{close_pattern}]+" token_re = re.compile( r"%s\s*(%s)?|%s|(%s)" % (open_pattern, node_pattern, close_pattern, leaf_pattern) ) # Walk through each token, updating a stack of trees. stack = [(None, [])] # list of (node, children) tuples for match in token_re.finditer(s): token = match.group() # Beginning of a tree/subtree if token[0] == open_b: if len(stack) == 1 and len(stack[0][1]) > 0: cls._parse_error(s, match, "end-of-string") label = token[1:].lstrip() if read_node is not None: label = read_node(label) stack.append((label, [])) # End of a tree/subtree elif token == close_b: if len(stack) == 1: if len(stack[0][1]) == 0: cls._parse_error(s, match, open_b) else: cls._parse_error(s, match, "end-of-string") label, children = stack.pop() stack[-1][1].append(cls(label, children)) # Leaf node else: if len(stack) == 1: cls._parse_error(s, match, open_b) if read_leaf is not None: token = read_leaf(token) stack[-1][1].append(token) # check that we got exactly one complete tree. if len(stack) > 1: cls._parse_error(s, "end-of-string", close_b) elif len(stack[0][1]) == 0: cls._parse_error(s, "end-of-string", open_b) else: assert stack[0][0] is None assert len(stack[0][1]) == 1 tree = stack[0][1][0] # If the tree has an extra level with node='', then get rid of # it. E.g.: "((S (NP ...) (VP ...)))" if remove_empty_top_bracketing and tree._label == "" and len(tree) == 1: tree = tree[0] # return the tree. return tree def _parse_error(cls, s, match, expecting): """ Display a friendly error message when parsing a tree string fails. :param s: The string we're parsing. :param match: regexp match of the problem token. :param expecting: what we expected to see instead. """ # Construct a basic error message if match == "end-of-string": pos, token = len(s), "end-of-string" else: pos, token = match.start(), match.group() msg = "%s.read(): expected %r but got %r\n%sat index %d." % ( cls.__name__, expecting, token, " " * 12, pos, ) # Add a display showing the error token itsels: s = s.replace("\n", " ").replace("\t", " ") offset = pos if len(s) > pos + 10: s = s[: pos + 10] + "..." if pos > 10: s = "..." + s[pos - 10 :] offset = 13 msg += '\n{}"{}"\n{}^'.format(" " * 16, s, " " * (17 + offset)) raise ValueError(msg) def fromlist(cls, l): """ :type l: list :param l: a tree represented as nested lists :return: A tree corresponding to the list representation ``l``. :rtype: Tree Convert nested lists to a NLTK Tree """ if type(l) == list and len(l) > 0: label = repr(l[0]) if len(l) > 1: return Tree(label, [cls.fromlist(child) for child in l[1:]]) else: return label # //////////////////////////////////////////////////////////// # Visualization & String Representation # //////////////////////////////////////////////////////////// def draw(self): """ Open a new window containing a graphical diagram of this tree. """ from nltk.draw.tree import draw_trees draw_trees(self) def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs): """ Pretty-print this tree as ASCII or Unicode art. 
For explanation of the arguments, see the documentation for `nltk.tree.prettyprinter.TreePrettyPrinter`. """ from nltk.tree.prettyprinter import TreePrettyPrinter print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs), file=stream) def __repr__(self): childstr = ", ".join(repr(c) for c in self) return "{}({}, [{}])".format( type(self).__name__, repr(self._label), childstr, ) def _repr_svg_(self): from svgling import draw_tree return draw_tree(self)._repr_svg_() def __str__(self): return self.pformat() def pprint(self, **kwargs): """ Print a string representation of this Tree to 'stream' """ if "stream" in kwargs: stream = kwargs["stream"] del kwargs["stream"] else: stream = None print(self.pformat(**kwargs), file=stream) def pformat(self, margin=70, indent=0, nodesep="", parens="()", quotes=False): """ :return: A pretty-printed string representation of this tree. :rtype: str :param margin: The right margin at which to do line-wrapping. :type margin: int :param indent: The indentation level at which printing begins. This number is used to decide how far to indent subsequent lines. :type indent: int :param nodesep: A string that is used to separate the node from the children. E.g., the default value ``':'`` gives trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``. """ # Try writing it on one line. s = self._pformat_flat(nodesep, parens, quotes) if len(s) + indent < margin: return s # If it doesn't fit on one line, then write it on multi-lines. if isinstance(self._label, str): s = f"{parens[0]}{self._label}{nodesep}" else: s = f"{parens[0]}{repr(self._label)}{nodesep}" for child in self: if isinstance(child, Tree): s += ( "\n" + " " * (indent + 2) + child.pformat(margin, indent + 2, nodesep, parens, quotes) ) elif isinstance(child, tuple): s += "\n" + " " * (indent + 2) + "/".join(child) elif isinstance(child, str) and not quotes: s += "\n" + " " * (indent + 2) + "%s" % child else: s += "\n" + " " * (indent + 2) + repr(child) return s + parens[1] def pformat_latex_qtree(self): r""" Returns a representation of the tree compatible with the LaTeX qtree package. This consists of the string ``\Tree`` followed by the tree represented in bracketed notation. For example, the following result was generated from a parse tree of the sentence ``The announcement astounded us``:: \Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ] [.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ] See https://www.ling.upenn.edu/advice/latex.html for the LaTeX style file for the qtree package. :return: A latex qtree representation of this tree. :rtype: str """ reserved_chars = re.compile(r"([#\$%&~_\{\}])") pformat = self.pformat(indent=6, nodesep="", parens=("[.", " ]")) return r"\Tree " + re.sub(reserved_chars, r"\\\1", pformat) def _pformat_flat(self, nodesep, parens, quotes): childstrs = [] for child in self: if isinstance(child, Tree): childstrs.append(child._pformat_flat(nodesep, parens, quotes)) elif isinstance(child, tuple): childstrs.append("/".join(child)) elif isinstance(child, str) and not quotes: childstrs.append("%s" % child) else: childstrs.append(repr(child)) if isinstance(self._label, str): return "{}{}{} {}{}".format( parens[0], self._label, nodesep, " ".join(childstrs), parens[1], ) else: return "{}{}{} {}{}".format( parens[0], repr(self._label), nodesep, " ".join(childstrs), parens[1], ) def deepcopy(x: _T, memo: Optional[Dict[int, Any]] = ..., _nil: Any = ...) -> _T: ... def draw_trees(*trees): """ Open a new window containing a graphical diagram of the given trees. 
:rtype: None """ TreeView(*trees).mainloop() return The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem: A demonstration showing how each tree transform can be used. Here is the function: def demo(): """ A demonstration showing how each tree transform can be used. """ from copy import deepcopy from nltk.draw.tree import draw_trees from nltk.tree.tree import Tree # original tree from WSJ bracketed text sentence = """(TOP (S (S (VP (VBN Turned) (ADVP (RB loose)) (PP (IN in) (NP (NP (NNP Shane) (NNP Longman) (POS 's)) (NN trading) (NN room))))) (, ,) (NP (DT the) (NN yuppie) (NNS dealers)) (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) (. .)))""" t = Tree.fromstring(sentence, remove_empty_top_bracketing=True) # collapse subtrees with only one child collapsedTree = deepcopy(t) collapse_unary(collapsedTree) # convert the tree to CNF cnfTree = deepcopy(collapsedTree) chomsky_normal_form(cnfTree) # convert the tree to CNF with parent annotation (one level) and horizontal smoothing of order two parentTree = deepcopy(collapsedTree) chomsky_normal_form(parentTree, horzMarkov=2, vertMarkov=1) # convert the tree back to its original form (used to make CYK results comparable) original = deepcopy(parentTree) un_chomsky_normal_form(original) # convert tree back to bracketed text sentence2 = original.pprint() print(sentence) print(sentence2) print("Sentences the same? ", sentence == sentence2) draw_trees(t, collapsedTree, cnfTree, parentTree, original)
A demonstration showing how each tree transform can be used.
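Editor's note on the row above: the demo compares the original bracketed string with the return value of original.pprint(), but pprint() as defined in the same prompt prints to a stream and returns None, so pformat() is the method that actually yields a comparable string. Below is a minimal round-trip sketch using only the Tree API listed in this row; the sample sentence and the expected True result are illustrative assumptions, not part of the original dataset row.

from copy import deepcopy
from nltk.tree import Tree

# Illustrative sentence (an assumption, not from the row): the VP has three
# children, so CNF binarization must introduce an artificial VP|<...> node.
t = Tree.fromstring(
    "(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat)) (PP (P down) (NP (D the) (N street)))))"
)

collapsed = deepcopy(t)
collapsed.collapse_unary()            # no non-POS unary chains here, so effectively a no-op

cnf = deepcopy(collapsed)
cnf.chomsky_normal_form(horzMarkov=2, vertMarkov=1)   # binarize with sibling and parent markovization

restored = deepcopy(cnf)
restored.un_chomsky_normal_form()     # drop artificial nodes and parent annotation again

# pformat() returns the bracketed string (pprint() only prints it), so use it
# for the round-trip check; this should print True if the transforms invert cleanly.
print(restored.pformat() == collapsed.pformat())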
170,657
import re from nltk.grammar import Nonterminal, Production from nltk.internals import deprecated class Tree(list): r""" A Tree represents a hierarchical grouping of leaves and subtrees. For example, each constituent in a syntax tree is represented by a single Tree. A tree's children are encoded as a list of leaves and subtrees, where a leaf is a basic (non-tree) value; and a subtree is a nested Tree. >>> from nltk.tree import Tree >>> print(Tree(1, [2, Tree(3, [4]), 5])) (1 2 (3 4) 5) >>> vp = Tree('VP', [Tree('V', ['saw']), ... Tree('NP', ['him'])]) >>> s = Tree('S', [Tree('NP', ['I']), vp]) >>> print(s) (S (NP I) (VP (V saw) (NP him))) >>> print(s[1]) (VP (V saw) (NP him)) >>> print(s[1,1]) (NP him) >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))") >>> s == t True >>> t[1][1].set_label('X') >>> t[1][1].label() 'X' >>> print(t) (S (NP I) (VP (V saw) (X him))) >>> t[0], t[1,1] = t[1,1], t[0] >>> print(t) (S (X him) (VP (V saw) (NP I))) The length of a tree is the number of children it has. >>> len(t) 2 The set_label() and label() methods allow individual constituents to be labeled. For example, syntax trees use this label to specify phrase tags, such as "NP" and "VP". Several Tree methods use "tree positions" to specify children or descendants of a tree. Tree positions are defined as follows: - The tree position *i* specifies a Tree's *i*\ th child. - The tree position ``()`` specifies the Tree itself. - If *p* is the tree position of descendant *d*, then *p+i* specifies the *i*\ th child of *d*. I.e., every tree position is either a single index *i*, specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*, specifying ``tree[i1][i2]...[iN]``. Construct a new tree. This constructor can be called in one of two ways: - ``Tree(label, children)`` constructs a new tree with the specified label and list of children. - ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``. 
""" def __init__(self, node, children=None): if children is None: raise TypeError( "%s: Expected a node value and child list " % type(self).__name__ ) elif isinstance(children, str): raise TypeError( "%s() argument 2 should be a list, not a " "string" % type(self).__name__ ) else: list.__init__(self, children) self._label = node # //////////////////////////////////////////////////////////// # Comparison operators # //////////////////////////////////////////////////////////// def __eq__(self, other): return self.__class__ is other.__class__ and (self._label, list(self)) == ( other._label, list(other), ) def __lt__(self, other): if not isinstance(other, Tree): # raise_unorderable_types("<", self, other) # Sometimes children can be pure strings, # so we need to be able to compare with non-trees: return self.__class__.__name__ < other.__class__.__name__ elif self.__class__ is other.__class__: return (self._label, list(self)) < (other._label, list(other)) else: return self.__class__.__name__ < other.__class__.__name__ # @total_ordering doesn't work here, since the class inherits from a builtin class __ne__ = lambda self, other: not self == other __gt__ = lambda self, other: not (self < other or self == other) __le__ = lambda self, other: self < other or self == other __ge__ = lambda self, other: not self < other # //////////////////////////////////////////////////////////// # Disabled list operations # //////////////////////////////////////////////////////////// def __mul__(self, v): raise TypeError("Tree does not support multiplication") def __rmul__(self, v): raise TypeError("Tree does not support multiplication") def __add__(self, v): raise TypeError("Tree does not support addition") def __radd__(self, v): raise TypeError("Tree does not support addition") # //////////////////////////////////////////////////////////// # Indexing (with support for tree positions) # //////////////////////////////////////////////////////////// def __getitem__(self, index): if isinstance(index, (int, slice)): return list.__getitem__(self, index) elif isinstance(index, (list, tuple)): if len(index) == 0: return self elif len(index) == 1: return self[index[0]] else: return self[index[0]][index[1:]] else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) def __setitem__(self, index, value): if isinstance(index, (int, slice)): return list.__setitem__(self, index, value) elif isinstance(index, (list, tuple)): if len(index) == 0: raise IndexError("The tree position () may not be " "assigned to.") elif len(index) == 1: self[index[0]] = value else: self[index[0]][index[1:]] = value else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) def __delitem__(self, index): if isinstance(index, (int, slice)): return list.__delitem__(self, index) elif isinstance(index, (list, tuple)): if len(index) == 0: raise IndexError("The tree position () may not be deleted.") elif len(index) == 1: del self[index[0]] else: del self[index[0]][index[1:]] else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) # //////////////////////////////////////////////////////////// # Basic tree operations # //////////////////////////////////////////////////////////// def _get_node(self): """Outdated method to access the node value; use the label() method instead.""" def _set_node(self, value): """Outdated method to set the node value; use the set_label() method instead.""" node = property(_get_node, 
_set_node) def label(self): """ Return the node label of the tree. >>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))') >>> t.label() 'S' :return: the node label (typically a string) :rtype: any """ return self._label def set_label(self, label): """ Set the node label of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.set_label("T") >>> print(t) (T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat)))) :param label: the node label (typically a string) :type label: any """ self._label = label def leaves(self): """ Return the leaves of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.leaves() ['the', 'dog', 'chased', 'the', 'cat'] :return: a list containing this tree's leaves. The order reflects the order of the leaves in the tree's hierarchical structure. :rtype: list """ leaves = [] for child in self: if isinstance(child, Tree): leaves.extend(child.leaves()) else: leaves.append(child) return leaves def flatten(self): """ Return a flat version of the tree, with all non-root non-terminals removed. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> print(t.flatten()) (S the dog chased the cat) :return: a tree consisting of this tree's root connected directly to its leaves, omitting all intervening non-terminal nodes. :rtype: Tree """ return Tree(self.label(), self.leaves()) def height(self): """ Return the height of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.height() 5 >>> print(t[0,0]) (D the) >>> t[0,0].height() 2 :return: The height of this tree. The height of a tree containing no children is 1; the height of a tree containing only leaves is 2; and the height of any other tree is one plus the maximum of its children's heights. :rtype: int """ max_child_height = 0 for child in self: if isinstance(child, Tree): max_child_height = max(max_child_height, child.height()) else: max_child_height = max(max_child_height, 1) return 1 + max_child_height def treepositions(self, order="preorder"): """ >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.treepositions() # doctest: +ELLIPSIS [(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...] >>> for pos in t.treepositions('leaves'): ... t[pos] = t[pos][::-1].upper() >>> print(t) (S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC)))) :param order: One of: ``preorder``, ``postorder``, ``bothorder``, ``leaves``. """ positions = [] if order in ("preorder", "bothorder"): positions.append(()) for i, child in enumerate(self): if isinstance(child, Tree): childpos = child.treepositions(order) positions.extend((i,) + p for p in childpos) else: positions.append((i,)) if order in ("postorder", "bothorder"): positions.append(()) return positions def subtrees(self, filter=None): """ Generate all the subtrees of this tree, optionally restricted to trees matching the filter function. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> for s in t.subtrees(lambda t: t.height() == 2): ... 
print(s) (D the) (N dog) (V chased) (D the) (N cat) :type filter: function :param filter: the function to filter all local trees """ if not filter or filter(self): yield self for child in self: if isinstance(child, Tree): yield from child.subtrees(filter) def productions(self): """ Generate the productions that correspond to the non-terminal nodes of the tree. For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the form P -> C1 C2 ... Cn. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.productions() # doctest: +NORMALIZE_WHITESPACE [S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased', NP -> D N, D -> 'the', N -> 'cat'] :rtype: list(Production) """ if not isinstance(self._label, str): raise TypeError( "Productions can only be generated from trees having node labels that are strings" ) prods = [Production(Nonterminal(self._label), _child_names(self))] for child in self: if isinstance(child, Tree): prods += child.productions() return prods def pos(self): """ Return a sequence of pos-tagged words extracted from the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.pos() [('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')] :return: a list of tuples containing leaves and pre-terminals (part-of-speech tags). The order reflects the order of the leaves in the tree's hierarchical structure. :rtype: list(tuple) """ pos = [] for child in self: if isinstance(child, Tree): pos.extend(child.pos()) else: pos.append((child, self._label)) return pos def leaf_treeposition(self, index): """ :return: The tree position of the ``index``-th leaf in this tree. I.e., if ``tp=self.leaf_treeposition(i)``, then ``self[tp]==self.leaves()[i]``. :raise IndexError: If this tree contains fewer than ``index+1`` leaves, or if ``index<0``. """ if index < 0: raise IndexError("index must be non-negative") stack = [(self, ())] while stack: value, treepos = stack.pop() if not isinstance(value, Tree): if index == 0: return treepos else: index -= 1 else: for i in range(len(value) - 1, -1, -1): stack.append((value[i], treepos + (i,))) raise IndexError("index must be less than or equal to len(self)") def treeposition_spanning_leaves(self, start, end): """ :return: The tree position of the lowest descendant of this tree that dominates ``self.leaves()[start:end]``. :raise ValueError: if ``end <= start`` """ if end <= start: raise ValueError("end must be greater than start") # Find the tree positions of the start & end leaves, and # take the longest common subsequence. start_treepos = self.leaf_treeposition(start) end_treepos = self.leaf_treeposition(end - 1) # Find the first index where they mismatch: for i in range(len(start_treepos)): if i == len(end_treepos) or start_treepos[i] != end_treepos[i]: return start_treepos[:i] return start_treepos # //////////////////////////////////////////////////////////// # Transforms # //////////////////////////////////////////////////////////// def chomsky_normal_form( self, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^", ): """ This method can modify a tree in three ways: 1. Convert a tree into its Chomsky Normal Form (CNF) equivalent -- Every subtree has either two non-terminals or one terminal as its children. This process requires the creation of more"artificial" non-terminal nodes. 2. Markov (vertical) smoothing of children in new artificial nodes 3. 
Horizontal (parent) annotation of nodes :param factor: Right or left factoring method (default = "right") :type factor: str = [left|right] :param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings) :type horzMarkov: int | None :param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation) :type vertMarkov: int | None :param childChar: A string used in construction of the artificial nodes, separating the head of the original subtree from the child nodes that have yet to be expanded (default = "|") :type childChar: str :param parentChar: A string used to separate the node representation from its vertical annotation :type parentChar: str """ from nltk.tree.transforms import chomsky_normal_form chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar) def un_chomsky_normal_form( self, expandUnary=True, childChar="|", parentChar="^", unaryChar="+" ): """ This method modifies the tree in three ways: 1. Transforms a tree in Chomsky Normal Form back to its original structure (branching greater than two) 2. Removes any parent annotation (if it exists) 3. (optional) expands unary subtrees (if previously collapsed with collapseUnary(...) ) :param expandUnary: Flag to expand unary or not (default = True) :type expandUnary: bool :param childChar: A string separating the head node from its children in an artificial node (default = "|") :type childChar: str :param parentChar: A string separating the node label from its parent annotation (default = "^") :type parentChar: str :param unaryChar: A string joining two non-terminals in a unary production (default = "+") :type unaryChar: str """ from nltk.tree.transforms import un_chomsky_normal_form un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar) def collapse_unary(self, collapsePOS=False, collapseRoot=False, joinChar="+"): """ Collapse subtrees with a single child (ie. unary productions) into a new non-terminal (Tree node) joined by 'joinChar'. This is useful when working with algorithms that do not allow unary productions, and completely removing the unary productions would require loss of useful information. The Tree is modified directly (since it is passed by reference) and no value is returned. :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. Part-of-Speech tags) since they are always unary productions :type collapsePOS: bool :param collapseRoot: 'False' (default) will not modify the root production if it is unary. For the Penn WSJ treebank corpus, this corresponds to the TOP -> productions. :type collapseRoot: bool :param joinChar: A string used to connect collapsed node values (default = "+") :type joinChar: str """ from nltk.tree.transforms import collapse_unary collapse_unary(self, collapsePOS, collapseRoot, joinChar) # //////////////////////////////////////////////////////////// # Convert, copy # //////////////////////////////////////////////////////////// def convert(cls, tree): """ Convert a tree between different subtypes of Tree. ``cls`` determines which class will be used to encode the new tree. :type tree: Tree :param tree: The tree that should be converted. :return: The new Tree. 
""" if isinstance(tree, Tree): children = [cls.convert(child) for child in tree] return cls(tree._label, children) else: return tree def __copy__(self): return self.copy() def __deepcopy__(self, memo): return self.copy(deep=True) def copy(self, deep=False): if not deep: return type(self)(self._label, self) else: return type(self).convert(self) def _frozen_class(self): from nltk.tree.immutable import ImmutableTree return ImmutableTree def freeze(self, leaf_freezer=None): frozen_class = self._frozen_class() if leaf_freezer is None: newcopy = frozen_class.convert(self) else: newcopy = self.copy(deep=True) for pos in newcopy.treepositions("leaves"): newcopy[pos] = leaf_freezer(newcopy[pos]) newcopy = frozen_class.convert(newcopy) hash(newcopy) # Make sure the leaves are hashable. return newcopy # //////////////////////////////////////////////////////////// # Parsing # //////////////////////////////////////////////////////////// def fromstring( cls, s, brackets="()", read_node=None, read_leaf=None, node_pattern=None, leaf_pattern=None, remove_empty_top_bracketing=False, ): """ Read a bracketed tree string and return the resulting tree. Trees are represented as nested brackettings, such as:: (S (NP (NNP John)) (VP (V runs))) :type s: str :param s: The string to read :type brackets: str (length=2) :param brackets: The bracket characters used to mark the beginning and end of trees and subtrees. :type read_node: function :type read_leaf: function :param read_node, read_leaf: If specified, these functions are applied to the substrings of ``s`` corresponding to nodes and leaves (respectively) to obtain the values for those nodes and leaves. They should have the following signature: read_node(str) -> value For example, these functions could be used to process nodes and leaves whose values should be some type other than string (such as ``FeatStruct``). Note that by default, node strings and leaf strings are delimited by whitespace and brackets; to override this default, use the ``node_pattern`` and ``leaf_pattern`` arguments. :type node_pattern: str :type leaf_pattern: str :param node_pattern, leaf_pattern: Regular expression patterns used to find node and leaf substrings in ``s``. By default, both nodes patterns are defined to match any sequence of non-whitespace non-bracket characters. :type remove_empty_top_bracketing: bool :param remove_empty_top_bracketing: If the resulting tree has an empty node label, and is length one, then return its single child instead. This is useful for treebank trees, which sometimes contain an extra level of bracketing. :return: A tree corresponding to the string representation ``s``. If this class method is called using a subclass of Tree, then it will return a tree of that type. :rtype: Tree """ if not isinstance(brackets, str) or len(brackets) != 2: raise TypeError("brackets must be a length-2 string") if re.search(r"\s", brackets): raise TypeError("whitespace brackets not allowed") # Construct a regexp that will tokenize the string. open_b, close_b = brackets open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b)) if node_pattern is None: node_pattern = rf"[^\s{open_pattern}{close_pattern}]+" if leaf_pattern is None: leaf_pattern = rf"[^\s{open_pattern}{close_pattern}]+" token_re = re.compile( r"%s\s*(%s)?|%s|(%s)" % (open_pattern, node_pattern, close_pattern, leaf_pattern) ) # Walk through each token, updating a stack of trees. 
stack = [(None, [])] # list of (node, children) tuples for match in token_re.finditer(s): token = match.group() # Beginning of a tree/subtree if token[0] == open_b: if len(stack) == 1 and len(stack[0][1]) > 0: cls._parse_error(s, match, "end-of-string") label = token[1:].lstrip() if read_node is not None: label = read_node(label) stack.append((label, [])) # End of a tree/subtree elif token == close_b: if len(stack) == 1: if len(stack[0][1]) == 0: cls._parse_error(s, match, open_b) else: cls._parse_error(s, match, "end-of-string") label, children = stack.pop() stack[-1][1].append(cls(label, children)) # Leaf node else: if len(stack) == 1: cls._parse_error(s, match, open_b) if read_leaf is not None: token = read_leaf(token) stack[-1][1].append(token) # check that we got exactly one complete tree. if len(stack) > 1: cls._parse_error(s, "end-of-string", close_b) elif len(stack[0][1]) == 0: cls._parse_error(s, "end-of-string", open_b) else: assert stack[0][0] is None assert len(stack[0][1]) == 1 tree = stack[0][1][0] # If the tree has an extra level with node='', then get rid of # it. E.g.: "((S (NP ...) (VP ...)))" if remove_empty_top_bracketing and tree._label == "" and len(tree) == 1: tree = tree[0] # return the tree. return tree def _parse_error(cls, s, match, expecting): """ Display a friendly error message when parsing a tree string fails. :param s: The string we're parsing. :param match: regexp match of the problem token. :param expecting: what we expected to see instead. """ # Construct a basic error message if match == "end-of-string": pos, token = len(s), "end-of-string" else: pos, token = match.start(), match.group() msg = "%s.read(): expected %r but got %r\n%sat index %d." % ( cls.__name__, expecting, token, " " * 12, pos, ) # Add a display showing the error token itsels: s = s.replace("\n", " ").replace("\t", " ") offset = pos if len(s) > pos + 10: s = s[: pos + 10] + "..." if pos > 10: s = "..." + s[pos - 10 :] offset = 13 msg += '\n{}"{}"\n{}^'.format(" " * 16, s, " " * (17 + offset)) raise ValueError(msg) def fromlist(cls, l): """ :type l: list :param l: a tree represented as nested lists :return: A tree corresponding to the list representation ``l``. :rtype: Tree Convert nested lists to a NLTK Tree """ if type(l) == list and len(l) > 0: label = repr(l[0]) if len(l) > 1: return Tree(label, [cls.fromlist(child) for child in l[1:]]) else: return label # //////////////////////////////////////////////////////////// # Visualization & String Representation # //////////////////////////////////////////////////////////// def draw(self): """ Open a new window containing a graphical diagram of this tree. """ from nltk.draw.tree import draw_trees draw_trees(self) def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs): """ Pretty-print this tree as ASCII or Unicode art. For explanation of the arguments, see the documentation for `nltk.tree.prettyprinter.TreePrettyPrinter`. 
""" from nltk.tree.prettyprinter import TreePrettyPrinter print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs), file=stream) def __repr__(self): childstr = ", ".join(repr(c) for c in self) return "{}({}, [{}])".format( type(self).__name__, repr(self._label), childstr, ) def _repr_svg_(self): from svgling import draw_tree return draw_tree(self)._repr_svg_() def __str__(self): return self.pformat() def pprint(self, **kwargs): """ Print a string representation of this Tree to 'stream' """ if "stream" in kwargs: stream = kwargs["stream"] del kwargs["stream"] else: stream = None print(self.pformat(**kwargs), file=stream) def pformat(self, margin=70, indent=0, nodesep="", parens="()", quotes=False): """ :return: A pretty-printed string representation of this tree. :rtype: str :param margin: The right margin at which to do line-wrapping. :type margin: int :param indent: The indentation level at which printing begins. This number is used to decide how far to indent subsequent lines. :type indent: int :param nodesep: A string that is used to separate the node from the children. E.g., the default value ``':'`` gives trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``. """ # Try writing it on one line. s = self._pformat_flat(nodesep, parens, quotes) if len(s) + indent < margin: return s # If it doesn't fit on one line, then write it on multi-lines. if isinstance(self._label, str): s = f"{parens[0]}{self._label}{nodesep}" else: s = f"{parens[0]}{repr(self._label)}{nodesep}" for child in self: if isinstance(child, Tree): s += ( "\n" + " " * (indent + 2) + child.pformat(margin, indent + 2, nodesep, parens, quotes) ) elif isinstance(child, tuple): s += "\n" + " " * (indent + 2) + "/".join(child) elif isinstance(child, str) and not quotes: s += "\n" + " " * (indent + 2) + "%s" % child else: s += "\n" + " " * (indent + 2) + repr(child) return s + parens[1] def pformat_latex_qtree(self): r""" Returns a representation of the tree compatible with the LaTeX qtree package. This consists of the string ``\Tree`` followed by the tree represented in bracketed notation. For example, the following result was generated from a parse tree of the sentence ``The announcement astounded us``:: \Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ] [.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ] See https://www.ling.upenn.edu/advice/latex.html for the LaTeX style file for the qtree package. :return: A latex qtree representation of this tree. :rtype: str """ reserved_chars = re.compile(r"([#\$%&~_\{\}])") pformat = self.pformat(indent=6, nodesep="", parens=("[.", " ]")) return r"\Tree " + re.sub(reserved_chars, r"\\\1", pformat) def _pformat_flat(self, nodesep, parens, quotes): childstrs = [] for child in self: if isinstance(child, Tree): childstrs.append(child._pformat_flat(nodesep, parens, quotes)) elif isinstance(child, tuple): childstrs.append("/".join(child)) elif isinstance(child, str) and not quotes: childstrs.append("%s" % child) else: childstrs.append(repr(child)) if isinstance(self._label, str): return "{}{}{} {}{}".format( parens[0], self._label, nodesep, " ".join(childstrs), parens[1], ) else: return "{}{}{} {}{}".format( parens[0], repr(self._label), nodesep, " ".join(childstrs), parens[1], ) class Nonterminal: """ A non-terminal symbol for a context free grammar. ``Nonterminal`` is a wrapper class for node values; it is used by ``Production`` objects to distinguish node values from leaf values. The node value that is wrapped by a ``Nonterminal`` is known as its "symbol". 
Symbols are typically strings representing phrasal categories (such as ``"NP"`` or ``"VP"``). However, more complex symbol types are sometimes used (e.g., for lexicalized grammars). Since symbols are node values, they must be immutable and hashable. Two ``Nonterminals`` are considered equal if their symbols are equal. :see: ``CFG``, ``Production`` :type _symbol: any :ivar _symbol: The node value corresponding to this ``Nonterminal``. This value must be immutable and hashable. """ def __init__(self, symbol): """ Construct a new non-terminal from the given symbol. :type symbol: any :param symbol: The node value corresponding to this ``Nonterminal``. This value must be immutable and hashable. """ self._symbol = symbol def symbol(self): """ Return the node value corresponding to this ``Nonterminal``. :rtype: (any) """ return self._symbol def __eq__(self, other): """ Return True if this non-terminal is equal to ``other``. In particular, return True if ``other`` is a ``Nonterminal`` and this non-terminal's symbol is equal to ``other`` 's symbol. :rtype: bool """ return type(self) == type(other) and self._symbol == other._symbol def __ne__(self, other): return not self == other def __lt__(self, other): if not isinstance(other, Nonterminal): raise_unorderable_types("<", self, other) return self._symbol < other._symbol def __hash__(self): return hash(self._symbol) def __repr__(self): """ Return a string representation for this ``Nonterminal``. :rtype: str """ if isinstance(self._symbol, str): return "%s" % self._symbol else: return "%s" % repr(self._symbol) def __str__(self): """ Return a string representation for this ``Nonterminal``. :rtype: str """ if isinstance(self._symbol, str): return "%s" % self._symbol else: return "%s" % repr(self._symbol) def __div__(self, rhs): """ Return a new nonterminal whose symbol is ``A/B``, where ``A`` is the symbol for this nonterminal, and ``B`` is the symbol for rhs. :param rhs: The nonterminal used to form the right hand side of the new nonterminal. :type rhs: Nonterminal :rtype: Nonterminal """ return Nonterminal(f"{self._symbol}/{rhs._symbol}") def __truediv__(self, rhs): """ Return a new nonterminal whose symbol is ``A/B``, where ``A`` is the symbol for this nonterminal, and ``B`` is the symbol for rhs. This function allows use of the slash ``/`` operator with the future import of division. :param rhs: The nonterminal used to form the right hand side of the new nonterminal. :type rhs: Nonterminal :rtype: Nonterminal """ return self.__div__(rhs) def _child_names(tree): names = [] for child in tree: if isinstance(child, Tree): names.append(Nonterminal(child._label)) else: names.append(child) return names
null
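Row 170,657 above lists the Tree and Nonterminal classes (plus the _child_names helper) as dependencies but carries no task text and a null docstring. For reference, here is a small sketch of how those two classes interact, restricted to methods defined in that prompt; the example sentence is an illustrative assumption.

from nltk import Tree
from nltk.grammar import Nonterminal

t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V barked)))")

# productions() wraps each node label in a Nonterminal (via _child_names)
# and emits one CFG production per non-terminal node.
for prod in t.productions():
    print(prod)        # S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V, V -> 'barked'

print(t.pos())         # [('the', 'D'), ('dog', 'N'), ('barked', 'V')]

# Nonterminal equality compares the wrapped symbol; the / operator builds a
# slash category from two nonterminals.
print(Nonterminal("NP") == Nonterminal("NP"))   # True
print(Nonterminal("S") / Nonterminal("NP"))     # S/NP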
170,658
import re from nltk.grammar import Nonterminal, Production from nltk.internals import deprecated class Tree(list): r""" A Tree represents a hierarchical grouping of leaves and subtrees. For example, each constituent in a syntax tree is represented by a single Tree. A tree's children are encoded as a list of leaves and subtrees, where a leaf is a basic (non-tree) value; and a subtree is a nested Tree. >>> from nltk.tree import Tree >>> print(Tree(1, [2, Tree(3, [4]), 5])) (1 2 (3 4) 5) >>> vp = Tree('VP', [Tree('V', ['saw']), ... Tree('NP', ['him'])]) >>> s = Tree('S', [Tree('NP', ['I']), vp]) >>> print(s) (S (NP I) (VP (V saw) (NP him))) >>> print(s[1]) (VP (V saw) (NP him)) >>> print(s[1,1]) (NP him) >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))") >>> s == t True >>> t[1][1].set_label('X') >>> t[1][1].label() 'X' >>> print(t) (S (NP I) (VP (V saw) (X him))) >>> t[0], t[1,1] = t[1,1], t[0] >>> print(t) (S (X him) (VP (V saw) (NP I))) The length of a tree is the number of children it has. >>> len(t) 2 The set_label() and label() methods allow individual constituents to be labeled. For example, syntax trees use this label to specify phrase tags, such as "NP" and "VP". Several Tree methods use "tree positions" to specify children or descendants of a tree. Tree positions are defined as follows: - The tree position *i* specifies a Tree's *i*\ th child. - The tree position ``()`` specifies the Tree itself. - If *p* is the tree position of descendant *d*, then *p+i* specifies the *i*\ th child of *d*. I.e., every tree position is either a single index *i*, specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*, specifying ``tree[i1][i2]...[iN]``. Construct a new tree. This constructor can be called in one of two ways: - ``Tree(label, children)`` constructs a new tree with the specified label and list of children. - ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``. 
""" def __init__(self, node, children=None): if children is None: raise TypeError( "%s: Expected a node value and child list " % type(self).__name__ ) elif isinstance(children, str): raise TypeError( "%s() argument 2 should be a list, not a " "string" % type(self).__name__ ) else: list.__init__(self, children) self._label = node # //////////////////////////////////////////////////////////// # Comparison operators # //////////////////////////////////////////////////////////// def __eq__(self, other): return self.__class__ is other.__class__ and (self._label, list(self)) == ( other._label, list(other), ) def __lt__(self, other): if not isinstance(other, Tree): # raise_unorderable_types("<", self, other) # Sometimes children can be pure strings, # so we need to be able to compare with non-trees: return self.__class__.__name__ < other.__class__.__name__ elif self.__class__ is other.__class__: return (self._label, list(self)) < (other._label, list(other)) else: return self.__class__.__name__ < other.__class__.__name__ # @total_ordering doesn't work here, since the class inherits from a builtin class __ne__ = lambda self, other: not self == other __gt__ = lambda self, other: not (self < other or self == other) __le__ = lambda self, other: self < other or self == other __ge__ = lambda self, other: not self < other # //////////////////////////////////////////////////////////// # Disabled list operations # //////////////////////////////////////////////////////////// def __mul__(self, v): raise TypeError("Tree does not support multiplication") def __rmul__(self, v): raise TypeError("Tree does not support multiplication") def __add__(self, v): raise TypeError("Tree does not support addition") def __radd__(self, v): raise TypeError("Tree does not support addition") # //////////////////////////////////////////////////////////// # Indexing (with support for tree positions) # //////////////////////////////////////////////////////////// def __getitem__(self, index): if isinstance(index, (int, slice)): return list.__getitem__(self, index) elif isinstance(index, (list, tuple)): if len(index) == 0: return self elif len(index) == 1: return self[index[0]] else: return self[index[0]][index[1:]] else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) def __setitem__(self, index, value): if isinstance(index, (int, slice)): return list.__setitem__(self, index, value) elif isinstance(index, (list, tuple)): if len(index) == 0: raise IndexError("The tree position () may not be " "assigned to.") elif len(index) == 1: self[index[0]] = value else: self[index[0]][index[1:]] = value else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) def __delitem__(self, index): if isinstance(index, (int, slice)): return list.__delitem__(self, index) elif isinstance(index, (list, tuple)): if len(index) == 0: raise IndexError("The tree position () may not be deleted.") elif len(index) == 1: del self[index[0]] else: del self[index[0]][index[1:]] else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) # //////////////////////////////////////////////////////////// # Basic tree operations # //////////////////////////////////////////////////////////// def _get_node(self): """Outdated method to access the node value; use the label() method instead.""" def _set_node(self, value): """Outdated method to set the node value; use the set_label() method instead.""" node = property(_get_node, 
_set_node) def label(self): """ Return the node label of the tree. >>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))') >>> t.label() 'S' :return: the node label (typically a string) :rtype: any """ return self._label def set_label(self, label): """ Set the node label of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.set_label("T") >>> print(t) (T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat)))) :param label: the node label (typically a string) :type label: any """ self._label = label def leaves(self): """ Return the leaves of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.leaves() ['the', 'dog', 'chased', 'the', 'cat'] :return: a list containing this tree's leaves. The order reflects the order of the leaves in the tree's hierarchical structure. :rtype: list """ leaves = [] for child in self: if isinstance(child, Tree): leaves.extend(child.leaves()) else: leaves.append(child) return leaves def flatten(self): """ Return a flat version of the tree, with all non-root non-terminals removed. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> print(t.flatten()) (S the dog chased the cat) :return: a tree consisting of this tree's root connected directly to its leaves, omitting all intervening non-terminal nodes. :rtype: Tree """ return Tree(self.label(), self.leaves()) def height(self): """ Return the height of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.height() 5 >>> print(t[0,0]) (D the) >>> t[0,0].height() 2 :return: The height of this tree. The height of a tree containing no children is 1; the height of a tree containing only leaves is 2; and the height of any other tree is one plus the maximum of its children's heights. :rtype: int """ max_child_height = 0 for child in self: if isinstance(child, Tree): max_child_height = max(max_child_height, child.height()) else: max_child_height = max(max_child_height, 1) return 1 + max_child_height def treepositions(self, order="preorder"): """ >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.treepositions() # doctest: +ELLIPSIS [(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...] >>> for pos in t.treepositions('leaves'): ... t[pos] = t[pos][::-1].upper() >>> print(t) (S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC)))) :param order: One of: ``preorder``, ``postorder``, ``bothorder``, ``leaves``. """ positions = [] if order in ("preorder", "bothorder"): positions.append(()) for i, child in enumerate(self): if isinstance(child, Tree): childpos = child.treepositions(order) positions.extend((i,) + p for p in childpos) else: positions.append((i,)) if order in ("postorder", "bothorder"): positions.append(()) return positions def subtrees(self, filter=None): """ Generate all the subtrees of this tree, optionally restricted to trees matching the filter function. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> for s in t.subtrees(lambda t: t.height() == 2): ... 
print(s) (D the) (N dog) (V chased) (D the) (N cat) :type filter: function :param filter: the function to filter all local trees """ if not filter or filter(self): yield self for child in self: if isinstance(child, Tree): yield from child.subtrees(filter) def productions(self): """ Generate the productions that correspond to the non-terminal nodes of the tree. For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the form P -> C1 C2 ... Cn. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.productions() # doctest: +NORMALIZE_WHITESPACE [S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased', NP -> D N, D -> 'the', N -> 'cat'] :rtype: list(Production) """ if not isinstance(self._label, str): raise TypeError( "Productions can only be generated from trees having node labels that are strings" ) prods = [Production(Nonterminal(self._label), _child_names(self))] for child in self: if isinstance(child, Tree): prods += child.productions() return prods def pos(self): """ Return a sequence of pos-tagged words extracted from the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.pos() [('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')] :return: a list of tuples containing leaves and pre-terminals (part-of-speech tags). The order reflects the order of the leaves in the tree's hierarchical structure. :rtype: list(tuple) """ pos = [] for child in self: if isinstance(child, Tree): pos.extend(child.pos()) else: pos.append((child, self._label)) return pos def leaf_treeposition(self, index): """ :return: The tree position of the ``index``-th leaf in this tree. I.e., if ``tp=self.leaf_treeposition(i)``, then ``self[tp]==self.leaves()[i]``. :raise IndexError: If this tree contains fewer than ``index+1`` leaves, or if ``index<0``. """ if index < 0: raise IndexError("index must be non-negative") stack = [(self, ())] while stack: value, treepos = stack.pop() if not isinstance(value, Tree): if index == 0: return treepos else: index -= 1 else: for i in range(len(value) - 1, -1, -1): stack.append((value[i], treepos + (i,))) raise IndexError("index must be less than or equal to len(self)") def treeposition_spanning_leaves(self, start, end): """ :return: The tree position of the lowest descendant of this tree that dominates ``self.leaves()[start:end]``. :raise ValueError: if ``end <= start`` """ if end <= start: raise ValueError("end must be greater than start") # Find the tree positions of the start & end leaves, and # take the longest common subsequence. start_treepos = self.leaf_treeposition(start) end_treepos = self.leaf_treeposition(end - 1) # Find the first index where they mismatch: for i in range(len(start_treepos)): if i == len(end_treepos) or start_treepos[i] != end_treepos[i]: return start_treepos[:i] return start_treepos # //////////////////////////////////////////////////////////// # Transforms # //////////////////////////////////////////////////////////// def chomsky_normal_form( self, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^", ): """ This method can modify a tree in three ways: 1. Convert a tree into its Chomsky Normal Form (CNF) equivalent -- Every subtree has either two non-terminals or one terminal as its children. This process requires the creation of more"artificial" non-terminal nodes. 2. Markov (vertical) smoothing of children in new artificial nodes 3. 
Horizontal (parent) annotation of nodes :param factor: Right or left factoring method (default = "right") :type factor: str = [left|right] :param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings) :type horzMarkov: int | None :param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation) :type vertMarkov: int | None :param childChar: A string used in construction of the artificial nodes, separating the head of the original subtree from the child nodes that have yet to be expanded (default = "|") :type childChar: str :param parentChar: A string used to separate the node representation from its vertical annotation :type parentChar: str """ from nltk.tree.transforms import chomsky_normal_form chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar) def un_chomsky_normal_form( self, expandUnary=True, childChar="|", parentChar="^", unaryChar="+" ): """ This method modifies the tree in three ways: 1. Transforms a tree in Chomsky Normal Form back to its original structure (branching greater than two) 2. Removes any parent annotation (if it exists) 3. (optional) expands unary subtrees (if previously collapsed with collapseUnary(...) ) :param expandUnary: Flag to expand unary or not (default = True) :type expandUnary: bool :param childChar: A string separating the head node from its children in an artificial node (default = "|") :type childChar: str :param parentChar: A string separating the node label from its parent annotation (default = "^") :type parentChar: str :param unaryChar: A string joining two non-terminals in a unary production (default = "+") :type unaryChar: str """ from nltk.tree.transforms import un_chomsky_normal_form un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar) def collapse_unary(self, collapsePOS=False, collapseRoot=False, joinChar="+"): """ Collapse subtrees with a single child (ie. unary productions) into a new non-terminal (Tree node) joined by 'joinChar'. This is useful when working with algorithms that do not allow unary productions, and completely removing the unary productions would require loss of useful information. The Tree is modified directly (since it is passed by reference) and no value is returned. :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. Part-of-Speech tags) since they are always unary productions :type collapsePOS: bool :param collapseRoot: 'False' (default) will not modify the root production if it is unary. For the Penn WSJ treebank corpus, this corresponds to the TOP -> productions. :type collapseRoot: bool :param joinChar: A string used to connect collapsed node values (default = "+") :type joinChar: str """ from nltk.tree.transforms import collapse_unary collapse_unary(self, collapsePOS, collapseRoot, joinChar) # //////////////////////////////////////////////////////////// # Convert, copy # //////////////////////////////////////////////////////////// def convert(cls, tree): """ Convert a tree between different subtypes of Tree. ``cls`` determines which class will be used to encode the new tree. :type tree: Tree :param tree: The tree that should be converted. :return: The new Tree. 
""" if isinstance(tree, Tree): children = [cls.convert(child) for child in tree] return cls(tree._label, children) else: return tree def __copy__(self): return self.copy() def __deepcopy__(self, memo): return self.copy(deep=True) def copy(self, deep=False): if not deep: return type(self)(self._label, self) else: return type(self).convert(self) def _frozen_class(self): from nltk.tree.immutable import ImmutableTree return ImmutableTree def freeze(self, leaf_freezer=None): frozen_class = self._frozen_class() if leaf_freezer is None: newcopy = frozen_class.convert(self) else: newcopy = self.copy(deep=True) for pos in newcopy.treepositions("leaves"): newcopy[pos] = leaf_freezer(newcopy[pos]) newcopy = frozen_class.convert(newcopy) hash(newcopy) # Make sure the leaves are hashable. return newcopy # //////////////////////////////////////////////////////////// # Parsing # //////////////////////////////////////////////////////////// def fromstring( cls, s, brackets="()", read_node=None, read_leaf=None, node_pattern=None, leaf_pattern=None, remove_empty_top_bracketing=False, ): """ Read a bracketed tree string and return the resulting tree. Trees are represented as nested brackettings, such as:: (S (NP (NNP John)) (VP (V runs))) :type s: str :param s: The string to read :type brackets: str (length=2) :param brackets: The bracket characters used to mark the beginning and end of trees and subtrees. :type read_node: function :type read_leaf: function :param read_node, read_leaf: If specified, these functions are applied to the substrings of ``s`` corresponding to nodes and leaves (respectively) to obtain the values for those nodes and leaves. They should have the following signature: read_node(str) -> value For example, these functions could be used to process nodes and leaves whose values should be some type other than string (such as ``FeatStruct``). Note that by default, node strings and leaf strings are delimited by whitespace and brackets; to override this default, use the ``node_pattern`` and ``leaf_pattern`` arguments. :type node_pattern: str :type leaf_pattern: str :param node_pattern, leaf_pattern: Regular expression patterns used to find node and leaf substrings in ``s``. By default, both nodes patterns are defined to match any sequence of non-whitespace non-bracket characters. :type remove_empty_top_bracketing: bool :param remove_empty_top_bracketing: If the resulting tree has an empty node label, and is length one, then return its single child instead. This is useful for treebank trees, which sometimes contain an extra level of bracketing. :return: A tree corresponding to the string representation ``s``. If this class method is called using a subclass of Tree, then it will return a tree of that type. :rtype: Tree """ if not isinstance(brackets, str) or len(brackets) != 2: raise TypeError("brackets must be a length-2 string") if re.search(r"\s", brackets): raise TypeError("whitespace brackets not allowed") # Construct a regexp that will tokenize the string. open_b, close_b = brackets open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b)) if node_pattern is None: node_pattern = rf"[^\s{open_pattern}{close_pattern}]+" if leaf_pattern is None: leaf_pattern = rf"[^\s{open_pattern}{close_pattern}]+" token_re = re.compile( r"%s\s*(%s)?|%s|(%s)" % (open_pattern, node_pattern, close_pattern, leaf_pattern) ) # Walk through each token, updating a stack of trees. 
stack = [(None, [])] # list of (node, children) tuples for match in token_re.finditer(s): token = match.group() # Beginning of a tree/subtree if token[0] == open_b: if len(stack) == 1 and len(stack[0][1]) > 0: cls._parse_error(s, match, "end-of-string") label = token[1:].lstrip() if read_node is not None: label = read_node(label) stack.append((label, [])) # End of a tree/subtree elif token == close_b: if len(stack) == 1: if len(stack[0][1]) == 0: cls._parse_error(s, match, open_b) else: cls._parse_error(s, match, "end-of-string") label, children = stack.pop() stack[-1][1].append(cls(label, children)) # Leaf node else: if len(stack) == 1: cls._parse_error(s, match, open_b) if read_leaf is not None: token = read_leaf(token) stack[-1][1].append(token) # check that we got exactly one complete tree. if len(stack) > 1: cls._parse_error(s, "end-of-string", close_b) elif len(stack[0][1]) == 0: cls._parse_error(s, "end-of-string", open_b) else: assert stack[0][0] is None assert len(stack[0][1]) == 1 tree = stack[0][1][0] # If the tree has an extra level with node='', then get rid of # it. E.g.: "((S (NP ...) (VP ...)))" if remove_empty_top_bracketing and tree._label == "" and len(tree) == 1: tree = tree[0] # return the tree. return tree def _parse_error(cls, s, match, expecting): """ Display a friendly error message when parsing a tree string fails. :param s: The string we're parsing. :param match: regexp match of the problem token. :param expecting: what we expected to see instead. """ # Construct a basic error message if match == "end-of-string": pos, token = len(s), "end-of-string" else: pos, token = match.start(), match.group() msg = "%s.read(): expected %r but got %r\n%sat index %d." % ( cls.__name__, expecting, token, " " * 12, pos, ) # Add a display showing the error token itsels: s = s.replace("\n", " ").replace("\t", " ") offset = pos if len(s) > pos + 10: s = s[: pos + 10] + "..." if pos > 10: s = "..." + s[pos - 10 :] offset = 13 msg += '\n{}"{}"\n{}^'.format(" " * 16, s, " " * (17 + offset)) raise ValueError(msg) def fromlist(cls, l): """ :type l: list :param l: a tree represented as nested lists :return: A tree corresponding to the list representation ``l``. :rtype: Tree Convert nested lists to a NLTK Tree """ if type(l) == list and len(l) > 0: label = repr(l[0]) if len(l) > 1: return Tree(label, [cls.fromlist(child) for child in l[1:]]) else: return label # //////////////////////////////////////////////////////////// # Visualization & String Representation # //////////////////////////////////////////////////////////// def draw(self): """ Open a new window containing a graphical diagram of this tree. """ from nltk.draw.tree import draw_trees draw_trees(self) def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs): """ Pretty-print this tree as ASCII or Unicode art. For explanation of the arguments, see the documentation for `nltk.tree.prettyprinter.TreePrettyPrinter`. 
""" from nltk.tree.prettyprinter import TreePrettyPrinter print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs), file=stream) def __repr__(self): childstr = ", ".join(repr(c) for c in self) return "{}({}, [{}])".format( type(self).__name__, repr(self._label), childstr, ) def _repr_svg_(self): from svgling import draw_tree return draw_tree(self)._repr_svg_() def __str__(self): return self.pformat() def pprint(self, **kwargs): """ Print a string representation of this Tree to 'stream' """ if "stream" in kwargs: stream = kwargs["stream"] del kwargs["stream"] else: stream = None print(self.pformat(**kwargs), file=stream) def pformat(self, margin=70, indent=0, nodesep="", parens="()", quotes=False): """ :return: A pretty-printed string representation of this tree. :rtype: str :param margin: The right margin at which to do line-wrapping. :type margin: int :param indent: The indentation level at which printing begins. This number is used to decide how far to indent subsequent lines. :type indent: int :param nodesep: A string that is used to separate the node from the children. E.g., the default value ``':'`` gives trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``. """ # Try writing it on one line. s = self._pformat_flat(nodesep, parens, quotes) if len(s) + indent < margin: return s # If it doesn't fit on one line, then write it on multi-lines. if isinstance(self._label, str): s = f"{parens[0]}{self._label}{nodesep}" else: s = f"{parens[0]}{repr(self._label)}{nodesep}" for child in self: if isinstance(child, Tree): s += ( "\n" + " " * (indent + 2) + child.pformat(margin, indent + 2, nodesep, parens, quotes) ) elif isinstance(child, tuple): s += "\n" + " " * (indent + 2) + "/".join(child) elif isinstance(child, str) and not quotes: s += "\n" + " " * (indent + 2) + "%s" % child else: s += "\n" + " " * (indent + 2) + repr(child) return s + parens[1] def pformat_latex_qtree(self): r""" Returns a representation of the tree compatible with the LaTeX qtree package. This consists of the string ``\Tree`` followed by the tree represented in bracketed notation. For example, the following result was generated from a parse tree of the sentence ``The announcement astounded us``:: \Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ] [.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ] See https://www.ling.upenn.edu/advice/latex.html for the LaTeX style file for the qtree package. :return: A latex qtree representation of this tree. :rtype: str """ reserved_chars = re.compile(r"([#\$%&~_\{\}])") pformat = self.pformat(indent=6, nodesep="", parens=("[.", " ]")) return r"\Tree " + re.sub(reserved_chars, r"\\\1", pformat) def _pformat_flat(self, nodesep, parens, quotes): childstrs = [] for child in self: if isinstance(child, Tree): childstrs.append(child._pformat_flat(nodesep, parens, quotes)) elif isinstance(child, tuple): childstrs.append("/".join(child)) elif isinstance(child, str) and not quotes: childstrs.append("%s" % child) else: childstrs.append(repr(child)) if isinstance(self._label, str): return "{}{}{} {}{}".format( parens[0], self._label, nodesep, " ".join(childstrs), parens[1], ) else: return "{}{}{} {}{}".format( parens[0], repr(self._label), nodesep, " ".join(childstrs), parens[1], ) def chomsky_normal_form( tree, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^" ): # assume all subtrees have homogeneous children # assume all terminals have no siblings # A semi-hack to have elegant looking code below. 
As a result, # any subtree with a branching factor greater than 999 will be incorrectly truncated. if horzMarkov is None: horzMarkov = 999 # Traverse the tree depth-first keeping a list of ancestor nodes to the root. # I chose not to use the tree.treepositions() method since it requires # two traversals of the tree (one to get the positions, one to iterate # over them) and node access time is proportional to the height of the node. # This method is 7x faster which helps when parsing 40,000 sentences. nodeList = [(tree, [tree.label()])] while nodeList != []: node, parent = nodeList.pop() if isinstance(node, Tree): # parent annotation parentString = "" originalNode = node.label() if vertMarkov != 0 and node != tree and isinstance(node[0], Tree): parentString = "{}<{}>".format(parentChar, "-".join(parent)) node.set_label(node.label() + parentString) parent = [originalNode] + parent[: vertMarkov - 1] # add children to the agenda before we mess with them for child in node: nodeList.append((child, parent)) # chomsky normal form factorization if len(node) > 2: childNodes = [child.label() for child in node] nodeCopy = node.copy() node[0:] = [] # delete the children curNode = node numChildren = len(nodeCopy) for i in range(1, numChildren - 1): if factor == "right": newHead = "{}{}<{}>{}".format( originalNode, childChar, "-".join( childNodes[i : min([i + horzMarkov, numChildren])] ), parentString, ) # create new head newNode = Tree(newHead, []) curNode[0:] = [nodeCopy.pop(0), newNode] else: newHead = "{}{}<{}>{}".format( originalNode, childChar, "-".join( childNodes[max([numChildren - i - horzMarkov, 0]) : -i] ), parentString, ) newNode = Tree(newHead, []) curNode[0:] = [newNode, nodeCopy.pop()] curNode = newNode curNode[0:] = [child for child in nodeCopy] def collapse_unary(tree, collapsePOS=False, collapseRoot=False, joinChar="+"): """ Collapse subtrees with a single child (ie. unary productions) into a new non-terminal (Tree node) joined by 'joinChar'. This is useful when working with algorithms that do not allow unary productions, and completely removing the unary productions would require loss of useful information. The Tree is modified directly (since it is passed by reference) and no value is returned. :param tree: The Tree to be collapsed :type tree: Tree :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. Part-of-Speech tags) since they are always unary productions :type collapsePOS: bool :param collapseRoot: 'False' (default) will not modify the root production if it is unary. For the Penn WSJ treebank corpus, this corresponds to the TOP -> productions. :type collapseRoot: bool :param joinChar: A string used to connect collapsed node values (default = "+") :type joinChar: str """ if collapseRoot == False and isinstance(tree, Tree) and len(tree) == 1: nodeList = [tree[0]] else: nodeList = [tree] # depth-first traversal of tree while nodeList != []: node = nodeList.pop() if isinstance(node, Tree): if ( len(node) == 1 and isinstance(node[0], Tree) and (collapsePOS == True or isinstance(node[0, 0], Tree)) ): node.set_label(node.label() + joinChar + node[0].label()) node[0:] = [child for child in node[0]] # since we assigned the child's children to the current node, # evaluate the current node again nodeList.append(node) else: for child in node: nodeList.append(child) The provided code snippet includes necessary dependencies for implementing the `demo` function. 
Write a Python function `def demo()` to solve the following problem: A demonstration showing how Trees and Trees can be used. This demonstration creates a Tree, and loads a Tree from the Treebank corpus, and shows the results of calling several of their methods. Here is the function: def demo(): """ A demonstration showing how Trees and Trees can be used. This demonstration creates a Tree, and loads a Tree from the Treebank corpus, and shows the results of calling several of their methods. """ from nltk import ProbabilisticTree, Tree # Demonstrate tree parsing. s = "(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))" t = Tree.fromstring(s) print("Convert bracketed string into tree:") print(t) print(t.__repr__()) print("Display tree properties:") print(t.label()) # tree's constituent type print(t[0]) # tree's first child print(t[1]) # tree's second child print(t.height()) print(t.leaves()) print(t[1]) print(t[1, 1]) print(t[1, 1, 0]) # Demonstrate tree modification. the_cat = t[0] the_cat.insert(1, Tree.fromstring("(JJ big)")) print("Tree modification:") print(t) t[1, 1, 1] = Tree.fromstring("(NN cake)") print(t) print() # Tree transforms print("Collapse unary:") t.collapse_unary() print(t) print("Chomsky normal form:") t.chomsky_normal_form() print(t) print() # Demonstrate probabilistic trees. pt = ProbabilisticTree("x", ["y", "z"], prob=0.5) print("Probabilistic Tree:") print(pt) print() # Demonstrate parsing of treebank output format. t = Tree.fromstring(t.pformat()) print("Convert tree to bracketed string and back again:") print(t) print() # Demonstrate LaTeX output print("LaTeX output:") print(t.pformat_latex_qtree()) print() # Demonstrate Productions print("Production output:") print(t.productions()) print() # Demonstrate tree nodes containing objects other than strings t.set_label(("test", 3)) print(t)
A demonstration showing how Tree and ProbabilisticTree objects can be used. This demonstration builds a Tree from a bracketed string, modifies it, applies the unary-collapsing and Chomsky-normal-form transforms, and shows the results of calling several of their methods.
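A quick, hedged illustration of the CNF transform defined in this record's dependencies. It assumes an installed nltk whose Tree behaves like the class quoted above; the example sentence and the checks are illustrative only, not part of the dataset.

from nltk import Tree

# A ternary VP forces the right-factoring branch of chomsky_normal_form
t = Tree.fromstring(
    "(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat)) (PP (P in) (NP (D the) (N park)))))"
)
leaves_before = t.leaves()

t.chomsky_normal_form()                           # right-factor nodes with more than two children
assert t.leaves() == leaves_before                # the frontier is untouched
assert all(len(st) <= 2 for st in t.subtrees())   # every subtree is now at most binary

t.un_chomsky_normal_form()                        # removes the artificial "VP|<...>" nodes again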
170,659
import re from nltk.tree.tree import Tree The provided code snippet includes necessary dependencies for implementing the `bracket_parse` function. Write a Python function `def bracket_parse(s)` to solve the following problem: Use Tree.read(s, remove_empty_top_bracketing=True) instead. Here is the function: def bracket_parse(s): """ Use Tree.read(s, remove_empty_top_bracketing=True) instead. """ raise NameError("Use Tree.read(s, remove_empty_top_bracketing=True) instead.")
Use Tree.read(s, remove_empty_top_bracketing=True) instead.
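Since bracket_parse only raises a pointer to the replacement API, a minimal sketch of that replacement may help. It relies on the fromstring method of the Tree class reproduced elsewhere in this dataset (which accepts remove_empty_top_bracketing) and assumes nltk is installed; the sentence is illustrative only.

from nltk import Tree

# Treebank files sometimes wrap each sentence in an extra, unlabeled bracket pair
t = Tree.fromstring("((S (NP I) (VP (V saw) (NP him))))",
                    remove_empty_top_bracketing=True)
print(t.label())   # expected: 'S' -- the empty top bracketing has been removed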
170,660
import re from nltk.tree.tree import Tree class Tree(list): r""" A Tree represents a hierarchical grouping of leaves and subtrees. For example, each constituent in a syntax tree is represented by a single Tree. A tree's children are encoded as a list of leaves and subtrees, where a leaf is a basic (non-tree) value; and a subtree is a nested Tree. >>> from nltk.tree import Tree >>> print(Tree(1, [2, Tree(3, [4]), 5])) (1 2 (3 4) 5) >>> vp = Tree('VP', [Tree('V', ['saw']), ... Tree('NP', ['him'])]) >>> s = Tree('S', [Tree('NP', ['I']), vp]) >>> print(s) (S (NP I) (VP (V saw) (NP him))) >>> print(s[1]) (VP (V saw) (NP him)) >>> print(s[1,1]) (NP him) >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))") >>> s == t True >>> t[1][1].set_label('X') >>> t[1][1].label() 'X' >>> print(t) (S (NP I) (VP (V saw) (X him))) >>> t[0], t[1,1] = t[1,1], t[0] >>> print(t) (S (X him) (VP (V saw) (NP I))) The length of a tree is the number of children it has. >>> len(t) 2 The set_label() and label() methods allow individual constituents to be labeled. For example, syntax trees use this label to specify phrase tags, such as "NP" and "VP". Several Tree methods use "tree positions" to specify children or descendants of a tree. Tree positions are defined as follows: - The tree position *i* specifies a Tree's *i*\ th child. - The tree position ``()`` specifies the Tree itself. - If *p* is the tree position of descendant *d*, then *p+i* specifies the *i*\ th child of *d*. I.e., every tree position is either a single index *i*, specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*, specifying ``tree[i1][i2]...[iN]``. Construct a new tree. This constructor can be called in one of two ways: - ``Tree(label, children)`` constructs a new tree with the specified label and list of children. - ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``. 
""" def __init__(self, node, children=None): if children is None: raise TypeError( "%s: Expected a node value and child list " % type(self).__name__ ) elif isinstance(children, str): raise TypeError( "%s() argument 2 should be a list, not a " "string" % type(self).__name__ ) else: list.__init__(self, children) self._label = node # //////////////////////////////////////////////////////////// # Comparison operators # //////////////////////////////////////////////////////////// def __eq__(self, other): return self.__class__ is other.__class__ and (self._label, list(self)) == ( other._label, list(other), ) def __lt__(self, other): if not isinstance(other, Tree): # raise_unorderable_types("<", self, other) # Sometimes children can be pure strings, # so we need to be able to compare with non-trees: return self.__class__.__name__ < other.__class__.__name__ elif self.__class__ is other.__class__: return (self._label, list(self)) < (other._label, list(other)) else: return self.__class__.__name__ < other.__class__.__name__ # @total_ordering doesn't work here, since the class inherits from a builtin class __ne__ = lambda self, other: not self == other __gt__ = lambda self, other: not (self < other or self == other) __le__ = lambda self, other: self < other or self == other __ge__ = lambda self, other: not self < other # //////////////////////////////////////////////////////////// # Disabled list operations # //////////////////////////////////////////////////////////// def __mul__(self, v): raise TypeError("Tree does not support multiplication") def __rmul__(self, v): raise TypeError("Tree does not support multiplication") def __add__(self, v): raise TypeError("Tree does not support addition") def __radd__(self, v): raise TypeError("Tree does not support addition") # //////////////////////////////////////////////////////////// # Indexing (with support for tree positions) # //////////////////////////////////////////////////////////// def __getitem__(self, index): if isinstance(index, (int, slice)): return list.__getitem__(self, index) elif isinstance(index, (list, tuple)): if len(index) == 0: return self elif len(index) == 1: return self[index[0]] else: return self[index[0]][index[1:]] else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) def __setitem__(self, index, value): if isinstance(index, (int, slice)): return list.__setitem__(self, index, value) elif isinstance(index, (list, tuple)): if len(index) == 0: raise IndexError("The tree position () may not be " "assigned to.") elif len(index) == 1: self[index[0]] = value else: self[index[0]][index[1:]] = value else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) def __delitem__(self, index): if isinstance(index, (int, slice)): return list.__delitem__(self, index) elif isinstance(index, (list, tuple)): if len(index) == 0: raise IndexError("The tree position () may not be deleted.") elif len(index) == 1: del self[index[0]] else: del self[index[0]][index[1:]] else: raise TypeError( "%s indices must be integers, not %s" % (type(self).__name__, type(index).__name__) ) # //////////////////////////////////////////////////////////// # Basic tree operations # //////////////////////////////////////////////////////////// def _get_node(self): """Outdated method to access the node value; use the label() method instead.""" def _set_node(self, value): """Outdated method to set the node value; use the set_label() method instead.""" node = property(_get_node, 
_set_node) def label(self): """ Return the node label of the tree. >>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))') >>> t.label() 'S' :return: the node label (typically a string) :rtype: any """ return self._label def set_label(self, label): """ Set the node label of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.set_label("T") >>> print(t) (T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat)))) :param label: the node label (typically a string) :type label: any """ self._label = label def leaves(self): """ Return the leaves of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.leaves() ['the', 'dog', 'chased', 'the', 'cat'] :return: a list containing this tree's leaves. The order reflects the order of the leaves in the tree's hierarchical structure. :rtype: list """ leaves = [] for child in self: if isinstance(child, Tree): leaves.extend(child.leaves()) else: leaves.append(child) return leaves def flatten(self): """ Return a flat version of the tree, with all non-root non-terminals removed. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> print(t.flatten()) (S the dog chased the cat) :return: a tree consisting of this tree's root connected directly to its leaves, omitting all intervening non-terminal nodes. :rtype: Tree """ return Tree(self.label(), self.leaves()) def height(self): """ Return the height of the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.height() 5 >>> print(t[0,0]) (D the) >>> t[0,0].height() 2 :return: The height of this tree. The height of a tree containing no children is 1; the height of a tree containing only leaves is 2; and the height of any other tree is one plus the maximum of its children's heights. :rtype: int """ max_child_height = 0 for child in self: if isinstance(child, Tree): max_child_height = max(max_child_height, child.height()) else: max_child_height = max(max_child_height, 1) return 1 + max_child_height def treepositions(self, order="preorder"): """ >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.treepositions() # doctest: +ELLIPSIS [(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...] >>> for pos in t.treepositions('leaves'): ... t[pos] = t[pos][::-1].upper() >>> print(t) (S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC)))) :param order: One of: ``preorder``, ``postorder``, ``bothorder``, ``leaves``. """ positions = [] if order in ("preorder", "bothorder"): positions.append(()) for i, child in enumerate(self): if isinstance(child, Tree): childpos = child.treepositions(order) positions.extend((i,) + p for p in childpos) else: positions.append((i,)) if order in ("postorder", "bothorder"): positions.append(()) return positions def subtrees(self, filter=None): """ Generate all the subtrees of this tree, optionally restricted to trees matching the filter function. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> for s in t.subtrees(lambda t: t.height() == 2): ... 
print(s) (D the) (N dog) (V chased) (D the) (N cat) :type filter: function :param filter: the function to filter all local trees """ if not filter or filter(self): yield self for child in self: if isinstance(child, Tree): yield from child.subtrees(filter) def productions(self): """ Generate the productions that correspond to the non-terminal nodes of the tree. For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the form P -> C1 C2 ... Cn. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.productions() # doctest: +NORMALIZE_WHITESPACE [S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased', NP -> D N, D -> 'the', N -> 'cat'] :rtype: list(Production) """ if not isinstance(self._label, str): raise TypeError( "Productions can only be generated from trees having node labels that are strings" ) prods = [Production(Nonterminal(self._label), _child_names(self))] for child in self: if isinstance(child, Tree): prods += child.productions() return prods def pos(self): """ Return a sequence of pos-tagged words extracted from the tree. >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") >>> t.pos() [('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')] :return: a list of tuples containing leaves and pre-terminals (part-of-speech tags). The order reflects the order of the leaves in the tree's hierarchical structure. :rtype: list(tuple) """ pos = [] for child in self: if isinstance(child, Tree): pos.extend(child.pos()) else: pos.append((child, self._label)) return pos def leaf_treeposition(self, index): """ :return: The tree position of the ``index``-th leaf in this tree. I.e., if ``tp=self.leaf_treeposition(i)``, then ``self[tp]==self.leaves()[i]``. :raise IndexError: If this tree contains fewer than ``index+1`` leaves, or if ``index<0``. """ if index < 0: raise IndexError("index must be non-negative") stack = [(self, ())] while stack: value, treepos = stack.pop() if not isinstance(value, Tree): if index == 0: return treepos else: index -= 1 else: for i in range(len(value) - 1, -1, -1): stack.append((value[i], treepos + (i,))) raise IndexError("index must be less than or equal to len(self)") def treeposition_spanning_leaves(self, start, end): """ :return: The tree position of the lowest descendant of this tree that dominates ``self.leaves()[start:end]``. :raise ValueError: if ``end <= start`` """ if end <= start: raise ValueError("end must be greater than start") # Find the tree positions of the start & end leaves, and # take the longest common subsequence. start_treepos = self.leaf_treeposition(start) end_treepos = self.leaf_treeposition(end - 1) # Find the first index where they mismatch: for i in range(len(start_treepos)): if i == len(end_treepos) or start_treepos[i] != end_treepos[i]: return start_treepos[:i] return start_treepos # //////////////////////////////////////////////////////////// # Transforms # //////////////////////////////////////////////////////////// def chomsky_normal_form( self, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^", ): """ This method can modify a tree in three ways: 1. Convert a tree into its Chomsky Normal Form (CNF) equivalent -- Every subtree has either two non-terminals or one terminal as its children. This process requires the creation of more"artificial" non-terminal nodes. 2. Markov (vertical) smoothing of children in new artificial nodes 3. 
Horizontal (parent) annotation of nodes :param factor: Right or left factoring method (default = "right") :type factor: str = [left|right] :param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings) :type horzMarkov: int | None :param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation) :type vertMarkov: int | None :param childChar: A string used in construction of the artificial nodes, separating the head of the original subtree from the child nodes that have yet to be expanded (default = "|") :type childChar: str :param parentChar: A string used to separate the node representation from its vertical annotation :type parentChar: str """ from nltk.tree.transforms import chomsky_normal_form chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar) def un_chomsky_normal_form( self, expandUnary=True, childChar="|", parentChar="^", unaryChar="+" ): """ This method modifies the tree in three ways: 1. Transforms a tree in Chomsky Normal Form back to its original structure (branching greater than two) 2. Removes any parent annotation (if it exists) 3. (optional) expands unary subtrees (if previously collapsed with collapseUnary(...) ) :param expandUnary: Flag to expand unary or not (default = True) :type expandUnary: bool :param childChar: A string separating the head node from its children in an artificial node (default = "|") :type childChar: str :param parentChar: A string separating the node label from its parent annotation (default = "^") :type parentChar: str :param unaryChar: A string joining two non-terminals in a unary production (default = "+") :type unaryChar: str """ from nltk.tree.transforms import un_chomsky_normal_form un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar) def collapse_unary(self, collapsePOS=False, collapseRoot=False, joinChar="+"): """ Collapse subtrees with a single child (ie. unary productions) into a new non-terminal (Tree node) joined by 'joinChar'. This is useful when working with algorithms that do not allow unary productions, and completely removing the unary productions would require loss of useful information. The Tree is modified directly (since it is passed by reference) and no value is returned. :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. Part-of-Speech tags) since they are always unary productions :type collapsePOS: bool :param collapseRoot: 'False' (default) will not modify the root production if it is unary. For the Penn WSJ treebank corpus, this corresponds to the TOP -> productions. :type collapseRoot: bool :param joinChar: A string used to connect collapsed node values (default = "+") :type joinChar: str """ from nltk.tree.transforms import collapse_unary collapse_unary(self, collapsePOS, collapseRoot, joinChar) # //////////////////////////////////////////////////////////// # Convert, copy # //////////////////////////////////////////////////////////// def convert(cls, tree): """ Convert a tree between different subtypes of Tree. ``cls`` determines which class will be used to encode the new tree. :type tree: Tree :param tree: The tree that should be converted. :return: The new Tree. 
""" if isinstance(tree, Tree): children = [cls.convert(child) for child in tree] return cls(tree._label, children) else: return tree def __copy__(self): return self.copy() def __deepcopy__(self, memo): return self.copy(deep=True) def copy(self, deep=False): if not deep: return type(self)(self._label, self) else: return type(self).convert(self) def _frozen_class(self): from nltk.tree.immutable import ImmutableTree return ImmutableTree def freeze(self, leaf_freezer=None): frozen_class = self._frozen_class() if leaf_freezer is None: newcopy = frozen_class.convert(self) else: newcopy = self.copy(deep=True) for pos in newcopy.treepositions("leaves"): newcopy[pos] = leaf_freezer(newcopy[pos]) newcopy = frozen_class.convert(newcopy) hash(newcopy) # Make sure the leaves are hashable. return newcopy # //////////////////////////////////////////////////////////// # Parsing # //////////////////////////////////////////////////////////// def fromstring( cls, s, brackets="()", read_node=None, read_leaf=None, node_pattern=None, leaf_pattern=None, remove_empty_top_bracketing=False, ): """ Read a bracketed tree string and return the resulting tree. Trees are represented as nested brackettings, such as:: (S (NP (NNP John)) (VP (V runs))) :type s: str :param s: The string to read :type brackets: str (length=2) :param brackets: The bracket characters used to mark the beginning and end of trees and subtrees. :type read_node: function :type read_leaf: function :param read_node, read_leaf: If specified, these functions are applied to the substrings of ``s`` corresponding to nodes and leaves (respectively) to obtain the values for those nodes and leaves. They should have the following signature: read_node(str) -> value For example, these functions could be used to process nodes and leaves whose values should be some type other than string (such as ``FeatStruct``). Note that by default, node strings and leaf strings are delimited by whitespace and brackets; to override this default, use the ``node_pattern`` and ``leaf_pattern`` arguments. :type node_pattern: str :type leaf_pattern: str :param node_pattern, leaf_pattern: Regular expression patterns used to find node and leaf substrings in ``s``. By default, both nodes patterns are defined to match any sequence of non-whitespace non-bracket characters. :type remove_empty_top_bracketing: bool :param remove_empty_top_bracketing: If the resulting tree has an empty node label, and is length one, then return its single child instead. This is useful for treebank trees, which sometimes contain an extra level of bracketing. :return: A tree corresponding to the string representation ``s``. If this class method is called using a subclass of Tree, then it will return a tree of that type. :rtype: Tree """ if not isinstance(brackets, str) or len(brackets) != 2: raise TypeError("brackets must be a length-2 string") if re.search(r"\s", brackets): raise TypeError("whitespace brackets not allowed") # Construct a regexp that will tokenize the string. open_b, close_b = brackets open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b)) if node_pattern is None: node_pattern = rf"[^\s{open_pattern}{close_pattern}]+" if leaf_pattern is None: leaf_pattern = rf"[^\s{open_pattern}{close_pattern}]+" token_re = re.compile( r"%s\s*(%s)?|%s|(%s)" % (open_pattern, node_pattern, close_pattern, leaf_pattern) ) # Walk through each token, updating a stack of trees. 
stack = [(None, [])] # list of (node, children) tuples for match in token_re.finditer(s): token = match.group() # Beginning of a tree/subtree if token[0] == open_b: if len(stack) == 1 and len(stack[0][1]) > 0: cls._parse_error(s, match, "end-of-string") label = token[1:].lstrip() if read_node is not None: label = read_node(label) stack.append((label, [])) # End of a tree/subtree elif token == close_b: if len(stack) == 1: if len(stack[0][1]) == 0: cls._parse_error(s, match, open_b) else: cls._parse_error(s, match, "end-of-string") label, children = stack.pop() stack[-1][1].append(cls(label, children)) # Leaf node else: if len(stack) == 1: cls._parse_error(s, match, open_b) if read_leaf is not None: token = read_leaf(token) stack[-1][1].append(token) # check that we got exactly one complete tree. if len(stack) > 1: cls._parse_error(s, "end-of-string", close_b) elif len(stack[0][1]) == 0: cls._parse_error(s, "end-of-string", open_b) else: assert stack[0][0] is None assert len(stack[0][1]) == 1 tree = stack[0][1][0] # If the tree has an extra level with node='', then get rid of # it. E.g.: "((S (NP ...) (VP ...)))" if remove_empty_top_bracketing and tree._label == "" and len(tree) == 1: tree = tree[0] # return the tree. return tree def _parse_error(cls, s, match, expecting): """ Display a friendly error message when parsing a tree string fails. :param s: The string we're parsing. :param match: regexp match of the problem token. :param expecting: what we expected to see instead. """ # Construct a basic error message if match == "end-of-string": pos, token = len(s), "end-of-string" else: pos, token = match.start(), match.group() msg = "%s.read(): expected %r but got %r\n%sat index %d." % ( cls.__name__, expecting, token, " " * 12, pos, ) # Add a display showing the error token itsels: s = s.replace("\n", " ").replace("\t", " ") offset = pos if len(s) > pos + 10: s = s[: pos + 10] + "..." if pos > 10: s = "..." + s[pos - 10 :] offset = 13 msg += '\n{}"{}"\n{}^'.format(" " * 16, s, " " * (17 + offset)) raise ValueError(msg) def fromlist(cls, l): """ :type l: list :param l: a tree represented as nested lists :return: A tree corresponding to the list representation ``l``. :rtype: Tree Convert nested lists to a NLTK Tree """ if type(l) == list and len(l) > 0: label = repr(l[0]) if len(l) > 1: return Tree(label, [cls.fromlist(child) for child in l[1:]]) else: return label # //////////////////////////////////////////////////////////// # Visualization & String Representation # //////////////////////////////////////////////////////////// def draw(self): """ Open a new window containing a graphical diagram of this tree. """ from nltk.draw.tree import draw_trees draw_trees(self) def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs): """ Pretty-print this tree as ASCII or Unicode art. For explanation of the arguments, see the documentation for `nltk.tree.prettyprinter.TreePrettyPrinter`. 
""" from nltk.tree.prettyprinter import TreePrettyPrinter print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs), file=stream) def __repr__(self): childstr = ", ".join(repr(c) for c in self) return "{}({}, [{}])".format( type(self).__name__, repr(self._label), childstr, ) def _repr_svg_(self): from svgling import draw_tree return draw_tree(self)._repr_svg_() def __str__(self): return self.pformat() def pprint(self, **kwargs): """ Print a string representation of this Tree to 'stream' """ if "stream" in kwargs: stream = kwargs["stream"] del kwargs["stream"] else: stream = None print(self.pformat(**kwargs), file=stream) def pformat(self, margin=70, indent=0, nodesep="", parens="()", quotes=False): """ :return: A pretty-printed string representation of this tree. :rtype: str :param margin: The right margin at which to do line-wrapping. :type margin: int :param indent: The indentation level at which printing begins. This number is used to decide how far to indent subsequent lines. :type indent: int :param nodesep: A string that is used to separate the node from the children. E.g., the default value ``':'`` gives trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``. """ # Try writing it on one line. s = self._pformat_flat(nodesep, parens, quotes) if len(s) + indent < margin: return s # If it doesn't fit on one line, then write it on multi-lines. if isinstance(self._label, str): s = f"{parens[0]}{self._label}{nodesep}" else: s = f"{parens[0]}{repr(self._label)}{nodesep}" for child in self: if isinstance(child, Tree): s += ( "\n" + " " * (indent + 2) + child.pformat(margin, indent + 2, nodesep, parens, quotes) ) elif isinstance(child, tuple): s += "\n" + " " * (indent + 2) + "/".join(child) elif isinstance(child, str) and not quotes: s += "\n" + " " * (indent + 2) + "%s" % child else: s += "\n" + " " * (indent + 2) + repr(child) return s + parens[1] def pformat_latex_qtree(self): r""" Returns a representation of the tree compatible with the LaTeX qtree package. This consists of the string ``\Tree`` followed by the tree represented in bracketed notation. For example, the following result was generated from a parse tree of the sentence ``The announcement astounded us``:: \Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ] [.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ] See https://www.ling.upenn.edu/advice/latex.html for the LaTeX style file for the qtree package. :return: A latex qtree representation of this tree. :rtype: str """ reserved_chars = re.compile(r"([#\$%&~_\{\}])") pformat = self.pformat(indent=6, nodesep="", parens=("[.", " ]")) return r"\Tree " + re.sub(reserved_chars, r"\\\1", pformat) def _pformat_flat(self, nodesep, parens, quotes): childstrs = [] for child in self: if isinstance(child, Tree): childstrs.append(child._pformat_flat(nodesep, parens, quotes)) elif isinstance(child, tuple): childstrs.append("/".join(child)) elif isinstance(child, str) and not quotes: childstrs.append("%s" % child) else: childstrs.append(repr(child)) if isinstance(self._label, str): return "{}{}{} {}{}".format( parens[0], self._label, nodesep, " ".join(childstrs), parens[1], ) else: return "{}{}{} {}{}".format( parens[0], repr(self._label), nodesep, " ".join(childstrs), parens[1], ) The provided code snippet includes necessary dependencies for implementing the `sinica_parse` function. Write a Python function `def sinica_parse(s)` to solve the following problem: Parse a Sinica Treebank string and return a tree. 
Trees are represented as nested brackettings, as shown in the following example (X represents a Chinese character): S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY) :return: A tree corresponding to the string representation. :rtype: Tree :param s: The string to be converted :type s: str Here is the function: def sinica_parse(s): """ Parse a Sinica Treebank string and return a tree. Trees are represented as nested brackettings, as shown in the following example (X represents a Chinese character): S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY) :return: A tree corresponding to the string representation. :rtype: Tree :param s: The string to be converted :type s: str """ tokens = re.split(r"([()| ])", s) for i in range(len(tokens)): if tokens[i] == "(": tokens[i - 1], tokens[i] = ( tokens[i], tokens[i - 1], ) # pull nonterminal inside parens elif ":" in tokens[i]: fields = tokens[i].split(":") if len(fields) == 2: # non-terminal tokens[i] = fields[1] else: tokens[i] = "(" + fields[-2] + " " + fields[-1] + ")" elif tokens[i] == "|": tokens[i] = "" treebank_string = " ".join(tokens) return Tree.fromstring(treebank_string, remove_empty_top_bracketing=True)
Parse a Sinica Treebank string and return a tree. Trees are represented as nested brackettings, as shown in the following example (X represents a Chinese character): S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY) :return: A tree corresponding to the string representation. :rtype: Tree :param s: The string to be converted :type s: str
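A small usage sketch for sinica_parse, assuming the function above is in scope. The input is a shortened variant of the docstring's example (X stands in for a Chinese character), and the printed form is what the token rewriting above should produce, roughly.

tree = sinica_parse("S(theme:NP(Head:Nhaa:X)|Head:VL2:X)")
print(tree)
# Expected (roughly): (S (NP (Nhaa X)) (VL2 X))
# Role prefixes such as "theme:" are dropped, and "Head:Nhaa:X"
# becomes the pre-terminal (Nhaa X).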
170,661
import codecs import re from io import StringIO from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder from nltk.data import PathPointer, find _is_value = re.compile(r"\S") The provided code snippet includes necessary dependencies for implementing the `to_sfm_string` function. Write a Python function `def to_sfm_string(tree, encoding=None, errors="strict", unicode_fields=None)` to solve the following problem: Return a string with a standard format representation of the toolbox data in tree (tree can be a toolbox database or a single record). :param tree: flat representation of toolbox data (whole database or single record) :type tree: ElementTree._ElementInterface :param encoding: Name of an encoding to use. :type encoding: str :param errors: Error handling scheme for codec. Same as the ``encode()`` builtin string method. :type errors: str :param unicode_fields: :type unicode_fields: dict(str) or set(str) :rtype: str Here is the function: def to_sfm_string(tree, encoding=None, errors="strict", unicode_fields=None): """ Return a string with a standard format representation of the toolbox data in tree (tree can be a toolbox database or a single record). :param tree: flat representation of toolbox data (whole database or single record) :type tree: ElementTree._ElementInterface :param encoding: Name of an encoding to use. :type encoding: str :param errors: Error handling scheme for codec. Same as the ``encode()`` builtin string method. :type errors: str :param unicode_fields: :type unicode_fields: dict(str) or set(str) :rtype: str """ if tree.tag == "record": root = Element("toolbox_data") root.append(tree) tree = root if tree.tag != "toolbox_data": raise ValueError("not a toolbox_data element structure") if encoding is None and unicode_fields is not None: raise ValueError( "if encoding is not specified then neither should unicode_fields" ) l = [] for rec in tree: l.append("\n") for field in rec: mkr = field.tag value = field.text if encoding is not None: if unicode_fields is not None and mkr in unicode_fields: cur_encoding = "utf8" else: cur_encoding = encoding if re.search(_is_value, value): l.append((f"\\{mkr} {value}\n").encode(cur_encoding, errors)) else: l.append((f"\\{mkr}{value}\n").encode(cur_encoding, errors)) else: if re.search(_is_value, value): l.append(f"\\{mkr} {value}\n") else: l.append(f"\\{mkr}{value}\n") return "".join(l[1:])
Return a string with a standard format representation of the toolbox data in tree (tree can be a toolbox database or a single record). :param tree: flat representation of toolbox data (whole database or single record) :type tree: ElementTree._ElementInterface :param encoding: Name of an encoding to use. :type encoding: str :param errors: Error handling scheme for codec. Same as the ``encode()`` builtin string method. :type errors: str :param unicode_fields: :type unicode_fields: dict(str) or set(str) :rtype: str
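A minimal sketch of to_sfm_string, assuming the function above is in scope. The field markers lx and ps are hypothetical, chosen only to mimic a typical Toolbox lexicon record; note that every field needs a text value, since the function runs re.search on field.text directly.

from xml.etree.ElementTree import Element, SubElement

rec = Element("record")
lx = SubElement(rec, "lx")
lx.text = "kaa"
ps = SubElement(rec, "ps")
ps.text = "N"

print(to_sfm_string(rec))
# Expected output:
# \lx kaa
# \ps N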
170,662
import codecs import re from io import StringIO from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder from nltk.data import PathPointer, find The provided code snippet includes necessary dependencies for implementing the `remove_blanks` function. Write a Python function `def remove_blanks(elem)` to solve the following problem: Remove all elements and subelements with no text and no child elements. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface Here is the function: def remove_blanks(elem): """ Remove all elements and subelements with no text and no child elements. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface """ out = list() for child in elem: remove_blanks(child) if child.text or len(child) > 0: out.append(child) elem[:] = out
Remove all elements and subelements with no text and no child elements. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface
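A short sketch of remove_blanks, assuming the function above is in scope; the record layout and marker names are hypothetical. The ge field has neither text nor children, so it is pruned, while lx survives.

from xml.etree.ElementTree import Element, SubElement

rec = Element("record")
lx = SubElement(rec, "lx")
lx.text = "kaa"
SubElement(rec, "ge")                      # blank field: no text, no children

remove_blanks(rec)
print([child.tag for child in rec])        # expected: ['lx']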
170,663
import codecs import re from io import StringIO from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder from nltk.data import PathPointer, find def find(resource_name, paths=None): """ Find the given resource by searching through the directories and zip files in paths, where a None or empty string specifies an absolute path. Returns a corresponding path name. If the given resource is not found, raise a ``LookupError``, whose message gives a pointer to the installation instructions for the NLTK downloader. Zip File Handling: - If ``resource_name`` contains a component with a ``.zip`` extension, then it is assumed to be a zipfile; and the remaining path components are used to look inside the zipfile. - If any element of ``nltk.data.path`` has a ``.zip`` extension, then it is assumed to be a zipfile. - If a given resource name that does not contain any zipfile component is not found initially, then ``find()`` will make a second attempt to find that resource, by replacing each component *p* in the path with *p.zip/p*. For example, this allows ``find()`` to map the resource name ``corpora/chat80/cities.pl`` to a zip file path pointer to ``corpora/chat80.zip/chat80/cities.pl``. - When using ``find()`` to locate a directory contained in a zipfile, the resource name must end with the forward slash character. Otherwise, ``find()`` will not locate the directory. :type resource_name: str or unicode :param resource_name: The name of the resource to search for. Resource names are posix-style relative path names, such as ``corpora/brown``. Directory names will be automatically converted to a platform-appropriate path separator. :rtype: str """ resource_name = normalize_resource_name(resource_name, True) # Resolve default paths at runtime in-case the user overrides # nltk.data.path if paths is None: paths = path # Check if the resource name includes a zipfile name m = re.match(r"(.*\.zip)/?(.*)$|", resource_name) zipfile, zipentry = m.groups() # Check each item in our path for path_ in paths: # Is the path item a zipfile? if path_ and (os.path.isfile(path_) and path_.endswith(".zip")): try: return ZipFilePathPointer(path_, resource_name) except OSError: # resource not in zipfile continue # Is the path item a directory or is resource_name an absolute path? elif not path_ or os.path.isdir(path_): if zipfile is None: p = os.path.join(path_, url2pathname(resource_name)) if os.path.exists(p): if p.endswith(".gz"): return GzipFileSystemPathPointer(p) else: return FileSystemPathPointer(p) else: p = os.path.join(path_, url2pathname(zipfile)) if os.path.exists(p): try: return ZipFilePathPointer(p, zipentry) except OSError: # resource not in zipfile continue # Fallback: if the path doesn't include a zip file, then try # again, assuming that one of the path components is inside a # zipfile of the same name. if zipfile is None: pieces = resource_name.split("/") for i in range(len(pieces)): modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:]) try: return find(modified_name, paths) except LookupError: pass # Identify the package (i.e. the .zip file) to download. resource_zipname = resource_name.split("/")[1] if resource_zipname.endswith(".zip"): resource_zipname = resource_zipname.rpartition(".")[0] # Display a friendly error message if the resource wasn't found: msg = str( "Resource \33[93m{resource}\033[0m not found.\n" "Please use the NLTK Downloader to obtain the resource:\n\n" "\33[31m" # To display red text in terminal. 
">>> import nltk\n" ">>> nltk.download('{resource}')\n" "\033[0m" ).format(resource=resource_zipname) msg = textwrap_indent(msg) msg += "\n For more information see: https://www.nltk.org/data.html\n" msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format( resource_name=resource_name ) msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths) sep = "*" * 70 resource_not_found = f"\n{sep}\n{msg}\n{sep}\n" raise LookupError(resource_not_found) The provided code snippet includes necessary dependencies for implementing the `add_default_fields` function. Write a Python function `def add_default_fields(elem, default_fields)` to solve the following problem: Add blank elements and subelements specified in default_fields. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface :param default_fields: fields to add to each type of element and subelement :type default_fields: dict(tuple) Here is the function: def add_default_fields(elem, default_fields): """ Add blank elements and subelements specified in default_fields. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface :param default_fields: fields to add to each type of element and subelement :type default_fields: dict(tuple) """ for field in default_fields.get(elem.tag, []): if elem.find(field) is None: SubElement(elem, field) for child in elem: add_default_fields(child, default_fields)
Add blank elements and subelements specified in default_fields. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface :param default_fields: fields to add to each type of element and subelement :type default_fields: dict(tuple)
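A minimal usage sketch for add_default_fields, using an invented Toolbox-style record; the field names are illustrative, not taken from any particular database:

from xml.etree.ElementTree import Element, SubElement

record = Element("record")
SubElement(record, "lx").text = "kaa"

# Ensure every <record> carries ps, ge and gp subelements, even if empty.
default_fields = {"record": ("ps", "ge", "gp")}
add_default_fields(record, default_fields)
print(sorted(child.tag for child in record))  # ['ge', 'gp', 'lx', 'ps']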
170,664
import codecs import re from io import StringIO from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder from nltk.data import PathPointer, find def _sort_fields(elem, orders_dicts): """sort the children of elem""" try: order = orders_dicts[elem.tag] except KeyError: pass else: tmp = sorted( ((order.get(child.tag, 1e9), i), child) for i, child in enumerate(elem) ) elem[:] = [child for key, child in tmp] for child in elem: if len(child): _sort_fields(child, orders_dicts) The provided code snippet includes necessary dependencies for implementing the `sort_fields` function. Write a Python function `def sort_fields(elem, field_orders)` to solve the following problem: Sort the elements and subelements in order specified in field_orders. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface :param field_orders: order of fields for each type of element and subelement :type field_orders: dict(tuple) Here is the function: def sort_fields(elem, field_orders): """ Sort the elements and subelements in order specified in field_orders. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface :param field_orders: order of fields for each type of element and subelement :type field_orders: dict(tuple) """ order_dicts = dict() for field, order in field_orders.items(): order_dicts[field] = order_key = dict() for i, subfield in enumerate(order): order_key[subfield] = i _sort_fields(elem, order_dicts)
Sort the elements and subelements in order specified in field_orders. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface :param field_orders: order of fields for each type of element and subelement :type field_orders: dict(tuple)
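A small sketch of sort_fields on an invented record, assuming the usual Toolbox convention of lexeme (lx) first, then part of speech (ps), then gloss (ge):

from xml.etree.ElementTree import Element, SubElement

record = Element("record")
for tag, text in [("ge", "gag"), ("lx", "kaa"), ("ps", "V.A")]:
    SubElement(record, tag).text = text

field_orders = {"record": ("lx", "ps", "ge")}  # desired subelement order
sort_fields(record, field_orders)
print([child.tag for child in record])  # ['lx', 'ps', 'ge']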
170,665
import codecs import re from io import StringIO from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder from nltk.data import PathPointer, find The provided code snippet includes necessary dependencies for implementing the `add_blank_lines` function. Write a Python function `def add_blank_lines(tree, blanks_before, blanks_between)` to solve the following problem: Add blank lines before all elements and subelements specified in blank_before. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface :param blank_before: elements and subelements to add blank lines before :type blank_before: dict(tuple) Here is the function: def add_blank_lines(tree, blanks_before, blanks_between): """ Add blank lines before all elements and subelements specified in blank_before. :param elem: toolbox data in an elementtree structure :type elem: ElementTree._ElementInterface :param blank_before: elements and subelements to add blank lines before :type blank_before: dict(tuple) """ try: before = blanks_before[tree.tag] between = blanks_between[tree.tag] except KeyError: for elem in tree: if len(elem): add_blank_lines(elem, blanks_before, blanks_between) else: last_elem = None for elem in tree: tag = elem.tag if last_elem is not None and last_elem.tag != tag: if tag in before and last_elem is not None: e = last_elem.getiterator()[-1] e.text = (e.text or "") + "\n" else: if tag in between: e = last_elem.getiterator()[-1] e.text = (e.text or "") + "\n" if len(elem): add_blank_lines(elem, blanks_before, blanks_between) last_elem = elem
Add blank lines before all elements and subelements specified in blanks_before. :param tree: toolbox data in an elementtree structure :type tree: ElementTree._ElementInterface :param blanks_before: elements and subelements to add blank lines before :type blanks_before: dict(tuple) :param blanks_between: elements and subelements to add blank lines between :type blanks_between: dict(tuple)
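A call sketch for add_blank_lines with invented marker names. Note that the implementation above relies on ElementTree's getiterator(), which was removed in Python 3.9, so this only runs unmodified on older interpreters (list(elem.iter()) is the modern replacement):

from xml.etree.ElementTree import Element, SubElement, tostring

toolbox = Element("toolbox_data")
record = SubElement(toolbox, "record")
for tag, text in [("lx", "kaa"), ("ps", "V.A"), ("ge", "gag")]:
    SubElement(record, tag).text = text

blanks_before = {"record": ("lx",)}   # blank line before each lx field
blanks_between = {"record": ("ps",)}  # blank line between field groups
add_blank_lines(toolbox, blanks_before, blanks_between)
print(tostring(toolbox, encoding="unicode"))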
170,666
import codecs import re from io import StringIO from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder from nltk.data import PathPointer, find class ToolboxData(StandardFormat): def parse(self, grammar=None, **kwargs): if grammar: return self._chunk_parse(grammar=grammar, **kwargs) else: return self._record_parse(**kwargs) def _record_parse(self, key=None, **kwargs): r""" Returns an element tree structure corresponding to a toolbox data file with all markers at the same level. Thus the following Toolbox database:: \_sh v3.0 400 Rotokas Dictionary \_DateStampHasFourDigitYear \lx kaa \ps V.A \ge gag \gp nek i pas \lx kaa \ps V.B \ge strangle \gp pasim nek after parsing will end up with the same structure (ignoring the extra whitespace) as the following XML fragment after being parsed by ElementTree:: <toolbox_data> <header> <_sh>v3.0 400 Rotokas Dictionary</_sh> <_DateStampHasFourDigitYear/> </header> <record> <lx>kaa</lx> <ps>V.A</ps> <ge>gag</ge> <gp>nek i pas</gp> </record> <record> <lx>kaa</lx> <ps>V.B</ps> <ge>strangle</ge> <gp>pasim nek</gp> </record> </toolbox_data> :param key: Name of key marker at the start of each record. If set to None (the default value) the first marker that doesn't begin with an underscore is assumed to be the key. :type key: str :param kwargs: Keyword arguments passed to ``StandardFormat.fields()`` :type kwargs: dict :rtype: ElementTree._ElementInterface :return: contents of toolbox data divided into header and records """ builder = TreeBuilder() builder.start("toolbox_data", {}) builder.start("header", {}) in_records = False for mkr, value in self.fields(**kwargs): if key is None and not in_records and mkr[0] != "_": key = mkr if mkr == key: if in_records: builder.end("record") else: builder.end("header") in_records = True builder.start("record", {}) builder.start(mkr, {}) builder.data(value) builder.end(mkr) if in_records: builder.end("record") else: builder.end("header") builder.end("toolbox_data") return builder.close() def _tree2etree(self, parent): from nltk.tree import Tree root = Element(parent.label()) for child in parent: if isinstance(child, Tree): root.append(self._tree2etree(child)) else: text, tag = child e = SubElement(root, tag) e.text = text return root def _chunk_parse(self, grammar=None, root_label="record", trace=0, **kwargs): """ Returns an element tree structure corresponding to a toolbox data file parsed according to the chunk grammar. :type grammar: str :param grammar: Contains the chunking rules used to parse the database. See ``chunk.RegExp`` for documentation. :type root_label: str :param root_label: The node value that should be used for the top node of the chunk structure. :type trace: int :param trace: The level of tracing that should be used when parsing a text. ``0`` will generate no tracing output; ``1`` will generate normal tracing output; and ``2`` or higher will generate verbose tracing output. 
:type kwargs: dict :param kwargs: Keyword arguments passed to ``toolbox.StandardFormat.fields()`` :rtype: ElementTree._ElementInterface """ from nltk import chunk from nltk.tree import Tree cp = chunk.RegexpParser(grammar, root_label=root_label, trace=trace) db = self.parse(**kwargs) tb_etree = Element("toolbox_data") header = db.find("header") tb_etree.append(header) for record in db.findall("record"): parsed = cp.parse([(elem.text, elem.tag) for elem in record]) tb_etree.append(self._tree2etree(parsed)) return tb_etree class ToolboxSettings(StandardFormat): """This class is the base class for settings files.""" def __init__(self): super().__init__() def parse(self, encoding=None, errors="strict", **kwargs): """ Return the contents of toolbox settings file with a nested structure. :param encoding: encoding used by settings file :type encoding: str :param errors: Error handling scheme for codec. Same as ``decode()`` builtin method. :type errors: str :param kwargs: Keyword arguments passed to ``StandardFormat.fields()`` :type kwargs: dict :rtype: ElementTree._ElementInterface """ builder = TreeBuilder() for mkr, value in self.fields(encoding=encoding, errors=errors, **kwargs): # Check whether the first char of the field marker # indicates a block start (+) or end (-) block = mkr[0] if block in ("+", "-"): mkr = mkr[1:] else: block = None # Build tree on the basis of block char if block == "+": builder.start(mkr, {}) builder.data(value) elif block == "-": builder.end(mkr) else: builder.start(mkr, {}) builder.data(value) builder.end(mkr) return builder.close() def to_settings_string(tree, encoding=None, errors="strict", unicode_fields=None): # write XML to file l = list() _to_settings_string( tree.getroot(), l, encoding=encoding, errors=errors, unicode_fields=unicode_fields, ) return "".join(l) def find(resource_name, paths=None): """ Find the given resource by searching through the directories and zip files in paths, where a None or empty string specifies an absolute path. Returns a corresponding path name. If the given resource is not found, raise a ``LookupError``, whose message gives a pointer to the installation instructions for the NLTK downloader. Zip File Handling: - If ``resource_name`` contains a component with a ``.zip`` extension, then it is assumed to be a zipfile; and the remaining path components are used to look inside the zipfile. - If any element of ``nltk.data.path`` has a ``.zip`` extension, then it is assumed to be a zipfile. - If a given resource name that does not contain any zipfile component is not found initially, then ``find()`` will make a second attempt to find that resource, by replacing each component *p* in the path with *p.zip/p*. For example, this allows ``find()`` to map the resource name ``corpora/chat80/cities.pl`` to a zip file path pointer to ``corpora/chat80.zip/chat80/cities.pl``. - When using ``find()`` to locate a directory contained in a zipfile, the resource name must end with the forward slash character. Otherwise, ``find()`` will not locate the directory. :type resource_name: str or unicode :param resource_name: The name of the resource to search for. Resource names are posix-style relative path names, such as ``corpora/brown``. Directory names will be automatically converted to a platform-appropriate path separator. 
:rtype: str """ resource_name = normalize_resource_name(resource_name, True) # Resolve default paths at runtime in-case the user overrides # nltk.data.path if paths is None: paths = path # Check if the resource name includes a zipfile name m = re.match(r"(.*\.zip)/?(.*)$|", resource_name) zipfile, zipentry = m.groups() # Check each item in our path for path_ in paths: # Is the path item a zipfile? if path_ and (os.path.isfile(path_) and path_.endswith(".zip")): try: return ZipFilePathPointer(path_, resource_name) except OSError: # resource not in zipfile continue # Is the path item a directory or is resource_name an absolute path? elif not path_ or os.path.isdir(path_): if zipfile is None: p = os.path.join(path_, url2pathname(resource_name)) if os.path.exists(p): if p.endswith(".gz"): return GzipFileSystemPathPointer(p) else: return FileSystemPathPointer(p) else: p = os.path.join(path_, url2pathname(zipfile)) if os.path.exists(p): try: return ZipFilePathPointer(p, zipentry) except OSError: # resource not in zipfile continue # Fallback: if the path doesn't include a zip file, then try # again, assuming that one of the path components is inside a # zipfile of the same name. if zipfile is None: pieces = resource_name.split("/") for i in range(len(pieces)): modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:]) try: return find(modified_name, paths) except LookupError: pass # Identify the package (i.e. the .zip file) to download. resource_zipname = resource_name.split("/")[1] if resource_zipname.endswith(".zip"): resource_zipname = resource_zipname.rpartition(".")[0] # Display a friendly error message if the resource wasn't found: msg = str( "Resource \33[93m{resource}\033[0m not found.\n" "Please use the NLTK Downloader to obtain the resource:\n\n" "\33[31m" # To display red text in terminal. ">>> import nltk\n" ">>> nltk.download('{resource}')\n" "\033[0m" ).format(resource=resource_zipname) msg = textwrap_indent(msg) msg += "\n For more information see: https://www.nltk.org/data.html\n" msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format( resource_name=resource_name ) msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths) sep = "*" * 70 resource_not_found = f"\n{sep}\n{msg}\n{sep}\n" raise LookupError(resource_not_found) def islice(iterable: Iterable[_T], stop: Optional[int]) -> Iterator[_T]: ... def islice(iterable: Iterable[_T], start: Optional[int], stop: Optional[int], step: Optional[int] = ...) -> Iterator[_T]: ... def demo(): from itertools import islice # zip_path = find('corpora/toolbox.zip') # lexicon = ToolboxData(ZipFilePathPointer(zip_path, 'toolbox/rotokas.dic')).parse() file_path = find("corpora/toolbox/rotokas.dic") lexicon = ToolboxData(file_path).parse() print("first field in fourth record:") print(lexicon[3][0].tag) print(lexicon[3][0].text) print("\nfields in sequential order:") for field in islice(lexicon.find("record"), 10): print(field.tag, field.text) print("\nlx fields:") for field in islice(lexicon.findall("record/lx"), 10): print(field.text) settings = ToolboxSettings() file_path = find("corpora/toolbox/MDF/MDF_AltH.typ") settings.open(file_path) # settings.open(ZipFilePathPointer(zip_path, entry='toolbox/MDF/MDF_AltH.typ')) tree = settings.parse(unwrap=False, encoding="cp1252") print(tree.find("expset/expMDF/rtfPageSetup/paperSize").text) settings_tree = ElementTree(tree) print(to_settings_string(settings_tree).encode("utf8"))
null
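Besides the corpus-based demo above, ToolboxData can parse an in-memory database; this sketch assumes the open_string() helper inherited from StandardFormat and uses a tiny invented Rotokas-style fragment:

from nltk.toolbox import ToolboxData

data = ToolboxData()
data.open_string("\\lx kaa\n\\ps V.A\n\\ge gag\n\\lx kopi\n\\ps N\n\\ge coffee\n")
tree = data.parse(key="lx")
for record in tree.findall("record"):
    print(record.find("lx").text, "-", record.find("ge").text)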
170,667
import array import math import random import warnings from abc import ABCMeta, abstractmethod from collections import Counter, defaultdict from functools import reduce from nltk.internals import raise_unorderable_types _NINF = float("-1e300") def add_logs(logx, logy): def reduce(function: Callable[[_T, _S], _T], sequence: Iterable[_S], initial: _T) -> _T: def reduce(function: Callable[[_T, _T], _T], sequence: Iterable[_T]) -> _T: def sum_logs(logs): return reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF
null
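The body of add_logs is not reproduced above; in NLTK it performs a numerically stable addition of two base-2 log values. The following self-contained sketch uses an illustrative add_logs (not NLTK's exact code) to show how sum_logs combines log-space probabilities:

import math
from functools import reduce

_NINF = float("-1e300")

def add_logs(logx, logy):
    # Given log2(x) and log2(y), return log2(x + y) without leaving log space.
    base = max(logx, logy)  # keeps the exponents non-positive
    return base + math.log2(2 ** (logx - base) + 2 ** (logy - base))

def sum_logs(logs):
    return reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF

print(sum_logs([math.log2(0.25), math.log2(0.5)]))  # log2(0.75), about -0.415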
170,668
import array import math import random import warnings from abc import ABCMeta, abstractmethod from collections import Counter, defaultdict from functools import reduce from nltk.internals import raise_unorderable_types def _get_kwarg(kwargs, key, default): if key in kwargs: arg = kwargs[key] del kwargs[key] else: arg = default return arg
null
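_get_kwarg behaves like dict.pop with a default, mutating the passed-in kwargs; a tiny illustration with made-up options:

opts = {"cumulative": True, "linewidth": 2}
cumulative = _get_kwarg(opts, "cumulative", False)
samples = _get_kwarg(opts, "samples", None)
print(cumulative, samples, opts)  # True None {'linewidth': 2}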
170,669
import array import math import random import warnings from abc import ABCMeta, abstractmethod from collections import Counter, defaultdict from functools import reduce from nltk.internals import raise_unorderable_types class FreqDist(Counter): """ A frequency distribution for the outcomes of an experiment. A frequency distribution records the number of times each outcome of an experiment has occurred. For example, a frequency distribution could be used to record the frequency of each word type in a document. Formally, a frequency distribution can be defined as a function mapping from each sample to the number of times that sample occurred as an outcome. Frequency distributions are generally constructed by running a number of experiments, and incrementing the count for a sample every time it is an outcome of an experiment. For example, the following code will produce a frequency distribution that encodes how often each word occurs in a text: >>> from nltk.tokenize import word_tokenize >>> from nltk.probability import FreqDist >>> sent = 'This is an example sentence' >>> fdist = FreqDist() >>> for word in word_tokenize(sent): ... fdist[word.lower()] += 1 An equivalent way to do this is with the initializer: >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent)) """ def __init__(self, samples=None): """ Construct a new frequency distribution. If ``samples`` is given, then the frequency distribution will be initialized with the count of each object in ``samples``; otherwise, it will be initialized to be empty. In particular, ``FreqDist()`` returns an empty frequency distribution; and ``FreqDist(samples)`` first creates an empty frequency distribution, and then calls ``update`` with the list ``samples``. :param samples: The samples to initialize the frequency distribution with. :type samples: Sequence """ Counter.__init__(self, samples) # Cached number of samples in this FreqDist self._N = None def N(self): """ Return the total number of sample outcomes that have been recorded by this FreqDist. For the number of unique sample values (or bins) with counts greater than zero, use ``FreqDist.B()``. :rtype: int """ if self._N is None: # Not already cached, or cache has been invalidated self._N = sum(self.values()) return self._N def __setitem__(self, key, val): """ Override ``Counter.__setitem__()`` to invalidate the cached N """ self._N = None super().__setitem__(key, val) def __delitem__(self, key): """ Override ``Counter.__delitem__()`` to invalidate the cached N """ self._N = None super().__delitem__(key) def update(self, *args, **kwargs): """ Override ``Counter.update()`` to invalidate the cached N """ self._N = None super().update(*args, **kwargs) def setdefault(self, key, val): """ Override ``Counter.setdefault()`` to invalidate the cached N """ self._N = None super().setdefault(key, val) def B(self): """ Return the total number of sample values (or "bins") that have counts greater than zero. For the total number of sample outcomes recorded, use ``FreqDist.N()``. (FreqDist.B() is the same as len(FreqDist).) :rtype: int """ return len(self) def hapaxes(self): """ Return a list of all samples that occur once (hapax legomena) :rtype: list """ return [item for item in self if self[item] == 1] def Nr(self, r, bins=None): return self.r_Nr(bins)[r] def r_Nr(self, bins=None): """ Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0. :type bins: int :param bins: The number of possible sample outcomes. ``bins`` is used to calculate Nr(0). 
In particular, Nr(0) is ``bins-self.B()``. If ``bins`` is not specified, it defaults to ``self.B()`` (so Nr(0) will be 0). :rtype: int """ _r_Nr = defaultdict(int) for count in self.values(): _r_Nr[count] += 1 # Special case for Nr[0]: _r_Nr[0] = bins - self.B() if bins is not None else 0 return _r_Nr def _cumulative_frequencies(self, samples): """ Return the cumulative frequencies of the specified samples. If no samples are specified, all counts are returned, starting with the largest. :param samples: the samples whose frequencies should be returned. :type samples: any :rtype: list(float) """ cf = 0.0 for sample in samples: cf += self[sample] yield cf # slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs, # here, freq() does probs def freq(self, sample): """ Return the frequency of a given sample. The frequency of a sample is defined as the count of that sample divided by the total number of sample outcomes that have been recorded by this FreqDist. The count of a sample is defined as the number of times that sample outcome was recorded by this FreqDist. Frequencies are always real numbers in the range [0, 1]. :param sample: the sample whose frequency should be returned. :type sample: any :rtype: float """ n = self.N() if n == 0: return 0 return self[sample] / n def max(self): """ Return the sample with the greatest number of outcomes in this frequency distribution. If two or more samples have the same number of outcomes, return one of them; which sample is returned is undefined. If no outcomes have occurred in this frequency distribution, return None. :return: The sample with the maximum number of outcomes in this frequency distribution. :rtype: any or None """ if len(self) == 0: raise ValueError( "A FreqDist must have at least one sample before max is defined." ) return self.most_common(1)[0][0] def plot( self, *args, title="", cumulative=False, percents=False, show=True, **kwargs ): """ Plot samples from the frequency distribution displaying the most frequent sample first. If an integer parameter is supplied, stop after this many samples have been plotted. For a cumulative plot, specify cumulative=True. Additional ``**kwargs`` are passed to matplotlib's plot function. (Requires Matplotlib to be installed.) :param title: The title for the graph. :type title: str :param cumulative: Whether the plot is cumulative. (default = False) :type cumulative: bool :param percents: Whether the plot uses percents instead of counts. (default = False) :type percents: bool :param show: Whether to show the plot, or only return the ax. :type show: bool """ try: import matplotlib.pyplot as plt except ImportError as e: raise ValueError( "The plot function requires matplotlib to be installed." 
"See https://matplotlib.org/" ) from e if len(args) == 0: args = [len(self)] samples = [item for item, _ in self.most_common(*args)] if cumulative: freqs = list(self._cumulative_frequencies(samples)) ylabel = "Cumulative " else: freqs = [self[sample] for sample in samples] ylabel = "" if percents: freqs = [f / self.N() * 100 for f in freqs] ylabel += "Percents" else: ylabel += "Counts" ax = plt.gca() ax.grid(True, color="silver") if "linewidth" not in kwargs: kwargs["linewidth"] = 2 if title: ax.set_title(title) ax.plot(freqs, **kwargs) ax.set_xticks(range(len(samples))) ax.set_xticklabels([str(s) for s in samples], rotation=90) ax.set_xlabel("Samples") ax.set_ylabel(ylabel) if show: plt.show() return ax def tabulate(self, *args, **kwargs): """ Tabulate the given samples from the frequency distribution (cumulative), displaying the most frequent sample first. If an integer parameter is supplied, stop after this many samples have been plotted. :param samples: The samples to plot (default is all samples) :type samples: list :param cumulative: A flag to specify whether the freqs are cumulative (default = False) :type title: bool """ if len(args) == 0: args = [len(self)] samples = _get_kwarg( kwargs, "samples", [item for item, _ in self.most_common(*args)] ) cumulative = _get_kwarg(kwargs, "cumulative", False) if cumulative: freqs = list(self._cumulative_frequencies(samples)) else: freqs = [self[sample] for sample in samples] # percents = [f * 100 for f in freqs] only in ProbDist? width = max(len(f"{s}") for s in samples) width = max(width, max(len("%d" % f) for f in freqs)) for i in range(len(samples)): print("%*s" % (width, samples[i]), end=" ") print() for i in range(len(samples)): print("%*d" % (width, freqs[i]), end=" ") print() def copy(self): """ Create a copy of this frequency distribution. :rtype: FreqDist """ return self.__class__(self) # Mathematical operatiors def __add__(self, other): """ Add counts from two counters. >>> FreqDist('abbb') + FreqDist('bcc') FreqDist({'b': 4, 'c': 2, 'a': 1}) """ return self.__class__(super().__add__(other)) def __sub__(self, other): """ Subtract count, but keep only results with positive counts. >>> FreqDist('abbbc') - FreqDist('bccd') FreqDist({'b': 2, 'a': 1}) """ return self.__class__(super().__sub__(other)) def __or__(self, other): """ Union is the maximum of value in either of the input counters. >>> FreqDist('abbb') | FreqDist('bcc') FreqDist({'b': 3, 'c': 2, 'a': 1}) """ return self.__class__(super().__or__(other)) def __and__(self, other): """ Intersection is the minimum of corresponding counts. >>> FreqDist('abbb') & FreqDist('bcc') FreqDist({'b': 1}) """ return self.__class__(super().__and__(other)) def __le__(self, other): """ Returns True if this frequency distribution is a subset of the other and for no key the value exceeds the value of the same key from the other frequency distribution. The <= operator forms partial order and satisfying the axioms reflexivity, antisymmetry and transitivity. 
>>> FreqDist('a') <= FreqDist('a') True >>> a = FreqDist('abc') >>> b = FreqDist('aabc') >>> (a <= b, b <= a) (True, False) >>> FreqDist('a') <= FreqDist('abcd') True >>> FreqDist('abc') <= FreqDist('xyz') False >>> FreqDist('xyz') <= FreqDist('abc') False >>> c = FreqDist('a') >>> d = FreqDist('aa') >>> e = FreqDist('aaa') >>> c <= d and d <= e and c <= e True """ if not isinstance(other, FreqDist): raise_unorderable_types("<=", self, other) return set(self).issubset(other) and all( self[key] <= other[key] for key in self ) def __ge__(self, other): if not isinstance(other, FreqDist): raise_unorderable_types(">=", self, other) return set(self).issuperset(other) and all( self[key] >= other[key] for key in other ) __lt__ = lambda self, other: self <= other and not self == other __gt__ = lambda self, other: self >= other and not self == other def __repr__(self): """ Return a string representation of this FreqDist. :rtype: string """ return self.pformat() def pprint(self, maxlen=10, stream=None): """ Print a string representation of this FreqDist to 'stream' :param maxlen: The maximum number of items to print :type maxlen: int :param stream: The stream to print to. stdout by default """ print(self.pformat(maxlen=maxlen), file=stream) def pformat(self, maxlen=10): """ Return a string representation of this FreqDist. :param maxlen: The maximum number of items to display :type maxlen: int :rtype: string """ items = ["{!r}: {!r}".format(*item) for item in self.most_common(maxlen)] if len(self) > maxlen: items.append("...") return "FreqDist({{{0}}})".format(", ".join(items)) def __str__(self): """ Return a string representation of this FreqDist. :rtype: string """ return "<FreqDist with %d samples and %d outcomes>" % (len(self), self.N()) def __iter__(self): """ Return an iterator which yields tokens ordered by frequency. :rtype: iterator """ for token, _ in self.most_common(self.B()): yield token class MLEProbDist(ProbDistI): """ The maximum likelihood estimate for the probability distribution of the experiment used to generate a frequency distribution. The "maximum likelihood estimate" approximates the probability of each sample as the frequency of that sample in the frequency distribution. """ def __init__(self, freqdist, bins=None): """ Use the maximum likelihood estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. """ self._freqdist = freqdist def freqdist(self): """ Return the frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._freqdist def prob(self, sample): return self._freqdist.freq(sample) def max(self): return self._freqdist.max() def samples(self): return self._freqdist.keys() def __repr__(self): """ :rtype: str :return: A string representation of this ``ProbDist``. """ return "<MLEProbDist based on %d samples>" % self._freqdist.N() class LidstoneProbDist(ProbDistI): """ The Lidstone estimate for the probability distribution of the experiment used to generate a frequency distribution. The "Lidstone estimate" is parameterized by a real number *gamma*, which typically ranges from 0 to 1. The Lidstone estimate approximates the probability of a sample with count *c* from an experiment with *N* outcomes and *B* bins as ``c+gamma)/(N+B*gamma)``. 
This is equivalent to adding *gamma* to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. """ SUM_TO_ONE = False def __init__(self, freqdist, gamma, bins=None): """ Use the Lidstone estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. :type gamma: float :param gamma: A real number used to parameterize the estimate. The Lidstone estimate is equivalent to adding *gamma* to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ if (bins == 0) or (bins is None and freqdist.N() == 0): name = self.__class__.__name__[:-8] raise ValueError( "A %s probability distribution " % name + "must have at least one bin." ) if (bins is not None) and (bins < freqdist.B()): name = self.__class__.__name__[:-8] raise ValueError( "\nThe number of bins in a %s distribution " % name + "(%d) must be greater than or equal to\n" % bins + "the number of bins in the FreqDist used " + "to create it (%d)." % freqdist.B() ) self._freqdist = freqdist self._gamma = float(gamma) self._N = self._freqdist.N() if bins is None: bins = freqdist.B() self._bins = bins self._divisor = self._N + bins * gamma if self._divisor == 0.0: # In extreme cases we force the probability to be 0, # which it will be, since the count will be 0: self._gamma = 0 self._divisor = 1 def freqdist(self): """ Return the frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._freqdist def prob(self, sample): c = self._freqdist[sample] return (c + self._gamma) / self._divisor def max(self): # For Lidstone distributions, probability is monotonic with # frequency, so the most probable sample is the one that # occurs most frequently. return self._freqdist.max() def samples(self): return self._freqdist.keys() def discount(self): gb = self._gamma * self._bins return gb / (self._N + gb) def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return "<LidstoneProbDist based on %d samples>" % self._freqdist.N() class HeldoutProbDist(ProbDistI): """ The heldout estimate for the probability distribution of the experiment used to generate two frequency distributions. These two frequency distributions are called the "heldout frequency distribution" and the "base frequency distribution." The "heldout estimate" uses uses the "heldout frequency distribution" to predict the probability of each sample, given its frequency in the "base frequency distribution". In particular, the heldout estimate approximates the probability for a sample that occurs *r* times in the base distribution as the average frequency in the heldout distribution of all samples that occur *r* times in the base distribution. This average frequency is *Tr[r]/(Nr[r].N)*, where: - *Tr[r]* is the total count in the heldout distribution for all samples that occur *r* times in the base distribution. - *Nr[r]* is the number of samples that occur *r* times in the base distribution. - *N* is the number of outcomes recorded by the heldout frequency distribution. 
In order to increase the efficiency of the ``prob`` member function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r* when the ``HeldoutProbDist`` is created. :type _estimate: list(float) :ivar _estimate: A list mapping from *r*, the number of times that a sample occurs in the base distribution, to the probability estimate for that sample. ``_estimate[r]`` is calculated by finding the average frequency in the heldout distribution of all samples that occur *r* times in the base distribution. In particular, ``_estimate[r]`` = *Tr[r]/(Nr[r].N)*. :type _max_r: int :ivar _max_r: The maximum number of times that any sample occurs in the base distribution. ``_max_r`` is used to decide how large ``_estimate`` must be. """ SUM_TO_ONE = False def __init__(self, base_fdist, heldout_fdist, bins=None): """ Use the heldout estimate to create a probability distribution for the experiment used to generate ``base_fdist`` and ``heldout_fdist``. :type base_fdist: FreqDist :param base_fdist: The base frequency distribution. :type heldout_fdist: FreqDist :param heldout_fdist: The heldout frequency distribution. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ self._base_fdist = base_fdist self._heldout_fdist = heldout_fdist # The max number of times any sample occurs in base_fdist. self._max_r = base_fdist[base_fdist.max()] # Calculate Tr, Nr, and N. Tr = self._calculate_Tr() r_Nr = base_fdist.r_Nr(bins) Nr = [r_Nr[r] for r in range(self._max_r + 1)] N = heldout_fdist.N() # Use Tr, Nr, and N to compute the probability estimate for # each value of r. self._estimate = self._calculate_estimate(Tr, Nr, N) def _calculate_Tr(self): """ Return the list *Tr*, where *Tr[r]* is the total count in ``heldout_fdist`` for all samples that occur *r* times in ``base_fdist``. :rtype: list(float) """ Tr = [0.0] * (self._max_r + 1) for sample in self._heldout_fdist: r = self._base_fdist[sample] Tr[r] += self._heldout_fdist[sample] return Tr def _calculate_estimate(self, Tr, Nr, N): """ Return the list *estimate*, where *estimate[r]* is the probability estimate for any sample that occurs *r* times in the base frequency distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*. In the special case that *N[r]=0*, *estimate[r]* will never be used; so we define *estimate[r]=None* for those cases. :rtype: list(float) :type Tr: list(float) :param Tr: the list *Tr*, where *Tr[r]* is the total count in the heldout distribution for all samples that occur *r* times in base distribution. :type Nr: list(float) :param Nr: The list *Nr*, where *Nr[r]* is the number of samples that occur *r* times in the base distribution. :type N: int :param N: The total number of outcomes recorded by the heldout frequency distribution. """ estimate = [] for r in range(self._max_r + 1): if Nr[r] == 0: estimate.append(None) else: estimate.append(Tr[r] / (Nr[r] * N)) return estimate def base_fdist(self): """ Return the base frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._base_fdist def heldout_fdist(self): """ Return the heldout frequency distribution that this probability distribution is based on. 
:rtype: FreqDist """ return self._heldout_fdist def samples(self): return self._base_fdist.keys() def prob(self, sample): # Use our precomputed probability estimate. r = self._base_fdist[sample] return self._estimate[r] def max(self): # Note: the Heldout estimation is *not* necessarily monotonic; # so this implementation is currently broken. However, it # should give the right answer *most* of the time. :) return self._base_fdist.max() def discount(self): raise NotImplementedError() def __repr__(self): """ :rtype: str :return: A string representation of this ``ProbDist``. """ s = "<HeldoutProbDist: %d base samples; %d heldout samples>" return s % (self._base_fdist.N(), self._heldout_fdist.N()) class CrossValidationProbDist(ProbDistI): """ The cross-validation estimate for the probability distribution of the experiment used to generate a set of frequency distribution. The "cross-validation estimate" for the probability of a sample is found by averaging the held-out estimates for the sample in each pair of frequency distributions. """ SUM_TO_ONE = False def __init__(self, freqdists, bins): """ Use the cross-validation estimate to create a probability distribution for the experiment used to generate ``freqdists``. :type freqdists: list(FreqDist) :param freqdists: A list of the frequency distributions generated by the experiment. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ self._freqdists = freqdists # Create a heldout probability distribution for each pair of # frequency distributions in freqdists. self._heldout_probdists = [] for fdist1 in freqdists: for fdist2 in freqdists: if fdist1 is not fdist2: probdist = HeldoutProbDist(fdist1, fdist2, bins) self._heldout_probdists.append(probdist) def freqdists(self): """ Return the list of frequency distributions that this ``ProbDist`` is based on. :rtype: list(FreqDist) """ return self._freqdists def samples(self): # [xx] nb: this is not too efficient return set(sum((list(fd) for fd in self._freqdists), [])) def prob(self, sample): # Find the average probability estimate returned by each # heldout distribution. prob = 0.0 for heldout_probdist in self._heldout_probdists: prob += heldout_probdist.prob(sample) return prob / len(self._heldout_probdists) def discount(self): raise NotImplementedError() def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return "<CrossValidationProbDist: %d-way>" % len(self._freqdists) class SimpleGoodTuringProbDist(ProbDistI): """ SimpleGoodTuring ProbDist approximates from frequency to frequency of frequency into a linear line under log space by linear regression. Details of Simple Good-Turing algorithm can be found in: - Good Turing smoothing without tears" (Gale & Sampson 1995), Journal of Quantitative Linguistics, vol. 2 pp. 217-237. - "Speech and Language Processing (Jurafsky & Martin), 2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c)) - https://www.grsampson.net/RGoodTur.html Given a set of pair (xi, yi), where the xi denotes the frequency and yi denotes the frequency of frequency, we want to minimize their square variation. E(x) and E(y) represent the mean of xi and yi. 
- slope: b = sigma ((xi-E(x)(yi-E(y))) / sigma ((xi-E(x))(xi-E(x))) - intercept: a = E(y) - b.E(x) """ SUM_TO_ONE = False def __init__(self, freqdist, bins=None): """ :param freqdist: The frequency counts upon which to base the estimation. :type freqdist: FreqDist :param bins: The number of possible event types. This must be larger than the number of bins in the ``freqdist``. If None, then it's assumed to be equal to ``freqdist``.B() + 1 :type bins: int """ assert ( bins is None or bins > freqdist.B() ), "bins parameter must not be less than %d=freqdist.B()+1" % (freqdist.B() + 1) if bins is None: bins = freqdist.B() + 1 self._freqdist = freqdist self._bins = bins r, nr = self._r_Nr() self.find_best_fit(r, nr) self._switch(r, nr) self._renormalize(r, nr) def _r_Nr_non_zero(self): r_Nr = self._freqdist.r_Nr() del r_Nr[0] return r_Nr def _r_Nr(self): """ Split the frequency distribution in two list (r, Nr), where Nr(r) > 0 """ nonzero = self._r_Nr_non_zero() if not nonzero: return [], [] return zip(*sorted(nonzero.items())) def find_best_fit(self, r, nr): """ Use simple linear regression to tune parameters self._slope and self._intercept in the log-log space based on count and Nr(count) (Work in log space to avoid floating point underflow.) """ # For higher sample frequencies the data points becomes horizontal # along line Nr=1. To create a more evident linear model in log-log # space, we average positive Nr values with the surrounding zero # values. (Church and Gale, 1991) if not r or not nr: # Empty r or nr? return zr = [] for j in range(len(r)): i = r[j - 1] if j > 0 else 0 k = 2 * r[j] - i if j == len(r) - 1 else r[j + 1] zr_ = 2.0 * nr[j] / (k - i) zr.append(zr_) log_r = [math.log(i) for i in r] log_zr = [math.log(i) for i in zr] xy_cov = x_var = 0.0 x_mean = sum(log_r) / len(log_r) y_mean = sum(log_zr) / len(log_zr) for (x, y) in zip(log_r, log_zr): xy_cov += (x - x_mean) * (y - y_mean) x_var += (x - x_mean) ** 2 self._slope = xy_cov / x_var if x_var != 0 else 0.0 if self._slope >= -1: warnings.warn( "SimpleGoodTuring did not find a proper best fit " "line for smoothing probabilities of occurrences. " "The probability estimates are likely to be " "unreliable." ) self._intercept = y_mean - self._slope * x_mean def _switch(self, r, nr): """ Calculate the r frontier where we must switch from Nr to Sr when estimating E[Nr]. """ for i, r_ in enumerate(r): if len(r) == i + 1 or r[i + 1] != r_ + 1: # We are at the end of r, or there is a gap in r self._switch_at = r_ break Sr = self.smoothedNr smooth_r_star = (r_ + 1) * Sr(r_ + 1) / Sr(r_) unsmooth_r_star = (r_ + 1) * nr[i + 1] / nr[i] std = math.sqrt(self._variance(r_, nr[i], nr[i + 1])) if abs(unsmooth_r_star - smooth_r_star) <= 1.96 * std: self._switch_at = r_ break def _variance(self, r, nr, nr_1): r = float(r) nr = float(nr) nr_1 = float(nr_1) return (r + 1.0) ** 2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr) def _renormalize(self, r, nr): """ It is necessary to renormalize all the probability estimates to ensure a proper probability distribution results. This can be done by keeping the estimate of the probability mass for unseen items as N(1)/N and renormalizing all the estimates for previously seen items (as Gale and Sampson (1995) propose). (See M&S P.213, 1999) """ prob_cov = 0.0 for r_, nr_ in zip(r, nr): prob_cov += nr_ * self._prob_measure(r_) if prob_cov: self._renormal = (1 - self._prob_measure(0)) / prob_cov def smoothedNr(self, r): """ Return the number of samples with count r. :param r: The amount of frequency. 
:type r: int :rtype: float """ # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic # relationship) # Estimate a and b by simple linear regression technique on # the logarithmic form of the equation: log Nr = a + b*log(r) return math.exp(self._intercept + self._slope * math.log(r)) def prob(self, sample): """ Return the sample's probability. :param sample: sample of the event :type sample: str :rtype: float """ count = self._freqdist[sample] p = self._prob_measure(count) if count == 0: if self._bins == self._freqdist.B(): p = 0.0 else: p = p / (self._bins - self._freqdist.B()) else: p = p * self._renormal return p def _prob_measure(self, count): if count == 0 and self._freqdist.N() == 0: return 1.0 elif count == 0 and self._freqdist.N() != 0: return self._freqdist.Nr(1) / self._freqdist.N() if self._switch_at > count: Er_1 = self._freqdist.Nr(count + 1) Er = self._freqdist.Nr(count) else: Er_1 = self.smoothedNr(count + 1) Er = self.smoothedNr(count) r_star = (count + 1) * Er_1 / Er return r_star / self._freqdist.N() def check(self): prob_sum = 0.0 for i in range(0, len(self._Nr)): prob_sum += self._Nr[i] * self._prob_measure(i) / self._renormal print("Probability Sum:", prob_sum) # assert prob_sum != 1.0, "probability sum should be one!" def discount(self): """ This function returns the total mass of probability transfers from the seen samples to the unseen samples. """ return self.smoothedNr(1) / self._freqdist.N() def max(self): return self._freqdist.max() def samples(self): return self._freqdist.keys() def freqdist(self): return self._freqdist def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return "<SimpleGoodTuringProbDist based on %d samples>" % self._freqdist.N() def _create_rand_fdist(numsamples, numoutcomes): """ Create a new frequency distribution, with random samples. The samples are numbers from 1 to ``numsamples``, and are generated by summing two numbers, each of which has a uniform distribution. """ fdist = FreqDist() for x in range(numoutcomes): y = random.randint(1, (1 + numsamples) // 2) + random.randint( 0, numsamples // 2 ) fdist[y] += 1 return fdist def _create_sum_pdist(numsamples): """ Return the true probability distribution for the experiment ``_create_rand_fdist(numsamples, x)``. """ fdist = FreqDist() for x in range(1, (1 + numsamples) // 2 + 1): for y in range(0, numsamples // 2 + 1): fdist[x + y] += 1 return MLEProbDist(fdist) The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo(numsamples=6, numoutcomes=500)` to solve the following problem: A demonstration of frequency distributions and probability distributions. This demonstration creates three frequency distributions with, and uses them to sample a random process with ``numsamples`` samples. Each frequency distribution is sampled ``numoutcomes`` times. These three frequency distributions are then used to build six probability distributions. Finally, the probability estimates of these distributions are compared to the actual probability of each sample. :type numsamples: int :param numsamples: The number of samples to use in each demo frequency distributions. :type numoutcomes: int :param numoutcomes: The total number of outcomes for each demo frequency distribution. These outcomes are divided into ``numsamples`` bins. :rtype: None Here is the function: def demo(numsamples=6, numoutcomes=500): """ A demonstration of frequency distributions and probability distributions. 
This demonstration creates three frequency distributions with, and uses them to sample a random process with ``numsamples`` samples. Each frequency distribution is sampled ``numoutcomes`` times. These three frequency distributions are then used to build six probability distributions. Finally, the probability estimates of these distributions are compared to the actual probability of each sample. :type numsamples: int :param numsamples: The number of samples to use in each demo frequency distributions. :type numoutcomes: int :param numoutcomes: The total number of outcomes for each demo frequency distribution. These outcomes are divided into ``numsamples`` bins. :rtype: None """ # Randomly sample a stochastic process three times. fdist1 = _create_rand_fdist(numsamples, numoutcomes) fdist2 = _create_rand_fdist(numsamples, numoutcomes) fdist3 = _create_rand_fdist(numsamples, numoutcomes) # Use our samples to create probability distributions. pdists = [ MLEProbDist(fdist1), LidstoneProbDist(fdist1, 0.5, numsamples), HeldoutProbDist(fdist1, fdist2, numsamples), HeldoutProbDist(fdist2, fdist1, numsamples), CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples), SimpleGoodTuringProbDist(fdist1), SimpleGoodTuringProbDist(fdist1, 7), _create_sum_pdist(numsamples), ] # Find the probability of each sample. vals = [] for n in range(1, numsamples + 1): vals.append(tuple([n, fdist1.freq(n)] + [pdist.prob(n) for pdist in pdists])) # Print the results in a formatted table. print( "%d samples (1-%d); %d outcomes were sampled for each FreqDist" % (numsamples, numsamples, numoutcomes) ) print("=" * 9 * (len(pdists) + 2)) FORMATSTR = " FreqDist " + "%8s " * (len(pdists) - 1) + "| Actual" print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1])) print("-" * 9 * (len(pdists) + 2)) FORMATSTR = "%3d %8.6f " + "%8.6f " * (len(pdists) - 1) + "| %8.6f" for val in vals: print(FORMATSTR % val) # Print the totals for each column (should all be 1.0) zvals = list(zip(*vals)) sums = [sum(val) for val in zvals[1:]] print("-" * 9 * (len(pdists) + 2)) FORMATSTR = "Total " + "%8.6f " * (len(pdists)) + "| %8.6f" print(FORMATSTR % tuple(sums)) print("=" * 9 * (len(pdists) + 2)) # Display the distributions themselves, if they're short enough. if len("%s" % fdist1) < 70: print(" fdist1: %s" % fdist1) print(" fdist2: %s" % fdist2) print(" fdist3: %s" % fdist3) print() print("Generating:") for pdist in pdists: fdist = FreqDist(pdist.generate() for i in range(5000)) print("{:>20} {}".format(pdist.__class__.__name__[:20], ("%s" % fdist)[:55])) print()
A demonstration of frequency distributions and probability distributions. This demonstration creates three frequency distributions and uses them to sample a random process with ``numsamples`` samples. Each frequency distribution is sampled ``numoutcomes`` times. These three frequency distributions are then used to build several probability distributions. Finally, the probability estimates of these distributions are compared to the actual probability of each sample. :type numsamples: int :param numsamples: The number of samples to use in each demo frequency distribution. :type numoutcomes: int :param numoutcomes: The total number of outcomes for each demo frequency distribution. These outcomes are divided into ``numsamples`` bins. :rtype: None
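A smaller, self-contained variant of the comparison the demo performs, assuming only that nltk is installed; it contrasts the maximum likelihood and Lidstone estimates on a toy frequency distribution:

from nltk.probability import FreqDist, LidstoneProbDist, MLEProbDist

fd = FreqDist("abracadabra")
mle = MLEProbDist(fd)
lid = LidstoneProbDist(fd, 0.5, bins=26)  # add-0.5 smoothing over 26 letters

for sample in "abz":
    print(sample, round(mle.prob(sample), 3), round(lid.prob(sample), 3))
# 'z' has zero probability under MLE but a small non-zero one under Lidstone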
170,670
import array import math import random import warnings from abc import ABCMeta, abstractmethod from collections import Counter, defaultdict from functools import reduce from nltk.internals import raise_unorderable_types class FreqDist(Counter): def __init__(self, samples=None): def N(self): def __setitem__(self, key, val): def __delitem__(self, key): def update(self, *args, **kwargs): def setdefault(self, key, val): def B(self): def hapaxes(self): def Nr(self, r, bins=None): def r_Nr(self, bins=None): def _cumulative_frequencies(self, samples): def freq(self, sample): def max(self): def plot( self, *args, title="", cumulative=False, percents=False, show=True, **kwargs ): def tabulate(self, *args, **kwargs): def copy(self): def __add__(self, other): def __sub__(self, other): def __or__(self, other): def __and__(self, other): def __le__(self, other): def __ge__(self, other): def __repr__(self): def pprint(self, maxlen=10, stream=None): def pformat(self, maxlen=10): def __str__(self): def __iter__(self): class SimpleGoodTuringProbDist(ProbDistI): def __init__(self, freqdist, bins=None): def _r_Nr_non_zero(self): def _r_Nr(self): def find_best_fit(self, r, nr): def _switch(self, r, nr): def _variance(self, r, nr, nr_1): def _renormalize(self, r, nr): def smoothedNr(self, r): def prob(self, sample): def _prob_measure(self, count): def check(self): def discount(self): def max(self): def samples(self): def freqdist(self): def __repr__(self): def gt_demo(): from nltk import corpus emma_words = corpus.gutenberg.words("austen-emma.txt") fd = FreqDist(emma_words) sgt = SimpleGoodTuringProbDist(fd) print("{:>18} {:>8} {:>14}".format("word", "frequency", "SimpleGoodTuring")) fd_keys_sorted = ( key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True) ) for key in fd_keys_sorted: print("%18s %8d %14e" % (key, fd[key], sgt.prob(key)))
null
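A compact variant of gt_demo that avoids downloading the Gutenberg corpus, using a toy word list instead; the sentence is invented purely so that several frequency-of-frequency counts exist:

from nltk.probability import FreqDist, SimpleGoodTuringProbDist

words = ("the cat sat on the mat and the dog sat on the log "
         "while a cat and a dog ran to the mat").split()
fd = FreqDist(words)
sgt = SimpleGoodTuringProbDist(fd)
for word in ("the", "cat", "zebra"):
    print(word, fd[word], sgt.prob(word))  # unseen 'zebra' still gets some mass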
170,671
import copy import random import sys try: import numpy except ImportError: pass from nltk.cluster.util import VectorSpaceClusterer class KMeansClusterer(VectorSpaceClusterer): """ The K-means clusterer starts with k arbitrary chosen means then allocates each vector to the cluster with the closest mean. It then recalculates the means of each cluster as the centroid of the vectors in the cluster. This process repeats until the cluster memberships stabilise. This is a hill-climbing algorithm which may converge to a local maximum. Hence the clustering is often repeated with random initial means and the most commonly occurring output means are chosen. """ def __init__( self, num_means, distance, repeats=1, conv_test=1e-6, initial_means=None, normalise=False, svd_dimensions=None, rng=None, avoid_empty_clusters=False, ): """ :param num_means: the number of means to use (may use fewer) :type num_means: int :param distance: measure of distance between two vectors :type distance: function taking two vectors and returning a float :param repeats: number of randomised clustering trials to use :type repeats: int :param conv_test: maximum variation in mean differences before deemed convergent :type conv_test: number :param initial_means: set of k initial means :type initial_means: sequence of vectors :param normalise: should vectors be normalised to length 1 :type normalise: boolean :param svd_dimensions: number of dimensions to use in reducing vector dimensionsionality with SVD :type svd_dimensions: int :param rng: random number generator (or None) :type rng: Random :param avoid_empty_clusters: include current centroid in computation of next one; avoids undefined behavior when clusters become empty :type avoid_empty_clusters: boolean """ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) self._num_means = num_means self._distance = distance self._max_difference = conv_test assert not initial_means or len(initial_means) == num_means self._means = initial_means assert repeats >= 1 assert not (initial_means and repeats > 1) self._repeats = repeats self._rng = rng if rng else random.Random() self._avoid_empty_clusters = avoid_empty_clusters def cluster_vectorspace(self, vectors, trace=False): if self._means and self._repeats > 1: print("Warning: means will be discarded for subsequent trials") meanss = [] for trial in range(self._repeats): if trace: print("k-means trial", trial) if not self._means or trial > 1: self._means = self._rng.sample(list(vectors), self._num_means) self._cluster_vectorspace(vectors, trace) meanss.append(self._means) if len(meanss) > 1: # sort the means first (so that different cluster numbering won't # effect the distance comparison) for means in meanss: means.sort(key=sum) # find the set of means that's minimally different from the others min_difference = min_means = None for i in range(len(meanss)): d = 0 for j in range(len(meanss)): if i != j: d += self._sum_distances(meanss[i], meanss[j]) if min_difference is None or d < min_difference: min_difference, min_means = d, meanss[i] # use the best means self._means = min_means def _cluster_vectorspace(self, vectors, trace=False): if self._num_means < len(vectors): # perform k-means clustering converged = False while not converged: # assign the tokens to clusters based on minimum distance to # the cluster means clusters = [[] for m in range(self._num_means)] for vector in vectors: index = self.classify_vectorspace(vector) clusters[index].append(vector) if trace: print("iteration") # for i in range(self._num_means): # print ' 
mean', i, 'allocated', len(clusters[i]), 'vectors' # recalculate cluster means by computing the centroid of each cluster new_means = list(map(self._centroid, clusters, self._means)) # measure the degree of change from the previous step for convergence difference = self._sum_distances(self._means, new_means) if difference < self._max_difference: converged = True # remember the new means self._means = new_means def classify_vectorspace(self, vector): # finds the closest cluster centroid # returns that cluster's index best_distance = best_index = None for index in range(len(self._means)): mean = self._means[index] dist = self._distance(vector, mean) if best_distance is None or dist < best_distance: best_index, best_distance = index, dist return best_index def num_clusters(self): if self._means: return len(self._means) else: return self._num_means def means(self): """ The means used for clustering. """ return self._means def _sum_distances(self, vectors1, vectors2): difference = 0.0 for u, v in zip(vectors1, vectors2): difference += self._distance(u, v) return difference def _centroid(self, cluster, mean): if self._avoid_empty_clusters: centroid = copy.copy(mean) for vector in cluster: centroid += vector return centroid / (1 + len(cluster)) else: if not len(cluster): sys.stderr.write("Error: no centroid defined for empty cluster.\n") sys.stderr.write( "Try setting argument 'avoid_empty_clusters' to True\n" ) assert False centroid = copy.copy(cluster[0]) for vector in cluster[1:]: centroid += vector return centroid / len(cluster) def __repr__(self): return "<KMeansClusterer means=%s repeats=%d>" % (self._means, self._repeats) def demo(): # example from figure 14.9, page 517, Manning and Schutze from nltk.cluster import KMeansClusterer, euclidean_distance vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]] means = [[4, 3], [5, 5]] clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means) clusters = clusterer.cluster(vectors, True, trace=True) print("Clustered:", vectors) print("As:", clusters) print("Means:", clusterer.means()) print() vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]] # test k-means using the euclidean distance metric, 2 means and repeat # clustering 10 times with random seeds clusterer = KMeansClusterer(2, euclidean_distance, repeats=10) clusters = clusterer.cluster(vectors, True) print("Clustered:", vectors) print("As:", clusters) print("Means:", clusterer.means()) print() # classify a new vector vector = numpy.array([3, 3]) print("classify(%s):" % vector, end=" ") print(clusterer.classify(vector)) print()
null
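A minimal usage sketch of KMeansClusterer, closely following the demo above; the sample vectors are arbitrary toy data and numpy is assumed to be installed.

import numpy
from nltk.cluster import KMeansClusterer, euclidean_distance

# six toy 2-D points, as in the demo above
vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

# 2 means, Euclidean distance, 10 random restarts to reduce the effect of
# unlucky random initial means
clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
assignments = clusterer.cluster(vectors, assign_clusters=True)

print(assignments)           # one cluster index per input vector
print(clusterer.means())     # the two learned centroids
print(clusterer.classify(numpy.array([3, 3])))   # cluster index for a new point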
170,672
import copy from abc import abstractmethod from math import sqrt from sys import stdout try: import numpy except ImportError: pass from nltk.cluster.api import ClusterI def sqrt(__x: SupportsFloat) -> float: ... The provided code snippet includes necessary dependencies for implementing the `cosine_distance` function. Write a Python function `def cosine_distance(u, v)` to solve the following problem: Returns 1 minus the cosine of the angle between vectors v and u. This is equal to ``1 - (u.v / |u||v|)``. Here is the function: def cosine_distance(u, v): """ Returns 1 minus the cosine of the angle between vectors v and u. This is equal to ``1 - (u.v / |u||v|)``. """ return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v))))
Returns 1 minus the cosine of the angle between vectors v and u. This is equal to ``1 - (u.v / |u||v|)``.
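A quick sanity check of cosine_distance on hand-picked vectors (a sketch assuming numpy is installed); the expected values follow directly from the formula ``1 - (u.v / |u||v|)``.

import numpy
from nltk.cluster.util import cosine_distance

u = numpy.array([1.0, 0.0])
v = numpy.array([0.0, 1.0])
w = numpy.array([2.0, 0.0])

print(cosine_distance(u, v))   # orthogonal vectors -> 1.0
print(cosine_distance(u, w))   # parallel vectors   -> ~0.0
print(cosine_distance(u, u))   # identical vectors  -> ~0.0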
170,673
try: import numpy except ImportError: pass from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance class GAAClusterer(VectorSpaceClusterer): """ The Group Average Agglomerative starts with each of the N vectors as singleton clusters. It then iteratively merges pairs of clusters which have the closest centroids. This continues until there is only one cluster. The order of merges gives rise to a dendrogram: a tree with the earlier merges lower than later merges. The membership of a given number of clusters c, 1 <= c <= N, can be found by cutting the dendrogram at depth c. This clusterer uses the cosine similarity metric only, which allows for efficient speed-up in the clustering process. """ def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None): VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) self._num_clusters = num_clusters self._dendrogram = None self._groups_values = None def cluster(self, vectors, assign_clusters=False, trace=False): # stores the merge order self._dendrogram = Dendrogram( [numpy.array(vector, numpy.float64) for vector in vectors] ) return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace) def cluster_vectorspace(self, vectors, trace=False): # variables describing the initial situation N = len(vectors) cluster_len = [1] * N cluster_count = N index_map = numpy.arange(N) # construct the similarity matrix dims = (N, N) dist = numpy.ones(dims, dtype=float) * numpy.inf for i in range(N): for j in range(i + 1, N): dist[i, j] = cosine_distance(vectors[i], vectors[j]) while cluster_count > max(self._num_clusters, 1): i, j = numpy.unravel_index(dist.argmin(), dims) if trace: print("merging %d and %d" % (i, j)) # update similarities for merging i and j self._merge_similarities(dist, cluster_len, i, j) # remove j dist[:, j] = numpy.inf dist[j, :] = numpy.inf # merge the clusters cluster_len[i] = cluster_len[i] + cluster_len[j] self._dendrogram.merge(index_map[i], index_map[j]) cluster_count -= 1 # update the index map to reflect the indexes if we # had removed j index_map[j + 1 :] -= 1 index_map[j] = N self.update_clusters(self._num_clusters) def _merge_similarities(self, dist, cluster_len, i, j): # the new cluster i merged from i and j adopts the average of # i and j's similarity to each other cluster, weighted by the # number of points in the clusters i and j i_weight = cluster_len[i] j_weight = cluster_len[j] weight_sum = i_weight + j_weight # update for x<i dist[:i, i] = dist[:i, i] * i_weight + dist[:i, j] * j_weight dist[:i, i] /= weight_sum # update for i<x<j dist[i, i + 1 : j] = ( dist[i, i + 1 : j] * i_weight + dist[i + 1 : j, j] * j_weight ) # update for i<j<x dist[i, j + 1 :] = dist[i, j + 1 :] * i_weight + dist[j, j + 1 :] * j_weight dist[i, i + 1 :] /= weight_sum def update_clusters(self, num_clusters): clusters = self._dendrogram.groups(num_clusters) self._centroids = [] for cluster in clusters: assert len(cluster) > 0 if self._should_normalise: centroid = self._normalise(cluster[0]) else: centroid = numpy.array(cluster[0]) for vector in cluster[1:]: if self._should_normalise: centroid += self._normalise(vector) else: centroid += vector centroid /= len(cluster) self._centroids.append(centroid) self._num_clusters = len(self._centroids) def classify_vectorspace(self, vector): best = None for i in range(self._num_clusters): centroid = self._centroids[i] dist = cosine_distance(vector, centroid) if not best or dist < best[0]: best = (dist, i) return best[1] def dendrogram(self): """ :return: The 
dendrogram representing the current clustering :rtype: Dendrogram """ return self._dendrogram def num_clusters(self): return self._num_clusters def __repr__(self): return "<GroupAverageAgglomerative Clusterer n=%d>" % self._num_clusters The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem: Non-interactive demonstration of the clusterers with simple 2-D data. Here is the function: def demo(): """ Non-interactive demonstration of the clusterers with simple 2-D data. """ from nltk.cluster import GAAClusterer # use a set of tokens with 2D indices vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]] # test the GAAC clusterer with 4 clusters clusterer = GAAClusterer(4) clusters = clusterer.cluster(vectors, True) print("Clusterer:", clusterer) print("Clustered:", vectors) print("As:", clusters) print() # show the dendrogram clusterer.dendrogram().show() # classify a new vector vector = numpy.array([3, 3]) print("classify(%s):" % vector, end=" ") print(clusterer.classify(vector)) print()
Non-interactive demonstration of the clusterers with simple 2-D data.
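A usage sketch of GAAClusterer mirroring the demo above; the vectors are toy data and numpy is assumed to be installed.

import numpy
from nltk.cluster import GAAClusterer

vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

clusterer = GAAClusterer(4)                  # cut the dendrogram at 4 clusters
clusters = clusterer.cluster(vectors, True)  # assign_clusters=True
print(clusters)                              # one cluster index per vector

clusterer.dendrogram().show()                # text rendering of the merge history

# re-cut the existing dendrogram at a different number of clusters
clusterer.update_clusters(2)
print(clusterer.classify(numpy.array([3, 3])))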
170,674
try: import numpy except ImportError: pass from nltk.cluster.util import VectorSpaceClusterer class EMClusterer(VectorSpaceClusterer): """ The Gaussian EM clusterer models the vectors as being produced by a mixture of k Gaussian sources. The parameters of these sources (prior probability, mean and covariance matrix) are then found to maximise the likelihood of the given data. This is done with the expectation maximisation algorithm. It starts with k arbitrarily chosen means, priors and covariance matrices. It then calculates the membership probabilities for each vector in each of the clusters; this is the 'E' step. The cluster parameters are then updated in the 'M' step using the maximum likelihood estimate from the cluster membership probabilities. This process continues until the likelihood of the data does not significantly increase. """ def __init__( self, initial_means, priors=None, covariance_matrices=None, conv_threshold=1e-6, bias=0.1, normalise=False, svd_dimensions=None, ): """ Creates an EM clusterer with the given starting parameters, convergence threshold and vector mangling parameters. :param initial_means: the means of the gaussian cluster centers :type initial_means: [seq of] numpy array or seq of SparseArray :param priors: the prior probability for each cluster :type priors: numpy array or seq of float :param covariance_matrices: the covariance matrix for each cluster :type covariance_matrices: [seq of] numpy array :param conv_threshold: maximum change in likelihood before deemed convergent :type conv_threshold: int or float :param bias: variance bias used to ensure non-singular covariance matrices :type bias: float :param normalise: should vectors be normalised to length 1 :type normalise: boolean :param svd_dimensions: number of dimensions to use in reducing vector dimensionsionality with SVD :type svd_dimensions: int """ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) self._means = numpy.array(initial_means, numpy.float64) self._num_clusters = len(initial_means) self._conv_threshold = conv_threshold self._covariance_matrices = covariance_matrices self._priors = priors self._bias = bias def num_clusters(self): return self._num_clusters def cluster_vectorspace(self, vectors, trace=False): assert len(vectors) > 0 # set the parameters to initial values dimensions = len(vectors[0]) means = self._means priors = self._priors if not priors: priors = self._priors = ( numpy.ones(self._num_clusters, numpy.float64) / self._num_clusters ) covariances = self._covariance_matrices if not covariances: covariances = self._covariance_matrices = [ numpy.identity(dimensions, numpy.float64) for i in range(self._num_clusters) ] # do the E and M steps until the likelihood plateaus lastl = self._loglikelihood(vectors, priors, means, covariances) converged = False while not converged: if trace: print("iteration; loglikelihood", lastl) # E-step, calculate hidden variables, h[i,j] h = numpy.zeros((len(vectors), self._num_clusters), numpy.float64) for i in range(len(vectors)): for j in range(self._num_clusters): h[i, j] = priors[j] * self._gaussian( means[j], covariances[j], vectors[i] ) h[i, :] /= sum(h[i, :]) # M-step, update parameters - cvm, p, mean for j in range(self._num_clusters): covariance_before = covariances[j] new_covariance = numpy.zeros((dimensions, dimensions), numpy.float64) new_mean = numpy.zeros(dimensions, numpy.float64) sum_hj = 0.0 for i in range(len(vectors)): delta = vectors[i] - means[j] new_covariance += h[i, j] * numpy.multiply.outer(delta, delta) sum_hj += 
h[i, j] new_mean += h[i, j] * vectors[i] covariances[j] = new_covariance / sum_hj means[j] = new_mean / sum_hj priors[j] = sum_hj / len(vectors) # bias term to stop covariance matrix being singular covariances[j] += self._bias * numpy.identity(dimensions, numpy.float64) # calculate likelihood - FIXME: may be broken l = self._loglikelihood(vectors, priors, means, covariances) # check for convergence if abs(lastl - l) < self._conv_threshold: converged = True lastl = l def classify_vectorspace(self, vector): best = None for j in range(self._num_clusters): p = self._priors[j] * self._gaussian( self._means[j], self._covariance_matrices[j], vector ) if not best or p > best[0]: best = (p, j) return best[1] def likelihood_vectorspace(self, vector, cluster): cid = self.cluster_names().index(cluster) return self._priors[cluster] * self._gaussian( self._means[cluster], self._covariance_matrices[cluster], vector ) def _gaussian(self, mean, cvm, x): m = len(mean) assert cvm.shape == (m, m), "bad sized covariance matrix, %s" % str(cvm.shape) try: det = numpy.linalg.det(cvm) inv = numpy.linalg.inv(cvm) a = det**-0.5 * (2 * numpy.pi) ** (-m / 2.0) dx = x - mean print(dx, inv) b = -0.5 * numpy.dot(numpy.dot(dx, inv), dx) return a * numpy.exp(b) except OverflowError: # happens when the exponent is negative infinity - i.e. b = 0 # i.e. the inverse of cvm is huge (cvm is almost zero) return 0 def _loglikelihood(self, vectors, priors, means, covariances): llh = 0.0 for vector in vectors: p = 0 for j in range(len(priors)): p += priors[j] * self._gaussian(means[j], covariances[j], vector) llh += numpy.log(p) return llh def __repr__(self): return "<EMClusterer means=%s>" % list(self._means) The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo()` to solve the following problem: Non-interactive demonstration of the clusterers with simple 2-D data. Here is the function: def demo(): """ Non-interactive demonstration of the clusterers with simple 2-D data. """ from nltk import cluster # example from figure 14.10, page 519, Manning and Schutze vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]] means = [[4, 2], [4, 2.01]] clusterer = cluster.EMClusterer(means, bias=0.1) clusters = clusterer.cluster(vectors, True, trace=True) print("Clustered:", vectors) print("As: ", clusters) print() for c in range(2): print("Cluster:", c) print("Prior: ", clusterer._priors[c]) print("Mean: ", clusterer._means[c]) print("Covar: ", clusterer._covariance_matrices[c]) print() # classify a new vector vector = numpy.array([2, 2]) print("classify(%s):" % vector, end=" ") print(clusterer.classify(vector)) # show the classification probabilities vector = numpy.array([2, 2]) print("classification_probdist(%s):" % vector) pdist = clusterer.classification_probdist(vector) for sample in pdist.samples(): print(f"{sample} => {pdist.prob(sample) * 100:.0f}%")
Non-interactive demonstration of the clusterers with simple 2-D data.
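A usage sketch of EMClusterer based on the demo above; the vectors and initial means are the same toy values, and numpy is assumed to be installed.

import numpy
from nltk import cluster

vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
means = [[4, 2], [4, 2.01]]                  # two deliberately close starting means

clusterer = cluster.EMClusterer(means, bias=0.1)
clusters = clusterer.cluster(vectors, True)  # assign_clusters=True
print(clusters)

# soft membership probabilities for a new point
pdist = clusterer.classification_probdist(numpy.array([2, 2]))
for sample in pdist.samples():
    print(sample, pdist.prob(sample))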
170,675
import codecs import functools import os import pickle import re import sys import textwrap import zipfile from abc import ABCMeta, abstractmethod from gzip import WRITE as GZ_WRITE from gzip import GzipFile from io import BytesIO, TextIOWrapper from urllib.request import url2pathname, urlopen from nltk import grammar, sem from nltk.compat import add_py3_data, py3_data from nltk.internals import deprecated class GzipFile(_compression.BaseStream): myfileobj: Optional[IO[bytes]] mode: str name: str compress: zlib._Compress fileobj: IO[bytes] def __init__( self, filename: Optional[AnyPath] = ..., mode: Optional[str] = ..., compresslevel: int = ..., fileobj: Optional[IO[bytes]] = ..., mtime: Optional[float] = ..., ) -> None: ... def filename(self) -> str: ... def mtime(self) -> Optional[int]: ... crc: int def write(self, data: ReadableBuffer) -> int: ... def read(self, size: Optional[int] = ...) -> bytes: ... def read1(self, size: int = ...) -> bytes: ... def peek(self, n: int) -> bytes: ... def closed(self) -> bool: ... def close(self) -> None: ... def flush(self, zlib_mode: int = ...) -> None: ... def fileno(self) -> int: ... def rewind(self) -> None: ... def readable(self) -> bool: ... def writable(self) -> bool: ... def seekable(self) -> bool: ... def seek(self, offset: int, whence: int = ...) -> int: ... def readline(self, size: Optional[int] = ...) -> bytes: ... class TextIOWrapper(TextIOBase, TextIO): def __init__( self, buffer: IO[bytes], encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ..., line_buffering: bool = ..., write_through: bool = ..., ) -> None: ... def buffer(self) -> BinaryIO: ... def closed(self) -> bool: ... def line_buffering(self) -> bool: ... if sys.version_info >= (3, 7): def write_through(self) -> bool: ... def reconfigure( self, *, encoding: Optional[str] = ..., errors: Optional[str] = ..., newline: Optional[str] = ..., line_buffering: Optional[bool] = ..., write_through: Optional[bool] = ..., ) -> None: ... # These are inherited from TextIOBase, but must exist in the stub to satisfy mypy. def __enter__(self: _T) -> _T: ... def __iter__(self) -> Iterator[str]: ... # type: ignore def __next__(self) -> str: ... # type: ignore def writelines(self, __lines: Iterable[str]) -> None: ... # type: ignore def readline(self, __size: int = ...) -> str: ... # type: ignore def readlines(self, __hint: int = ...) -> List[str]: ... # type: ignore def seek(self, __cookie: int, __whence: int = ...) -> int: ... def gzip_open_unicode( filename, mode="rb", compresslevel=9, encoding="utf-8", fileobj=None, errors=None, newline=None, ): if fileobj is None: fileobj = GzipFile(filename, mode, compresslevel, fileobj) return TextIOWrapper(fileobj, encoding, errors, newline)
null
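A small round-trip sketch for gzip_open_unicode; the file name is hypothetical, and the behaviour follows from the implementation above (a GzipFile wrapped in a TextIOWrapper, so reads return str rather than bytes).

import gzip
from nltk.data import gzip_open_unicode

# create a small gzip-compressed UTF-8 text file to read back
with gzip.open("sample.txt.gz", "wt", encoding="utf-8") as out:
    out.write("première ligne\nsecond line\n")

fp = gzip_open_unicode("sample.txt.gz", encoding="utf-8")
for line in fp:
    print(line.rstrip())     # decoded unicode strings, not bytes
fp.close()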
170,676
import codecs import functools import os import pickle import re import sys import textwrap import zipfile from abc import ABCMeta, abstractmethod from gzip import WRITE as GZ_WRITE from gzip import GzipFile from io import BytesIO, TextIOWrapper from urllib.request import url2pathname, urlopen from nltk import grammar, sem from nltk.compat import add_py3_data, py3_data from nltk.internals import deprecated path = [] path += [d for d in _paths_from_env if d] if "APPENGINE_RUNTIME" not in os.environ and os.path.expanduser("~/") != "~/": path.append(os.path.expanduser("~/nltk_data")) def normalize_resource_url(resource_url): r""" Normalizes a resource url >>> windows = sys.platform.startswith('win') >>> os.path.normpath(split_resource_url(normalize_resource_url('file:grammar.fcfg'))[1]) == \ ... ('\\' if windows else '') + os.path.abspath(os.path.join(os.curdir, 'grammar.fcfg')) True >>> not windows or normalize_resource_url('file:C:/dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('file:C:\\dir\\file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('file:C:\\dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('file://C:/dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('file:////C:/dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('nltk:C:/dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('nltk:C:\\dir\\file') == 'file:///C:/dir/file' True >>> windows or normalize_resource_url('file:/dir/file/toy.cfg') == 'file:///dir/file/toy.cfg' True >>> normalize_resource_url('nltk:home/nltk') 'nltk:home/nltk' >>> windows or normalize_resource_url('nltk:/home/nltk') == 'file:///home/nltk' True >>> normalize_resource_url('https://example.com/dir/file') 'https://example.com/dir/file' >>> normalize_resource_url('dir/file') 'nltk:dir/file' """ try: protocol, name = split_resource_url(resource_url) except ValueError: # the resource url has no protocol, use the nltk protocol by default protocol = "nltk" name = resource_url # use file protocol if the path is an absolute path if protocol == "nltk" and os.path.isabs(name): protocol = "file://" name = normalize_resource_name(name, False, None) elif protocol == "file": protocol = "file://" # name is absolute name = normalize_resource_name(name, False, None) elif protocol == "nltk": protocol = "nltk:" name = normalize_resource_name(name, True) else: # handled by urllib protocol += "://" return "".join([protocol, name]) def _open(resource_url): """ Helper function that returns an open file object for a resource, given its resource URL. If the given resource URL uses the "nltk:" protocol, or uses no protocol, then use ``nltk.data.find`` to find its path, and open it with the given mode; if the resource URL uses the 'file' protocol, then open the file with the given mode; otherwise, delegate to ``urllib2.urlopen``. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. The default protocol is "nltk:", which searches for the file in the the NLTK data package. 
""" resource_url = normalize_resource_url(resource_url) protocol, path_ = split_resource_url(resource_url) if protocol is None or protocol.lower() == "nltk": return find(path_, path + [""]).open() elif protocol.lower() == "file": # urllib might not use mode='rb', so handle this one ourselves: return find(path_, [""]).open() else: return urlopen(resource_url) The provided code snippet includes necessary dependencies for implementing the `retrieve` function. Write a Python function `def retrieve(resource_url, filename=None, verbose=True)` to solve the following problem: Copy the given resource to a local file. If no filename is specified, then use the URL's filename. If there is already a file named ``filename``, then raise a ``ValueError``. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. The default protocol is "nltk:", which searches for the file in the the NLTK data package. Here is the function: def retrieve(resource_url, filename=None, verbose=True): """ Copy the given resource to a local file. If no filename is specified, then use the URL's filename. If there is already a file named ``filename``, then raise a ``ValueError``. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. The default protocol is "nltk:", which searches for the file in the the NLTK data package. """ resource_url = normalize_resource_url(resource_url) if filename is None: if resource_url.startswith("file:"): filename = os.path.split(resource_url)[-1] else: filename = re.sub(r"(^\w+:)?.*/", "", resource_url) if os.path.exists(filename): filename = os.path.abspath(filename) raise ValueError("File %r already exists!" % filename) if verbose: print(f"Retrieving {resource_url!r}, saving to {filename!r}") # Open the input & output streams. infile = _open(resource_url) # Copy infile -> outfile, using 64k blocks. with open(filename, "wb") as outfile: while True: s = infile.read(1024 * 64) # 64k blocks. outfile.write(s) if not s: break infile.close()
Copy the given resource to a local file. If no filename is specified, then use the URL's filename. If there is already a file named ``filename``, then raise a ``ValueError``. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. The default protocol is "nltk:", which searches for the file in the NLTK data package.
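A hedged usage sketch of retrieve; the resource path assumes the optional 'sample_grammars' NLTK data package has been downloaded, and the URL is only a placeholder.

import nltk.data

# copy a packaged grammar into the current directory as 'toy.cfg'
nltk.data.retrieve('grammars/sample_grammars/toy.cfg', 'toy.cfg')

# plain URLs are also accepted; example.com is a placeholder here
# nltk.data.retrieve('https://example.com/grammars/toy.cfg', 'remote_toy.cfg')

# running the first call again raises ValueError, because 'toy.cfg' now exists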
170,677
import codecs import functools import os import pickle import re import sys import textwrap import zipfile from abc import ABCMeta, abstractmethod from gzip import WRITE as GZ_WRITE from gzip import GzipFile from io import BytesIO, TextIOWrapper from urllib.request import url2pathname, urlopen from nltk import grammar, sem from nltk.compat import add_py3_data, py3_data from nltk.internals import deprecated def normalize_resource_url(resource_url): r""" Normalizes a resource url >>> windows = sys.platform.startswith('win') >>> os.path.normpath(split_resource_url(normalize_resource_url('file:grammar.fcfg'))[1]) == \ ... ('\\' if windows else '') + os.path.abspath(os.path.join(os.curdir, 'grammar.fcfg')) True >>> not windows or normalize_resource_url('file:C:/dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('file:C:\\dir\\file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('file:C:\\dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('file://C:/dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('file:////C:/dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('nltk:C:/dir/file') == 'file:///C:/dir/file' True >>> not windows or normalize_resource_url('nltk:C:\\dir\\file') == 'file:///C:/dir/file' True >>> windows or normalize_resource_url('file:/dir/file/toy.cfg') == 'file:///dir/file/toy.cfg' True >>> normalize_resource_url('nltk:home/nltk') 'nltk:home/nltk' >>> windows or normalize_resource_url('nltk:/home/nltk') == 'file:///home/nltk' True >>> normalize_resource_url('https://example.com/dir/file') 'https://example.com/dir/file' >>> normalize_resource_url('dir/file') 'nltk:dir/file' """ try: protocol, name = split_resource_url(resource_url) except ValueError: # the resource url has no protocol, use the nltk protocol by default protocol = "nltk" name = resource_url # use file protocol if the path is an absolute path if protocol == "nltk" and os.path.isabs(name): protocol = "file://" name = normalize_resource_name(name, False, None) elif protocol == "file": protocol = "file://" # name is absolute name = normalize_resource_name(name, False, None) elif protocol == "nltk": protocol = "nltk:" name = normalize_resource_name(name, True) else: # handled by urllib protocol += "://" return "".join([protocol, name]) def load( resource_url, format="auto", cache=True, verbose=False, logic_parser=None, fstruct_reader=None, encoding=None, ): """ Load a given resource from the NLTK data package. The following resource formats are currently supported: - ``pickle`` - ``json`` - ``yaml`` - ``cfg`` (context free grammars) - ``pcfg`` (probabilistic CFGs) - ``fcfg`` (feature-based CFGs) - ``fol`` (formulas of First Order Logic) - ``logic`` (Logical formulas to be parsed by the given logic_parser) - ``val`` (valuation of First Order Logic model) - ``text`` (the file contents as a unicode string) - ``raw`` (the raw file contents as a byte string) If no format is specified, ``load()`` will attempt to determine a format based on the resource name's file extension. If that fails, ``load()`` will raise a ``ValueError`` exception. For all text formats (everything except ``pickle``, ``json``, ``yaml`` and ``raw``), it tries to decode the raw contents using UTF-8, and if that doesn't work, it tries with ISO-8859-1 (Latin-1), unless the ``encoding`` is specified. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. 
The default protocol is "nltk:", which searches for the file in the the NLTK data package. :type cache: bool :param cache: If true, add this resource to a cache. If load() finds a resource in its cache, then it will return it from the cache rather than loading it. :type verbose: bool :param verbose: If true, print a message when loading a resource. Messages are not displayed when a resource is retrieved from the cache. :type logic_parser: LogicParser :param logic_parser: The parser that will be used to parse logical expressions. :type fstruct_reader: FeatStructReader :param fstruct_reader: The parser that will be used to parse the feature structure of an fcfg. :type encoding: str :param encoding: the encoding of the input; only used for text formats. """ resource_url = normalize_resource_url(resource_url) resource_url = add_py3_data(resource_url) # Determine the format of the resource. if format == "auto": resource_url_parts = resource_url.split(".") ext = resource_url_parts[-1] if ext == "gz": ext = resource_url_parts[-2] format = AUTO_FORMATS.get(ext) if format is None: raise ValueError( "Could not determine format for %s based " 'on its file\nextension; use the "format" ' "argument to specify the format explicitly." % resource_url ) if format not in FORMATS: raise ValueError(f"Unknown format type: {format}!") # If we've cached the resource, then just return it. if cache: resource_val = _resource_cache.get((resource_url, format)) if resource_val is not None: if verbose: print(f"<<Using cached copy of {resource_url}>>") return resource_val # Let the user know what's going on. if verbose: print(f"<<Loading {resource_url}>>") # Load the resource. opened_resource = _open(resource_url) if format == "raw": resource_val = opened_resource.read() elif format == "pickle": resource_val = pickle.load(opened_resource) elif format == "json": import json from nltk.jsontags import json_tags resource_val = json.load(opened_resource) tag = None if len(resource_val) != 1: tag = next(resource_val.keys()) if tag not in json_tags: raise ValueError("Unknown json tag.") elif format == "yaml": import yaml resource_val = yaml.safe_load(opened_resource) else: # The resource is a text format. binary_data = opened_resource.read() if encoding is not None: string_data = binary_data.decode(encoding) else: try: string_data = binary_data.decode("utf-8") except UnicodeDecodeError: string_data = binary_data.decode("latin-1") if format == "text": resource_val = string_data elif format == "cfg": resource_val = grammar.CFG.fromstring(string_data, encoding=encoding) elif format == "pcfg": resource_val = grammar.PCFG.fromstring(string_data, encoding=encoding) elif format == "fcfg": resource_val = grammar.FeatureGrammar.fromstring( string_data, logic_parser=logic_parser, fstruct_reader=fstruct_reader, encoding=encoding, ) elif format == "fol": resource_val = sem.read_logic( string_data, logic_parser=sem.logic.LogicParser(), encoding=encoding, ) elif format == "logic": resource_val = sem.read_logic( string_data, logic_parser=logic_parser, encoding=encoding ) elif format == "val": resource_val = sem.read_valuation(string_data, encoding=encoding) else: raise AssertionError( "Internal NLTK error: Format %s isn't " "handled by nltk.data.load()" % (format,) ) opened_resource.close() # If requested, add it to the cache. 
if cache: try: _resource_cache[(resource_url, format)] = resource_val # TODO: add this line # print('<<Caching a copy of %s>>' % (resource_url,)) except TypeError: # We can't create weak references to some object types, like # strings and tuples. For now, just don't cache them. pass return resource_val The provided code snippet includes necessary dependencies for implementing the `show_cfg` function. Write a Python function `def show_cfg(resource_url, escape="##")` to solve the following problem: Write out a grammar file, ignoring escaped and empty lines. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. The default protocol is "nltk:", which searches for the file in the the NLTK data package. :type escape: str :param escape: Prepended string that signals lines to be ignored Here is the function: def show_cfg(resource_url, escape="##"): """ Write out a grammar file, ignoring escaped and empty lines. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. The default protocol is "nltk:", which searches for the file in the the NLTK data package. :type escape: str :param escape: Prepended string that signals lines to be ignored """ resource_url = normalize_resource_url(resource_url) resource_val = load(resource_url, format="text", cache=False) lines = resource_val.splitlines() for l in lines: if l.startswith(escape): continue if re.match("^$", l): continue print(l)
Write out a grammar file, ignoring escaped and empty lines. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. The default protocol is "nltk:", which searches for the file in the NLTK data package. :type escape: str :param escape: Prepended string that signals lines to be ignored
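A usage sketch of show_cfg, assuming the 'sample_grammars' NLTK data package has been downloaded.

import nltk.data

# print the grammar, skipping empty lines and lines starting with '##'
nltk.data.show_cfg('grammars/sample_grammars/toy.cfg')

# an alternative escape prefix can be supplied
nltk.data.show_cfg('grammars/sample_grammars/toy.cfg', escape='#')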
170,678
import codecs import functools import os import pickle import re import sys import textwrap import zipfile from abc import ABCMeta, abstractmethod from gzip import WRITE as GZ_WRITE from gzip import GzipFile from io import BytesIO, TextIOWrapper from urllib.request import url2pathname, urlopen from nltk import grammar, sem from nltk.compat import add_py3_data, py3_data from nltk.internals import deprecated _resource_cache = {} The provided code snippet includes necessary dependencies for implementing the `clear_cache` function. Write a Python function `def clear_cache()` to solve the following problem: Remove all objects from the resource cache. :see: load() Here is the function: def clear_cache(): """ Remove all objects from the resource cache. :see: load() """ _resource_cache.clear()
Remove all objects from the resource cache. :see: load()
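A sketch of how clear_cache interacts with load's caching, assuming the 'sample_grammars' data package is installed; the identity checks follow from the plain-dict cache shown above.

import nltk.data

g1 = nltk.data.load('grammars/sample_grammars/toy.cfg')   # parsed and cached
g2 = nltk.data.load('grammars/sample_grammars/toy.cfg')   # served from the cache
print(g1 is g2)    # True

nltk.data.clear_cache()
g3 = nltk.data.load('grammars/sample_grammars/toy.cfg')   # re-parsed from disk
print(g1 is g3)    # False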
170,679
import codecs import csv import json import pickle import random import re import sys import time from copy import deepcopy import nltk from nltk.corpus import CategorizedPlaintextCorpusReader from nltk.data import load from nltk.tokenize.casual import EMOTICON_RE The provided code snippet includes necessary dependencies for implementing the `timer` function. Write a Python function `def timer(method)` to solve the following problem: A timer decorator to measure execution performance of methods. Here is the function: def timer(method): """ A timer decorator to measure execution performance of methods. """ def timed(*args, **kw): start = time.time() result = method(*args, **kw) end = time.time() tot_time = end - start hours = tot_time // 3600 mins = tot_time // 60 % 60 # in Python 2.x round() will return a float, so we convert it to int secs = int(round(tot_time % 60)) if hours == 0 and mins == 0 and secs < 10: print(f"[TIMER] {method.__name__}(): {tot_time:.3f} seconds") else: print(f"[TIMER] {method.__name__}(): {hours}h {mins}m {secs}s") return result return timed
A timer decorator to measure execution performance of methods.
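A minimal sketch of the timer decorator in use; the sleep stands in for real work and the printed timing will vary from run to run.

import time
from nltk.sentiment.util import timer

@timer
def train_step():
    time.sleep(1.2)       # placeholder for real work
    return "done"

train_step()   # prints something like: [TIMER] train_step(): 1.201 seconds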
170,680
import codecs import csv import json import pickle import random import re import sys import time from copy import deepcopy import nltk from nltk.corpus import CategorizedPlaintextCorpusReader from nltk.data import load from nltk.tokenize.casual import EMOTICON_RE def extract_unigram_feats(document, unigrams, handle_negation=False): """ Populate a dictionary of unigram features, reflecting the presence/absence in the document of each of the tokens in `unigrams`. :param document: a list of words/tokens. :param unigrams: a list of words/tokens whose presence/absence has to be checked in `document`. :param handle_negation: if `handle_negation == True` apply `mark_negation` method to `document` before checking for unigram presence/absence. :return: a dictionary of unigram features {unigram : boolean}. >>> words = ['ice', 'police', 'riot'] >>> document = 'ice is melting due to global warming'.split() >>> sorted(extract_unigram_feats(document, words).items()) [('contains(ice)', True), ('contains(police)', False), ('contains(riot)', False)] """ features = {} if handle_negation: document = mark_negation(document) for word in unigrams: features[f"contains({word})"] = word in set(document) return features def extract_bigram_feats(document, bigrams): """ Populate a dictionary of bigram features, reflecting the presence/absence in the document of each of the tokens in `bigrams`. This extractor function only considers contiguous bigrams obtained by `nltk.bigrams`. :param document: a list of words/tokens. :param unigrams: a list of bigrams whose presence/absence has to be checked in `document`. :return: a dictionary of bigram features {bigram : boolean}. >>> bigrams = [('global', 'warming'), ('police', 'prevented'), ('love', 'you')] >>> document = 'ice is melting due to global warming'.split() >>> sorted(extract_bigram_feats(document, bigrams).items()) # doctest: +NORMALIZE_WHITESPACE [('contains(global - warming)', True), ('contains(love - you)', False), ('contains(police - prevented)', False)] """ features = {} for bigr in bigrams: features[f"contains({bigr[0]} - {bigr[1]})"] = bigr in nltk.bigrams(document) return features def output_markdown(filename, **kwargs): """ Write the output of an analysis to a file. """ with codecs.open(filename, "at") as outfile: text = "\n*** \n\n" text += "{} \n\n".format(time.strftime("%d/%m/%Y, %H:%M")) for k in sorted(kwargs): if isinstance(kwargs[k], dict): dictionary = kwargs[k] text += f" - **{k}:**\n" for entry in sorted(dictionary): text += f" - {entry}: {dictionary[entry]} \n" elif isinstance(kwargs[k], list): text += f" - **{k}:**\n" for entry in kwargs[k]: text += f" - {entry}\n" else: text += f" - **{k}:** {kwargs[k]} \n" outfile.write(text) def split_train_test(all_instances, n=None): """ Randomly split `n` instances of the dataset into train and test sets. :param all_instances: a list of instances (e.g. documents) that will be split. :param n: the number of instances to consider (in case we want to use only a subset). :return: two lists of instances. Train set is 8/10 of the total and test set is 2/10 of the total. 
""" random.seed(12345) random.shuffle(all_instances) if not n or n > len(all_instances): n = len(all_instances) train_set = all_instances[: int(0.8 * n)] test_set = all_instances[int(0.8 * n) : n] return train_set, test_set def json2csv_preprocess( json_file, outfile, fields, encoding="utf8", errors="replace", gzip_compress=False, skip_retweets=True, skip_tongue_tweets=True, skip_ambiguous_tweets=True, strip_off_emoticons=True, remove_duplicates=True, limit=None, ): """ Convert json file to csv file, preprocessing each row to obtain a suitable dataset for tweets Semantic Analysis. :param json_file: the original json file containing tweets. :param outfile: the output csv filename. :param fields: a list of fields that will be extracted from the json file and kept in the output csv file. :param encoding: the encoding of the files. :param errors: the error handling strategy for the output writer. :param gzip_compress: if True, create a compressed GZIP file. :param skip_retweets: if True, remove retweets. :param skip_tongue_tweets: if True, remove tweets containing ":P" and ":-P" emoticons. :param skip_ambiguous_tweets: if True, remove tweets containing both happy and sad emoticons. :param strip_off_emoticons: if True, strip off emoticons from all tweets. :param remove_duplicates: if True, remove tweets appearing more than once. :param limit: an integer to set the number of tweets to convert. After the limit is reached the conversion will stop. It can be useful to create subsets of the original tweets json data. """ with codecs.open(json_file, encoding=encoding) as fp: (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) # write the list of fields as header writer.writerow(fields) if remove_duplicates == True: tweets_cache = [] i = 0 for line in fp: tweet = json.loads(line) row = extract_fields(tweet, fields) try: text = row[fields.index("text")] # Remove retweets if skip_retweets == True: if re.search(r"\bRT\b", text): continue # Remove tweets containing ":P" and ":-P" emoticons if skip_tongue_tweets == True: if re.search(r"\:\-?P\b", text): continue # Remove tweets containing both happy and sad emoticons if skip_ambiguous_tweets == True: all_emoticons = EMOTICON_RE.findall(text) if all_emoticons: if (set(all_emoticons) & HAPPY) and (set(all_emoticons) & SAD): continue # Strip off emoticons from all tweets if strip_off_emoticons == True: row[fields.index("text")] = re.sub( r"(?!\n)\s+", " ", EMOTICON_RE.sub("", text) ) # Remove duplicate tweets if remove_duplicates == True: if row[fields.index("text")] in tweets_cache: continue else: tweets_cache.append(row[fields.index("text")]) except ValueError: pass writer.writerow(row) i += 1 if limit and i >= limit: break outf.close() def parse_tweets_set( filename, label, word_tokenizer=None, sent_tokenizer=None, skip_header=True ): """ Parse csv file containing tweets and output data a list of (text, label) tuples. :param filename: the input csv filename. :param label: the label to be appended to each tweet contained in the csv file. :param word_tokenizer: the tokenizer instance that will be used to tokenize each sentence into tokens (e.g. WordPunctTokenizer() or BlanklineTokenizer()). If no word_tokenizer is specified, tweets will not be tokenized. :param sent_tokenizer: the tokenizer that will be used to split each tweet into sentences. :param skip_header: if True, skip the first line of the csv file (which usually contains headers). :return: a list of (text, label) tuples. 
""" tweets = [] if not sent_tokenizer: sent_tokenizer = load("tokenizers/punkt/english.pickle") with codecs.open(filename, "rt") as csvfile: reader = csv.reader(csvfile) if skip_header == True: next(reader, None) # skip the header i = 0 for tweet_id, text in reader: # text = text[1] i += 1 sys.stdout.write(f"Loaded {i} tweets\r") # Apply sentence and word tokenizer to text if word_tokenizer: tweet = [ w for sent in sent_tokenizer.tokenize(text) for w in word_tokenizer.tokenize(sent) ] else: tweet = text tweets.append((tweet, label)) print(f"Loaded {i} tweets") return tweets stopwords: WordListCorpusReader = LazyCorpusLoader( "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8" ) twitter_samples: TwitterCorpusReader = LazyCorpusLoader( "twitter_samples", TwitterCorpusReader, r".*\.json" ) The provided code snippet includes necessary dependencies for implementing the `demo_tweets` function. Write a Python function `def demo_tweets(trainer, n_instances=None, output=None)` to solve the following problem: Train and test Naive Bayes classifier on 10000 tweets, tokenized using TweetTokenizer. Features are composed of: - 1000 most frequent unigrams - 100 top bigrams (using BigramAssocMeasures.pmi) :param trainer: `train` method of a classifier. :param n_instances: the number of total tweets that have to be used for training and testing. Tweets will be equally split between positive and negative. :param output: the output file where results have to be reported. Here is the function: def demo_tweets(trainer, n_instances=None, output=None): """ Train and test Naive Bayes classifier on 10000 tweets, tokenized using TweetTokenizer. Features are composed of: - 1000 most frequent unigrams - 100 top bigrams (using BigramAssocMeasures.pmi) :param trainer: `train` method of a classifier. :param n_instances: the number of total tweets that have to be used for training and testing. Tweets will be equally split between positive and negative. :param output: the output file where results have to be reported. """ from nltk.corpus import stopwords, twitter_samples from nltk.sentiment import SentimentAnalyzer from nltk.tokenize import TweetTokenizer # Different customizations for the TweetTokenizer tokenizer = TweetTokenizer(preserve_case=False) # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True) # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True) if n_instances is not None: n_instances = int(n_instances / 2) fields = ["id", "text"] positive_json = twitter_samples.abspath("positive_tweets.json") positive_csv = "positive_tweets.csv" json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances) negative_json = twitter_samples.abspath("negative_tweets.json") negative_csv = "negative_tweets.csv" json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances) neg_docs = parse_tweets_set(negative_csv, label="neg", word_tokenizer=tokenizer) pos_docs = parse_tweets_set(positive_csv, label="pos", word_tokenizer=tokenizer) # We separately split subjective and objective instances to keep a balanced # uniform class distribution in both train and test sets. 
train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_tweets = train_pos_docs + train_neg_docs testing_tweets = test_pos_docs + test_neg_docs sentim_analyzer = SentimentAnalyzer() # stopwords = stopwords.words('english') # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords] all_words = [word for word in sentim_analyzer.all_words(training_tweets)] # Add simple unigram word features unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # Add bigram collocation features bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats( [tweet[0] for tweet in training_tweets], top_n=100, min_freq=12 ) sentim_analyzer.add_feat_extractor( extract_bigram_feats, bigrams=bigram_collocs_feats ) training_set = sentim_analyzer.apply_features(training_tweets) test_set = sentim_analyzer.apply_features(testing_tweets) classifier = sentim_analyzer.train(trainer, training_set) # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4) try: classifier.show_most_informative_features() except AttributeError: print( "Your classifier does not provide a show_most_informative_features() method." ) results = sentim_analyzer.evaluate(test_set) if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown( output, Dataset="labeled_tweets", Classifier=type(classifier).__name__, Tokenizer=tokenizer.__class__.__name__, Feats=extr, Results=results, Instances=n_instances, )
Train and test a Naive Bayes classifier on 10000 tweets, tokenized using TweetTokenizer. Features are composed of: - 1000 most frequent unigrams - 100 top bigrams (using BigramAssocMeasures.pmi) :param trainer: `train` method of a classifier. :param n_instances: the number of total tweets that have to be used for training and testing. Tweets will be equally split between positive and negative. :param output: the output file where results have to be reported.
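A hedged invocation sketch for demo_tweets; it assumes the 'twitter_samples' corpus and the Punkt sentence tokenizer have been downloaded, and the output file name is arbitrary. Note that the function writes positive_tweets.csv and negative_tweets.csv into the working directory.

from nltk.classify import NaiveBayesClassifier
from nltk.sentiment.util import demo_tweets

# a small n_instances keeps the demo fast; omit it to use all 10000 tweets
demo_tweets(NaiveBayesClassifier.train, n_instances=200, output="tweets_results.md")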
170,681
import codecs import csv import json import pickle import random import re import sys import time from copy import deepcopy import nltk from nltk.corpus import CategorizedPlaintextCorpusReader from nltk.data import load from nltk.tokenize.casual import EMOTICON_RE def extract_unigram_feats(document, unigrams, handle_negation=False): """ Populate a dictionary of unigram features, reflecting the presence/absence in the document of each of the tokens in `unigrams`. :param document: a list of words/tokens. :param unigrams: a list of words/tokens whose presence/absence has to be checked in `document`. :param handle_negation: if `handle_negation == True` apply `mark_negation` method to `document` before checking for unigram presence/absence. :return: a dictionary of unigram features {unigram : boolean}. >>> words = ['ice', 'police', 'riot'] >>> document = 'ice is melting due to global warming'.split() >>> sorted(extract_unigram_feats(document, words).items()) [('contains(ice)', True), ('contains(police)', False), ('contains(riot)', False)] """ features = {} if handle_negation: document = mark_negation(document) for word in unigrams: features[f"contains({word})"] = word in set(document) return features def output_markdown(filename, **kwargs): """ Write the output of an analysis to a file. """ with codecs.open(filename, "at") as outfile: text = "\n*** \n\n" text += "{} \n\n".format(time.strftime("%d/%m/%Y, %H:%M")) for k in sorted(kwargs): if isinstance(kwargs[k], dict): dictionary = kwargs[k] text += f" - **{k}:**\n" for entry in sorted(dictionary): text += f" - {entry}: {dictionary[entry]} \n" elif isinstance(kwargs[k], list): text += f" - **{k}:**\n" for entry in kwargs[k]: text += f" - {entry}\n" else: text += f" - **{k}:** {kwargs[k]} \n" outfile.write(text) def split_train_test(all_instances, n=None): """ Randomly split `n` instances of the dataset into train and test sets. :param all_instances: a list of instances (e.g. documents) that will be split. :param n: the number of instances to consider (in case we want to use only a subset). :return: two lists of instances. Train set is 8/10 of the total and test set is 2/10 of the total. """ random.seed(12345) random.shuffle(all_instances) if not n or n > len(all_instances): n = len(all_instances) train_set = all_instances[: int(0.8 * n)] test_set = all_instances[int(0.8 * n) : n] return train_set, test_set movie_reviews: CategorizedPlaintextCorpusReader = LazyCorpusLoader( "movie_reviews", CategorizedPlaintextCorpusReader, r"(?!\.).*\.txt", cat_pattern=r"(neg|pos)/.*", encoding="ascii", ) The provided code snippet includes necessary dependencies for implementing the `demo_movie_reviews` function. Write a Python function `def demo_movie_reviews(trainer, n_instances=None, output=None)` to solve the following problem: Train classifier on all instances of the Movie Reviews dataset. The corpus has been preprocessed using the default sentence tokenizer and WordPunctTokenizer. Features are composed of: - most frequent unigrams :param trainer: `train` method of a classifier. :param n_instances: the number of total reviews that have to be used for training and testing. Reviews will be equally split between positive and negative. :param output: the output file where results have to be reported. Here is the function: def demo_movie_reviews(trainer, n_instances=None, output=None): """ Train classifier on all instances of the Movie Reviews dataset. The corpus has been preprocessed using the default sentence tokenizer and WordPunctTokenizer. 
Features are composed of: - most frequent unigrams :param trainer: `train` method of a classifier. :param n_instances: the number of total reviews that have to be used for training and testing. Reviews will be equally split between positive and negative. :param output: the output file where results have to be reported. """ from nltk.corpus import movie_reviews from nltk.sentiment import SentimentAnalyzer if n_instances is not None: n_instances = int(n_instances / 2) pos_docs = [ (list(movie_reviews.words(pos_id)), "pos") for pos_id in movie_reviews.fileids("pos")[:n_instances] ] neg_docs = [ (list(movie_reviews.words(neg_id)), "neg") for neg_id in movie_reviews.fileids("neg")[:n_instances] ] # We separately split positive and negative instances to keep a balanced # uniform class distribution in both train and test sets. train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_docs = train_pos_docs + train_neg_docs testing_docs = test_pos_docs + test_neg_docs sentim_analyzer = SentimentAnalyzer() all_words = sentim_analyzer.all_words(training_docs) # Add simple unigram word features unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # Apply features to obtain a feature-value representation of our datasets training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) classifier = sentim_analyzer.train(trainer, training_set) try: classifier.show_most_informative_features() except AttributeError: print( "Your classifier does not provide a show_most_informative_features() method." ) results = sentim_analyzer.evaluate(test_set) if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown( output, Dataset="Movie_reviews", Classifier=type(classifier).__name__, Tokenizer="WordPunctTokenizer", Feats=extr, Results=results, Instances=n_instances, )
Train a classifier on all instances of the Movie Reviews dataset. The corpus has been preprocessed using the default sentence tokenizer and WordPunctTokenizer. Features are composed of: - most frequent unigrams :param trainer: `train` method of a classifier. :param n_instances: the number of total reviews that have to be used for training and testing. Reviews will be equally split between positive and negative. :param output: the output file where results have to be reported.
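A hedged invocation sketch for demo_movie_reviews; it assumes the 'movie_reviews' corpus has been downloaded, and the output file name is arbitrary.

from nltk.classify import NaiveBayesClassifier
from nltk.sentiment.util import demo_movie_reviews

# with n_instances=100, 50 positive and 50 negative reviews are used
demo_movie_reviews(NaiveBayesClassifier.train, n_instances=100, output="movie_results.md")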
170,682
import codecs import csv import json import pickle import random import re import sys import time from copy import deepcopy import nltk from nltk.corpus import CategorizedPlaintextCorpusReader from nltk.data import load from nltk.tokenize.casual import EMOTICON_RE def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None): """ Train and test a classifier on instances of the Subjective Dataset by Pang and Lee. The dataset is made of 5000 subjective and 5000 objective sentences. All tokens (words and punctuation marks) are separated by a whitespace, so we use the basic WhitespaceTokenizer to parse the data. :param trainer: `train` method of a classifier. :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file. :param n_instances: the number of total sentences that have to be used for training and testing. Sentences will be equally split between positive and negative. :param output: the output file where results have to be reported. """ from nltk.corpus import subjectivity from nltk.sentiment import SentimentAnalyzer if n_instances is not None: n_instances = int(n_instances / 2) subj_docs = [ (sent, "subj") for sent in subjectivity.sents(categories="subj")[:n_instances] ] obj_docs = [ (sent, "obj") for sent in subjectivity.sents(categories="obj")[:n_instances] ] # We separately split subjective and objective instances to keep a balanced # uniform class distribution in both train and test sets. train_subj_docs, test_subj_docs = split_train_test(subj_docs) train_obj_docs, test_obj_docs = split_train_test(obj_docs) training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs] ) # Add simple unigram word features handling negation unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # Apply features to obtain a feature-value representation of our datasets training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) classifier = sentim_analyzer.train(trainer, training_set) try: classifier.show_most_informative_features() except AttributeError: print( "Your classifier does not provide a show_most_informative_features() method." ) results = sentim_analyzer.evaluate(test_set) if save_analyzer == True: sentim_analyzer.save_file(sentim_analyzer, "sa_subjectivity.pickle") if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown( output, Dataset="subjectivity", Classifier=type(classifier).__name__, Tokenizer="WhitespaceTokenizer", Feats=extr, Instances=n_instances, Results=results, ) return sentim_analyzer def load( resource_url, format="auto", cache=True, verbose=False, logic_parser=None, fstruct_reader=None, encoding=None, ): """ Load a given resource from the NLTK data package. 
The following resource formats are currently supported: - ``pickle`` - ``json`` - ``yaml`` - ``cfg`` (context free grammars) - ``pcfg`` (probabilistic CFGs) - ``fcfg`` (feature-based CFGs) - ``fol`` (formulas of First Order Logic) - ``logic`` (Logical formulas to be parsed by the given logic_parser) - ``val`` (valuation of First Order Logic model) - ``text`` (the file contents as a unicode string) - ``raw`` (the raw file contents as a byte string) If no format is specified, ``load()`` will attempt to determine a format based on the resource name's file extension. If that fails, ``load()`` will raise a ``ValueError`` exception. For all text formats (everything except ``pickle``, ``json``, ``yaml`` and ``raw``), it tries to decode the raw contents using UTF-8, and if that doesn't work, it tries with ISO-8859-1 (Latin-1), unless the ``encoding`` is specified. :type resource_url: str :param resource_url: A URL specifying where the resource should be loaded from. The default protocol is "nltk:", which searches for the file in the the NLTK data package. :type cache: bool :param cache: If true, add this resource to a cache. If load() finds a resource in its cache, then it will return it from the cache rather than loading it. :type verbose: bool :param verbose: If true, print a message when loading a resource. Messages are not displayed when a resource is retrieved from the cache. :type logic_parser: LogicParser :param logic_parser: The parser that will be used to parse logical expressions. :type fstruct_reader: FeatStructReader :param fstruct_reader: The parser that will be used to parse the feature structure of an fcfg. :type encoding: str :param encoding: the encoding of the input; only used for text formats. """ resource_url = normalize_resource_url(resource_url) resource_url = add_py3_data(resource_url) # Determine the format of the resource. if format == "auto": resource_url_parts = resource_url.split(".") ext = resource_url_parts[-1] if ext == "gz": ext = resource_url_parts[-2] format = AUTO_FORMATS.get(ext) if format is None: raise ValueError( "Could not determine format for %s based " 'on its file\nextension; use the "format" ' "argument to specify the format explicitly." % resource_url ) if format not in FORMATS: raise ValueError(f"Unknown format type: {format}!") # If we've cached the resource, then just return it. if cache: resource_val = _resource_cache.get((resource_url, format)) if resource_val is not None: if verbose: print(f"<<Using cached copy of {resource_url}>>") return resource_val # Let the user know what's going on. if verbose: print(f"<<Loading {resource_url}>>") # Load the resource. opened_resource = _open(resource_url) if format == "raw": resource_val = opened_resource.read() elif format == "pickle": resource_val = pickle.load(opened_resource) elif format == "json": import json from nltk.jsontags import json_tags resource_val = json.load(opened_resource) tag = None if len(resource_val) != 1: tag = next(resource_val.keys()) if tag not in json_tags: raise ValueError("Unknown json tag.") elif format == "yaml": import yaml resource_val = yaml.safe_load(opened_resource) else: # The resource is a text format. 
binary_data = opened_resource.read() if encoding is not None: string_data = binary_data.decode(encoding) else: try: string_data = binary_data.decode("utf-8") except UnicodeDecodeError: string_data = binary_data.decode("latin-1") if format == "text": resource_val = string_data elif format == "cfg": resource_val = grammar.CFG.fromstring(string_data, encoding=encoding) elif format == "pcfg": resource_val = grammar.PCFG.fromstring(string_data, encoding=encoding) elif format == "fcfg": resource_val = grammar.FeatureGrammar.fromstring( string_data, logic_parser=logic_parser, fstruct_reader=fstruct_reader, encoding=encoding, ) elif format == "fol": resource_val = sem.read_logic( string_data, logic_parser=sem.logic.LogicParser(), encoding=encoding, ) elif format == "logic": resource_val = sem.read_logic( string_data, logic_parser=logic_parser, encoding=encoding ) elif format == "val": resource_val = sem.read_valuation(string_data, encoding=encoding) else: raise AssertionError( "Internal NLTK error: Format %s isn't " "handled by nltk.data.load()" % (format,) ) opened_resource.close() # If requested, add it to the cache. if cache: try: _resource_cache[(resource_url, format)] = resource_val # TODO: add this line # print('<<Caching a copy of %s>>' % (resource_url,)) except TypeError: # We can't create weak references to some object types, like # strings and tuples. For now, just don't cache them. pass return resource_val The provided code snippet includes necessary dependencies for implementing the `demo_sent_subjectivity` function. Write a Python function `def demo_sent_subjectivity(text)` to solve the following problem: Classify a single sentence as subjective or objective using a stored SentimentAnalyzer. :param text: a sentence whose subjectivity has to be classified. Here is the function: def demo_sent_subjectivity(text): """ Classify a single sentence as subjective or objective using a stored SentimentAnalyzer. :param text: a sentence whose subjectivity has to be classified. """ from nltk.classify import NaiveBayesClassifier from nltk.tokenize import regexp word_tokenizer = regexp.WhitespaceTokenizer() try: sentim_analyzer = load("sa_subjectivity.pickle") except LookupError: print("Cannot find the sentiment analyzer you want to load.") print("Training a new one using NaiveBayesClassifier.") sentim_analyzer = demo_subjectivity(NaiveBayesClassifier.train, True) # Tokenize and convert to lower case tokenized_text = [word.lower() for word in word_tokenizer.tokenize(text)] print(sentim_analyzer.classify(tokenized_text))
Classify a single sentence as subjective or objective using a stored SentimentAnalyzer. :param text: a sentence whose subjectivity has to be classified.
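A minimal usage sketch for the function above, hedged: it assumes these demo helpers live in nltk.sentiment.util (as in the NLTK distribution) and that the subjectivity corpus and tokenizer data are available, since a missing sa_subjectivity.pickle triggers training of a fresh NaiveBayesClassifier on first use.

from nltk.sentiment.util import demo_sent_subjectivity

# Tokenizes on whitespace, lower-cases, and prints the predicted label;
# the subjectivity corpus behind the classifier uses "subj" and "obj".
demo_sent_subjectivity("The plot twists kept me guessing until the very end")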
170,683
import codecs import csv import json import pickle import random import re import sys import time from copy import deepcopy import nltk from nltk.corpus import CategorizedPlaintextCorpusReader from nltk.data import load from nltk.tokenize.casual import EMOTICON_RE def _show_plot(x_values, y_values, x_labels=None, y_labels=None): try: import matplotlib.pyplot as plt except ImportError as e: raise ImportError( "The plot function requires matplotlib to be installed." "See https://matplotlib.org/" ) from e plt.locator_params(axis="y", nbins=3) axes = plt.axes() axes.yaxis.grid() plt.plot(x_values, y_values, "ro", color="red") plt.ylim(ymin=-1.2, ymax=1.2) plt.tight_layout(pad=5) if x_labels: plt.xticks(x_values, x_labels, rotation="vertical") if y_labels: plt.yticks([-1, 0, 1], y_labels, rotation="horizontal") # Pad margins so that markers are not clipped by the axes plt.margins(0.2) plt.show() opinion_lexicon: OpinionLexiconCorpusReader = LazyCorpusLoader( "opinion_lexicon", OpinionLexiconCorpusReader, r"(\w+)\-words\.txt", encoding="ISO-8859-2", ) The provided code snippet includes necessary dependencies for implementing the `demo_liu_hu_lexicon` function. Write a Python function `def demo_liu_hu_lexicon(sentence, plot=False)` to solve the following problem: Basic example of sentiment classification using Liu and Hu opinion lexicon. This function simply counts the number of positive, negative and neutral words in the sentence and classifies it depending on which polarity is more represented. Words that do not appear in the lexicon are considered as neutral. :param sentence: a sentence whose polarity has to be classified. :param plot: if True, plot a visual representation of the sentence polarity. Here is the function: def demo_liu_hu_lexicon(sentence, plot=False): """ Basic example of sentiment classification using Liu and Hu opinion lexicon. This function simply counts the number of positive, negative and neutral words in the sentence and classifies it depending on which polarity is more represented. Words that do not appear in the lexicon are considered as neutral. :param sentence: a sentence whose polarity has to be classified. :param plot: if True, plot a visual representation of the sentence polarity. """ from nltk.corpus import opinion_lexicon from nltk.tokenize import treebank tokenizer = treebank.TreebankWordTokenizer() pos_words = 0 neg_words = 0 tokenized_sent = [word.lower() for word in tokenizer.tokenize(sentence)] x = list(range(len(tokenized_sent))) # x axis for the plot y = [] for word in tokenized_sent: if word in opinion_lexicon.positive(): pos_words += 1 y.append(1) # positive elif word in opinion_lexicon.negative(): neg_words += 1 y.append(-1) # negative else: y.append(0) # neutral if pos_words > neg_words: print("Positive") elif pos_words < neg_words: print("Negative") elif pos_words == neg_words: print("Neutral") if plot == True: _show_plot( x, y, x_labels=tokenized_sent, y_labels=["Negative", "Neutral", "Positive"] )
Basic example of sentiment classification using Liu and Hu opinion lexicon. This function simply counts the number of positive, negative and neutral words in the sentence and classifies it depending on which polarity is more represented. Words that do not appear in the lexicon are considered as neutral. :param sentence: a sentence whose polarity has to be classified. :param plot: if True, plot a visual representation of the sentence polarity.
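A hedged variant of the same counting idea, not the library's implementation: converting the lexicon lists to sets up front avoids a linear scan of opinion_lexicon.positive()/negative() for every token (assumes the opinion_lexicon corpus has been downloaded).

from nltk.corpus import opinion_lexicon
from nltk.tokenize import treebank

pos_set = set(opinion_lexicon.positive())   # set membership is O(1) per token
neg_set = set(opinion_lexicon.negative())

tokenizer = treebank.TreebankWordTokenizer()
tokens = [w.lower() for w in tokenizer.tokenize("The food was great but the service was dreadful")]

score = sum((w in pos_set) - (w in neg_set) for w in tokens)
print("Positive" if score > 0 else "Negative" if score < 0 else "Neutral")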
170,684
import codecs import csv import json import pickle import random import re import sys import time from copy import deepcopy import nltk from nltk.corpus import CategorizedPlaintextCorpusReader from nltk.data import load from nltk.tokenize.casual import EMOTICON_RE The provided code snippet includes necessary dependencies for implementing the `demo_vader_instance` function. Write a Python function `def demo_vader_instance(text)` to solve the following problem: Output polarity scores for a text using Vader approach. :param text: a text whose polarity has to be evaluated. Here is the function: def demo_vader_instance(text): """ Output polarity scores for a text using Vader approach. :param text: a text whose polarity has to be evaluated. """ from nltk.sentiment import SentimentIntensityAnalyzer vader_analyzer = SentimentIntensityAnalyzer() print(vader_analyzer.polarity_scores(text))
Output polarity scores for a text using Vader approach. :param text: a text whose polarity has to be evaluated.
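For reference, calling the analyzer directly shows the shape of the result the demo prints; this assumes the vader_lexicon resource has been downloaded, and the values hinted at in the comment are illustrative only.

from nltk.sentiment import SentimentIntensityAnalyzer

sia = SentimentIntensityAnalyzer()
scores = sia.polarity_scores("NLTK makes this surprisingly easy!")
# A dict with 'neg', 'neu', 'pos' proportions plus a normalized 'compound'
# score in [-1, 1]; demo_vader_tweets below thresholds 'compound' at 0 to pick a label.
print(scores)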
170,685
import codecs import csv import json import pickle import random import re import sys import time from copy import deepcopy import nltk from nltk.corpus import CategorizedPlaintextCorpusReader from nltk.data import load from nltk.tokenize.casual import EMOTICON_RE def output_markdown(filename, **kwargs): """ Write the output of an analysis to a file. """ with codecs.open(filename, "at") as outfile: text = "\n*** \n\n" text += "{} \n\n".format(time.strftime("%d/%m/%Y, %H:%M")) for k in sorted(kwargs): if isinstance(kwargs[k], dict): dictionary = kwargs[k] text += f" - **{k}:**\n" for entry in sorted(dictionary): text += f" - {entry}: {dictionary[entry]} \n" elif isinstance(kwargs[k], list): text += f" - **{k}:**\n" for entry in kwargs[k]: text += f" - {entry}\n" else: text += f" - **{k}:** {kwargs[k]} \n" outfile.write(text) def split_train_test(all_instances, n=None): """ Randomly split `n` instances of the dataset into train and test sets. :param all_instances: a list of instances (e.g. documents) that will be split. :param n: the number of instances to consider (in case we want to use only a subset). :return: two lists of instances. Train set is 8/10 of the total and test set is 2/10 of the total. """ random.seed(12345) random.shuffle(all_instances) if not n or n > len(all_instances): n = len(all_instances) train_set = all_instances[: int(0.8 * n)] test_set = all_instances[int(0.8 * n) : n] return train_set, test_set def json2csv_preprocess( json_file, outfile, fields, encoding="utf8", errors="replace", gzip_compress=False, skip_retweets=True, skip_tongue_tweets=True, skip_ambiguous_tweets=True, strip_off_emoticons=True, remove_duplicates=True, limit=None, ): """ Convert json file to csv file, preprocessing each row to obtain a suitable dataset for tweets Semantic Analysis. :param json_file: the original json file containing tweets. :param outfile: the output csv filename. :param fields: a list of fields that will be extracted from the json file and kept in the output csv file. :param encoding: the encoding of the files. :param errors: the error handling strategy for the output writer. :param gzip_compress: if True, create a compressed GZIP file. :param skip_retweets: if True, remove retweets. :param skip_tongue_tweets: if True, remove tweets containing ":P" and ":-P" emoticons. :param skip_ambiguous_tweets: if True, remove tweets containing both happy and sad emoticons. :param strip_off_emoticons: if True, strip off emoticons from all tweets. :param remove_duplicates: if True, remove tweets appearing more than once. :param limit: an integer to set the number of tweets to convert. After the limit is reached the conversion will stop. It can be useful to create subsets of the original tweets json data. 
""" with codecs.open(json_file, encoding=encoding) as fp: (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) # write the list of fields as header writer.writerow(fields) if remove_duplicates == True: tweets_cache = [] i = 0 for line in fp: tweet = json.loads(line) row = extract_fields(tweet, fields) try: text = row[fields.index("text")] # Remove retweets if skip_retweets == True: if re.search(r"\bRT\b", text): continue # Remove tweets containing ":P" and ":-P" emoticons if skip_tongue_tweets == True: if re.search(r"\:\-?P\b", text): continue # Remove tweets containing both happy and sad emoticons if skip_ambiguous_tweets == True: all_emoticons = EMOTICON_RE.findall(text) if all_emoticons: if (set(all_emoticons) & HAPPY) and (set(all_emoticons) & SAD): continue # Strip off emoticons from all tweets if strip_off_emoticons == True: row[fields.index("text")] = re.sub( r"(?!\n)\s+", " ", EMOTICON_RE.sub("", text) ) # Remove duplicate tweets if remove_duplicates == True: if row[fields.index("text")] in tweets_cache: continue else: tweets_cache.append(row[fields.index("text")]) except ValueError: pass writer.writerow(row) i += 1 if limit and i >= limit: break outf.close() def parse_tweets_set( filename, label, word_tokenizer=None, sent_tokenizer=None, skip_header=True ): """ Parse csv file containing tweets and output data a list of (text, label) tuples. :param filename: the input csv filename. :param label: the label to be appended to each tweet contained in the csv file. :param word_tokenizer: the tokenizer instance that will be used to tokenize each sentence into tokens (e.g. WordPunctTokenizer() or BlanklineTokenizer()). If no word_tokenizer is specified, tweets will not be tokenized. :param sent_tokenizer: the tokenizer that will be used to split each tweet into sentences. :param skip_header: if True, skip the first line of the csv file (which usually contains headers). :return: a list of (text, label) tuples. """ tweets = [] if not sent_tokenizer: sent_tokenizer = load("tokenizers/punkt/english.pickle") with codecs.open(filename, "rt") as csvfile: reader = csv.reader(csvfile) if skip_header == True: next(reader, None) # skip the header i = 0 for tweet_id, text in reader: # text = text[1] i += 1 sys.stdout.write(f"Loaded {i} tweets\r") # Apply sentence and word tokenizer to text if word_tokenizer: tweet = [ w for sent in sent_tokenizer.tokenize(text) for w in word_tokenizer.tokenize(sent) ] else: tweet = text tweets.append((tweet, label)) print(f"Loaded {i} tweets") return tweets twitter_samples: TwitterCorpusReader = LazyCorpusLoader( "twitter_samples", TwitterCorpusReader, r".*\.json" ) class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]): default_factory: Callable[[], _VT] def __init__(self, **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ... def __init__( self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... def __missing__(self, key: _KT) -> _VT: ... def copy(self: _S) -> _S: ... 
The provided code snippet includes necessary dependencies for implementing the `demo_vader_tweets` function. Write a Python function `def demo_vader_tweets(n_instances=None, output=None)` to solve the following problem: Classify 10000 positive and negative tweets using Vader approach. :param n_instances: the number of total tweets that have to be classified. :param output: the output file where results have to be reported. Here is the function: def demo_vader_tweets(n_instances=None, output=None): """ Classify 10000 positive and negative tweets using Vader approach. :param n_instances: the number of total tweets that have to be classified. :param output: the output file where results have to be reported. """ from collections import defaultdict from nltk.corpus import twitter_samples from nltk.metrics import accuracy as eval_accuracy from nltk.metrics import f_measure as eval_f_measure from nltk.metrics import precision as eval_precision from nltk.metrics import recall as eval_recall from nltk.sentiment import SentimentIntensityAnalyzer if n_instances is not None: n_instances = int(n_instances / 2) fields = ["id", "text"] positive_json = twitter_samples.abspath("positive_tweets.json") positive_csv = "positive_tweets.csv" json2csv_preprocess( positive_json, positive_csv, fields, strip_off_emoticons=False, limit=n_instances, ) negative_json = twitter_samples.abspath("negative_tweets.json") negative_csv = "negative_tweets.csv" json2csv_preprocess( negative_json, negative_csv, fields, strip_off_emoticons=False, limit=n_instances, ) pos_docs = parse_tweets_set(positive_csv, label="pos") neg_docs = parse_tweets_set(negative_csv, label="neg") # We separately split subjective and objective instances to keep a balanced # uniform class distribution in both train and test sets. train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_tweets = train_pos_docs + train_neg_docs testing_tweets = test_pos_docs + test_neg_docs vader_analyzer = SentimentIntensityAnalyzer() gold_results = defaultdict(set) test_results = defaultdict(set) acc_gold_results = [] acc_test_results = [] labels = set() num = 0 for i, (text, label) in enumerate(testing_tweets): labels.add(label) gold_results[label].add(i) acc_gold_results.append(label) score = vader_analyzer.polarity_scores(text)["compound"] if score > 0: observed = "pos" else: observed = "neg" num += 1 acc_test_results.append(observed) test_results[observed].add(i) metrics_results = {} for label in labels: accuracy_score = eval_accuracy(acc_gold_results, acc_test_results) metrics_results["Accuracy"] = accuracy_score precision_score = eval_precision(gold_results[label], test_results[label]) metrics_results[f"Precision [{label}]"] = precision_score recall_score = eval_recall(gold_results[label], test_results[label]) metrics_results[f"Recall [{label}]"] = recall_score f_measure_score = eval_f_measure(gold_results[label], test_results[label]) metrics_results[f"F-measure [{label}]"] = f_measure_score for result in sorted(metrics_results): print(f"{result}: {metrics_results[result]}") if output: output_markdown( output, Approach="Vader", Dataset="labeled_tweets", Instances=n_instances, Results=metrics_results, )
Classify 10000 positive and negative tweets using Vader approach. :param n_instances: the number of total tweets that have to be classified. :param output: the output file where results have to be reported.
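The scoring loop above follows a common NLTK pattern: per-label sets of instance indices for the gold and predicted labels, fed to the set-based metrics, plus flat label lists for accuracy. A self-contained sketch of just that pattern with toy labels rather than the tweet data:

from collections import defaultdict
from nltk.metrics import accuracy, f_measure, precision, recall

gold = ["pos", "neg", "pos", "neg", "pos"]
pred = ["pos", "pos", "pos", "neg", "neg"]

gold_sets, pred_sets = defaultdict(set), defaultdict(set)
for i, (g, p) in enumerate(zip(gold, pred)):
    gold_sets[g].add(i)    # indices of instances whose true label is g
    pred_sets[p].add(i)    # indices of instances predicted as p

print("Accuracy:", accuracy(gold, pred))
for label in sorted(gold_sets):
    print(label,
          "P =", precision(gold_sets[label], pred_sets[label]),
          "R =", recall(gold_sets[label], pred_sets[label]),
          "F =", f_measure(gold_sets[label], pred_sets[label]))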
170,686
import html import re from collections import defaultdict def extract_rels(subjclass, objclass, doc, corpus="ace", pattern=None, window=10): """ Filter the output of ``semi_rel2reldict`` according to specified NE classes and a filler pattern. The parameters ``subjclass`` and ``objclass`` can be used to restrict the Named Entities to particular types (any of 'LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'). :param subjclass: the class of the subject Named Entity. :type subjclass: str :param objclass: the class of the object Named Entity. :type objclass: str :param doc: input document :type doc: ieer document or a list of chunk trees :param corpus: name of the corpus to take as input; possible values are 'ieer' and 'conll2002' :type corpus: str :param pattern: a regular expression for filtering the fillers of retrieved triples. :type pattern: SRE_Pattern :param window: filters out fillers which exceed this threshold :type window: int :return: see ``mk_reldicts`` :rtype: list(defaultdict) """ if subjclass and subjclass not in NE_CLASSES[corpus]: if _expand(subjclass) in NE_CLASSES[corpus]: subjclass = _expand(subjclass) else: raise ValueError( "your value for the subject type has not been recognized: %s" % subjclass ) if objclass and objclass not in NE_CLASSES[corpus]: if _expand(objclass) in NE_CLASSES[corpus]: objclass = _expand(objclass) else: raise ValueError( "your value for the object type has not been recognized: %s" % objclass ) if corpus == "ace" or corpus == "conll2002": pairs = tree2semi_rel(doc) elif corpus == "ieer": pairs = tree2semi_rel(doc.text) + tree2semi_rel(doc.headline) else: raise ValueError("corpus type not recognized") reldicts = semi_rel2reldict(pairs) relfilter = lambda x: ( x["subjclass"] == subjclass and len(x["filler"].split()) <= window and pattern.match(x["filler"]) and x["objclass"] == objclass ) return list(filter(relfilter, reldicts)) def rtuple(reldict, lcon=False, rcon=False): """ Pretty print the reldict as an rtuple. :param reldict: a relation dictionary :type reldict: defaultdict """ items = [ class_abbrev(reldict["subjclass"]), reldict["subjtext"], reldict["filler"], class_abbrev(reldict["objclass"]), reldict["objtext"], ] format = "[%s: %r] %r [%s: %r]" if lcon: items = [reldict["lcon"]] + items format = "...%r)" + format if rcon: items.append(reldict["rcon"]) format = format + "(%r..." printargs = tuple(items) return format % printargs def clause(reldict, relsym): """ Print the relation in clausal form. :param reldict: a relation dictionary :type reldict: defaultdict :param relsym: a label for the relation :type relsym: str """ items = (relsym, reldict["subjsym"], reldict["objsym"]) return "%s(%r, %r)" % items ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*") The provided code snippet includes necessary dependencies for implementing the `in_demo` function. Write a Python function `def in_demo(trace=0, sql=True)` to solve the following problem: Select pairs of organizations and locations whose mentions occur with an intervening occurrence of the preposition "in". If the sql parameter is set to True, then the entity pairs are loaded into an in-memory database, and subsequently pulled out using an SQL "SELECT" query. Here is the function: def in_demo(trace=0, sql=True): """ Select pairs of organizations and locations whose mentions occur with an intervening occurrence of the preposition "in". 
If the sql parameter is set to True, then the entity pairs are loaded into an in-memory database, and subsequently pulled out using an SQL "SELECT" query. """ from nltk.corpus import ieer if sql: try: import sqlite3 connection = sqlite3.connect(":memory:") cur = connection.cursor() cur.execute( """create table Locations (OrgName text, LocationName text, DocID text)""" ) except ImportError: import warnings warnings.warn("Cannot import sqlite; sql flag will be ignored.") IN = re.compile(r".*\bin\b(?!\b.+ing)") print() print("IEER: in(ORG, LOC) -- just the clauses:") print("=" * 45) for file in ieer.fileids(): for doc in ieer.parsed_docs(file): if trace: print(doc.docno) print("=" * 15) for rel in extract_rels("ORG", "LOC", doc, corpus="ieer", pattern=IN): print(clause(rel, relsym="IN")) if sql: try: rtuple = (rel["subjtext"], rel["objtext"], doc.docno) cur.execute( """insert into Locations values (?, ?, ?)""", rtuple, ) connection.commit() except NameError: pass if sql: try: cur.execute( """select OrgName from Locations where LocationName = 'Atlanta'""" ) print() print("Extract data from SQL table: ORGs in Atlanta") print("-" * 15) for row in cur: print(row) except NameError: pass
Select pairs of organizations and locations whose mentions occur with an intervening occurrence of the preposition "in". If the sql parameter is set to True, then the entity pairs are loaded into an in-memory database, and subsequently pulled out using an SQL "SELECT" query.
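Stripped of the SQL bookkeeping, the core extraction can be sketched standalone; it needs the ieer corpus, and the filler pattern is the same one used above.

import re
from nltk.corpus import ieer
from nltk.sem import relextract

IN = re.compile(r".*\bin\b(?!\b.+ing)")   # "in", but not as part of an -ing form
for fileid in ieer.fileids():
    for doc in ieer.parsed_docs(fileid):
        for rel in relextract.extract_rels("ORG", "LOC", doc, corpus="ieer", pattern=IN):
            print(relextract.rtuple(rel))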
170,687
import html import re from collections import defaultdict def extract_rels(subjclass, objclass, doc, corpus="ace", pattern=None, window=10): """ Filter the output of ``semi_rel2reldict`` according to specified NE classes and a filler pattern. The parameters ``subjclass`` and ``objclass`` can be used to restrict the Named Entities to particular types (any of 'LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'). :param subjclass: the class of the subject Named Entity. :type subjclass: str :param objclass: the class of the object Named Entity. :type objclass: str :param doc: input document :type doc: ieer document or a list of chunk trees :param corpus: name of the corpus to take as input; possible values are 'ieer' and 'conll2002' :type corpus: str :param pattern: a regular expression for filtering the fillers of retrieved triples. :type pattern: SRE_Pattern :param window: filters out fillers which exceed this threshold :type window: int :return: see ``mk_reldicts`` :rtype: list(defaultdict) """ if subjclass and subjclass not in NE_CLASSES[corpus]: if _expand(subjclass) in NE_CLASSES[corpus]: subjclass = _expand(subjclass) else: raise ValueError( "your value for the subject type has not been recognized: %s" % subjclass ) if objclass and objclass not in NE_CLASSES[corpus]: if _expand(objclass) in NE_CLASSES[corpus]: objclass = _expand(objclass) else: raise ValueError( "your value for the object type has not been recognized: %s" % objclass ) if corpus == "ace" or corpus == "conll2002": pairs = tree2semi_rel(doc) elif corpus == "ieer": pairs = tree2semi_rel(doc.text) + tree2semi_rel(doc.headline) else: raise ValueError("corpus type not recognized") reldicts = semi_rel2reldict(pairs) relfilter = lambda x: ( x["subjclass"] == subjclass and len(x["filler"].split()) <= window and pattern.match(x["filler"]) and x["objclass"] == objclass ) return list(filter(relfilter, reldicts)) def rtuple(reldict, lcon=False, rcon=False): """ Pretty print the reldict as an rtuple. :param reldict: a relation dictionary :type reldict: defaultdict """ items = [ class_abbrev(reldict["subjclass"]), reldict["subjtext"], reldict["filler"], class_abbrev(reldict["objclass"]), reldict["objtext"], ] format = "[%s: %r] %r [%s: %r]" if lcon: items = [reldict["lcon"]] + items format = "...%r)" + format if rcon: items.append(reldict["rcon"]) format = format + "(%r..." printargs = tuple(items) return format % printargs ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*") def roles_demo(trace=0): from nltk.corpus import ieer roles = r""" (.*( # assorted roles analyst| chair(wo)?man| commissioner| counsel| director| economist| editor| executive| foreman| governor| head| lawyer| leader| librarian).*)| manager| partner| president| producer| professor| researcher| spokes(wo)?man| writer| ,\sof\sthe?\s* # "X, of (the) Y" """ ROLES = re.compile(roles, re.VERBOSE) print() print("IEER: has_role(PER, ORG) -- raw rtuples:") print("=" * 45) for file in ieer.fileids(): for doc in ieer.parsed_docs(file): lcon = rcon = False if trace: print(doc.docno) print("=" * 15) lcon = rcon = True for rel in extract_rels("PER", "ORG", doc, corpus="ieer", pattern=ROLES): print(rtuple(rel, lcon=lcon, rcon=rcon))
null
170,688
import html import re from collections import defaultdict ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*") def ieer_headlines(): from nltk.corpus import ieer from nltk.tree import Tree print("IEER: First 20 Headlines") print("=" * 45) trees = [ (doc.docno, doc.headline) for file in ieer.fileids() for doc in ieer.parsed_docs(file) ] for tree in trees[:20]: print() print("%s:\n%s" % tree)
null
170,689
import html import re from collections import defaultdict def extract_rels(subjclass, objclass, doc, corpus="ace", pattern=None, window=10): """ Filter the output of ``semi_rel2reldict`` according to specified NE classes and a filler pattern. The parameters ``subjclass`` and ``objclass`` can be used to restrict the Named Entities to particular types (any of 'LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'). :param subjclass: the class of the subject Named Entity. :type subjclass: str :param objclass: the class of the object Named Entity. :type objclass: str :param doc: input document :type doc: ieer document or a list of chunk trees :param corpus: name of the corpus to take as input; possible values are 'ieer' and 'conll2002' :type corpus: str :param pattern: a regular expression for filtering the fillers of retrieved triples. :type pattern: SRE_Pattern :param window: filters out fillers which exceed this threshold :type window: int :return: see ``mk_reldicts`` :rtype: list(defaultdict) """ if subjclass and subjclass not in NE_CLASSES[corpus]: if _expand(subjclass) in NE_CLASSES[corpus]: subjclass = _expand(subjclass) else: raise ValueError( "your value for the subject type has not been recognized: %s" % subjclass ) if objclass and objclass not in NE_CLASSES[corpus]: if _expand(objclass) in NE_CLASSES[corpus]: objclass = _expand(objclass) else: raise ValueError( "your value for the object type has not been recognized: %s" % objclass ) if corpus == "ace" or corpus == "conll2002": pairs = tree2semi_rel(doc) elif corpus == "ieer": pairs = tree2semi_rel(doc.text) + tree2semi_rel(doc.headline) else: raise ValueError("corpus type not recognized") reldicts = semi_rel2reldict(pairs) relfilter = lambda x: ( x["subjclass"] == subjclass and len(x["filler"].split()) <= window and pattern.match(x["filler"]) and x["objclass"] == objclass ) return list(filter(relfilter, reldicts)) def rtuple(reldict, lcon=False, rcon=False): """ Pretty print the reldict as an rtuple. :param reldict: a relation dictionary :type reldict: defaultdict """ items = [ class_abbrev(reldict["subjclass"]), reldict["subjtext"], reldict["filler"], class_abbrev(reldict["objclass"]), reldict["objtext"], ] format = "[%s: %r] %r [%s: %r]" if lcon: items = [reldict["lcon"]] + items format = "...%r)" + format if rcon: items.append(reldict["rcon"]) format = format + "(%r..." printargs = tuple(items) return format % printargs conll2002: ConllChunkCorpusReader = LazyCorpusLoader( "conll2002", ConllChunkCorpusReader, r".*\.(test|train).*", ("LOC", "PER", "ORG", "MISC"), encoding="utf-8", ) The provided code snippet includes necessary dependencies for implementing the `conllned` function. Write a Python function `def conllned(trace=1)` to solve the following problem: Find the copula+'van' relation ('of') in the Dutch tagged training corpus from CoNLL 2002. Here is the function: def conllned(trace=1): """ Find the copula+'van' relation ('of') in the Dutch tagged training corpus from CoNLL 2002. 
""" from nltk.corpus import conll2002 vnv = """ ( is/V| # 3rd sing present and was/V| # past forms of the verb zijn ('be') werd/V| # and also present wordt/V # past of worden ('become) ) .* # followed by anything van/Prep # followed by van ('of') """ VAN = re.compile(vnv, re.VERBOSE) print() print("Dutch CoNLL2002: van(PER, ORG) -- raw rtuples with context:") print("=" * 45) for doc in conll2002.chunked_sents("ned.train"): lcon = rcon = False if trace: lcon = rcon = True for rel in extract_rels( "PER", "ORG", doc, corpus="conll2002", pattern=VAN, window=10 ): print(rtuple(rel, lcon=lcon, rcon=rcon))
Find the copula+'van' relation ('of') in the Dutch tagged training corpus from CoNLL 2002.
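The same extractor works directly on the chunked CoNLL 2002 sentences. A compressed sketch with a deliberately simplified filler pattern (the verbose vnv pattern above is the authoritative one); it needs the conll2002 corpus and only scans a slice of the training data to keep the run short.

import re
from nltk.corpus import conll2002
from nltk.sem import relextract

VAN = re.compile(r".*\bvan\b")   # simplified: any filler containing "van"
for doc in conll2002.chunked_sents("ned.train")[:200]:
    for rel in relextract.extract_rels("PER", "ORG", doc, corpus="conll2002",
                                       pattern=VAN, window=10):
        print(relextract.rtuple(rel))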
170,690
import html import re from collections import defaultdict def extract_rels(subjclass, objclass, doc, corpus="ace", pattern=None, window=10): def clause(reldict, relsym): conll2002: ConllChunkCorpusReader = LazyCorpusLoader( "conll2002", ConllChunkCorpusReader, r".*\.(test|train).*", ("LOC", "PER", "ORG", "MISC"), encoding="utf-8", ) def conllesp(): from nltk.corpus import conll2002 de = """ .* ( de/SP| del/SP ) """ DE = re.compile(de, re.VERBOSE) print() print("Spanish CoNLL2002: de(ORG, LOC) -- just the first 10 clauses:") print("=" * 45) rels = [ rel for doc in conll2002.chunked_sents("esp.train") for rel in extract_rels("ORG", "LOC", doc, corpus="conll2002", pattern=DE) ] for r in rels[:10]: print(clause(r, relsym="DE")) print()
null
170,691
import html import re from collections import defaultdict def extract_rels(subjclass, objclass, doc, corpus="ace", pattern=None, window=10): def rtuple(reldict, lcon=False, rcon=False): def ne_chunked(): print() print("1500 Sentences from Penn Treebank, as processed by NLTK NE Chunker") print("=" * 45) ROLE = re.compile( r".*(chairman|president|trader|scientist|economist|analyst|partner).*" ) rels = [] for i, sent in enumerate(nltk.corpus.treebank.tagged_sents()[:1500]): sent = nltk.ne_chunk(sent) rels = extract_rels("PER", "ORG", sent, corpus="ace", pattern=ROLE, window=7) for rel in rels: print(f"{i:<5}{rtuple(rel)}")
null
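For context on the chunking step feeding extract_rels here, a minimal sketch of what nltk.ne_chunk produces on a single sentence; it assumes the standard tokenizer, tagger and named-entity chunker models have been fetched with nltk.download(), and the example sentence is made up.

import nltk

sent = "Rudolph Agnew, former chairman of Consolidated Gold Fields, was named a director."
tree = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sent)))
# A Tree whose PERSON/ORGANIZATION subtrees are exactly what
# extract_rels(..., corpus="ace") walks over.
print(tree)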
170,692
import codecs from nltk.sem import evaluate def interpret_sents(inputs, grammar, semkey="SEM", trace=0): """ Add the semantic representation to each syntactic parse tree of each input sentence. :param inputs: a list of sentences :type inputs: list(str) :param grammar: ``FeatureGrammar`` or name of feature-based grammar :type grammar: nltk.grammar.FeatureGrammar :return: a mapping from sentences to lists of pairs (parse-tree, semantic-representations) :rtype: list(list(tuple(nltk.tree.Tree, nltk.sem.logic.ConstantExpression))) """ return [ [(syn, root_semrep(syn, semkey)) for syn in syntrees] for syntrees in parse_sents(inputs, grammar, trace=trace) ] class FeatureGrammar(CFG): """ A feature-based grammar. This is equivalent to a ``CFG`` whose nonterminals are all ``FeatStructNonterminal``. A grammar consists of a start state and a set of productions. The set of terminals and nonterminals is implicitly specified by the productions. """ def __init__(self, start, productions): """ Create a new feature-based grammar, from the given start state and set of ``Productions``. :param start: The start symbol :type start: FeatStructNonterminal :param productions: The list of productions that defines the grammar :type productions: list(Production) """ CFG.__init__(self, start, productions) # The difference with CFG is that the productions are # indexed on the TYPE feature of the nonterminals. # This is calculated by the method _get_type_if_possible(). def _calculate_indexes(self): self._lhs_index = {} self._rhs_index = {} self._empty_index = {} self._empty_productions = [] self._lexical_index = {} for prod in self._productions: # Left hand side. lhs = self._get_type_if_possible(prod._lhs) if lhs not in self._lhs_index: self._lhs_index[lhs] = [] self._lhs_index[lhs].append(prod) if prod._rhs: # First item in right hand side. rhs0 = self._get_type_if_possible(prod._rhs[0]) if rhs0 not in self._rhs_index: self._rhs_index[rhs0] = [] self._rhs_index[rhs0].append(prod) else: # The right hand side is empty. if lhs not in self._empty_index: self._empty_index[lhs] = [] self._empty_index[lhs].append(prod) self._empty_productions.append(prod) # Lexical tokens in the right hand side. for token in prod._rhs: if is_terminal(token): self._lexical_index.setdefault(token, set()).add(prod) def fromstring( cls, input, features=None, logic_parser=None, fstruct_reader=None, encoding=None ): """ Return a feature structure based grammar. :param input: a grammar, either in the form of a string or else as a list of strings. :param features: a tuple of features (default: SLASH, TYPE) :param logic_parser: a parser for lambda-expressions, by default, ``LogicParser()`` :param fstruct_reader: a feature structure parser (only if features and logic_parser is None) """ if features is None: features = (SLASH, TYPE) if fstruct_reader is None: fstruct_reader = FeatStructReader( features, FeatStructNonterminal, logic_parser=logic_parser ) elif logic_parser is not None: raise Exception( "'logic_parser' and 'fstruct_reader' must " "not both be set" ) start, productions = read_grammar( input, fstruct_reader.read_partial, encoding=encoding ) return cls(start, productions) def productions(self, lhs=None, rhs=None, empty=False): """ Return the grammar productions, filtered by the left-hand side or the first item in the right-hand side. :param lhs: Only return productions with the given left-hand side. :param rhs: Only return productions with the given first item in the right-hand side. 
:param empty: Only return productions with an empty right-hand side. :rtype: list(Production) """ if rhs and empty: raise ValueError( "You cannot select empty and non-empty " "productions at the same time." ) # no constraints so return everything if not lhs and not rhs: if empty: return self._empty_productions else: return self._productions # only lhs specified so look up its index elif lhs and not rhs: if empty: return self._empty_index.get(self._get_type_if_possible(lhs), []) else: return self._lhs_index.get(self._get_type_if_possible(lhs), []) # only rhs specified so look up its index elif rhs and not lhs: return self._rhs_index.get(self._get_type_if_possible(rhs), []) # intersect else: return [ prod for prod in self._lhs_index.get(self._get_type_if_possible(lhs), []) if prod in self._rhs_index.get(self._get_type_if_possible(rhs), []) ] def leftcorners(self, cat): """ Return the set of all words that the given category can start with. Also called the "first set" in compiler construction. """ raise NotImplementedError("Not implemented yet") def leftcorner_parents(self, cat): """ Return the set of all categories for which the given category is a left corner. """ raise NotImplementedError("Not implemented yet") def _get_type_if_possible(self, item): """ Helper function which returns the ``TYPE`` feature of the ``item``, if it exists, otherwise it returns the ``item`` itself """ if isinstance(item, dict) and TYPE in item: return FeatureValueType(item[TYPE]) else: return item The provided code snippet includes necessary dependencies for implementing the `demo_legacy_grammar` function. Write a Python function `def demo_legacy_grammar()` to solve the following problem: Check that interpret_sents() is compatible with legacy grammars that use a lowercase 'sem' feature. Define 'test.fcfg' to be the following Here is the function: def demo_legacy_grammar(): """ Check that interpret_sents() is compatible with legacy grammars that use a lowercase 'sem' feature. Define 'test.fcfg' to be the following """ from nltk.grammar import FeatureGrammar g = FeatureGrammar.fromstring( """ % start S S[sem=<hello>] -> 'hello' """ ) print("Reading grammar: %s" % g) print("*" * 20) for reading in interpret_sents(["hello"], g, semkey="sem"): syn, sem = reading[0] print() print("output: ", sem)
Check that interpret_sents() is compatible with legacy grammars that use a lowercase 'sem' feature. Define 'test.fcfg' to be the following
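For comparison, the same call with the default uppercase SEM feature, sketched on the assumption that interpret_sents is the nltk.sem.util function quoted above; no semkey override is needed in this case.

from nltk.grammar import FeatureGrammar
from nltk.sem.util import interpret_sents

g = FeatureGrammar.fromstring("""
% start S
S[SEM=<hello>] -> 'hello'
""")
for (syntree, semrep) in interpret_sents(["hello"], g)[0]:
    print(semrep)   # the constant expression 'hello'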
170,693
import codecs from nltk.sem import evaluate def interpret_sents(inputs, grammar, semkey="SEM", trace=0): """ Add the semantic representation to each syntactic parse tree of each input sentence. :param inputs: a list of sentences :type inputs: list(str) :param grammar: ``FeatureGrammar`` or name of feature-based grammar :type grammar: nltk.grammar.FeatureGrammar :return: a mapping from sentences to lists of pairs (parse-tree, semantic-representations) :rtype: list(list(tuple(nltk.tree.Tree, nltk.sem.logic.ConstantExpression))) """ return [ [(syn, root_semrep(syn, semkey)) for syn in syntrees] for syntrees in parse_sents(inputs, grammar, trace=trace) ] def evaluate_sents(inputs, grammar, model, assignment, trace=0): """ Add the truth-in-a-model value to each semantic representation for each syntactic parse of each input sentences. :param inputs: a list of sentences :type inputs: list(str) :param grammar: ``FeatureGrammar`` or name of feature-based grammar :type grammar: nltk.grammar.FeatureGrammar :return: a mapping from sentences to lists of triples (parse-tree, semantic-representations, evaluation-in-model) :rtype: list(list(tuple(nltk.tree.Tree, nltk.sem.logic.ConstantExpression, bool or dict(str): bool))) """ return [ [ (syn, sem, model.evaluate("%s" % sem, assignment, trace=trace)) for (syn, sem) in interpretations ] for interpretations in interpret_sents(inputs, grammar) ] def demo_model0(): global m0, g0 # Initialize a valuation of non-logical constants.""" v = [ ("john", "b1"), ("mary", "g1"), ("suzie", "g2"), ("fido", "d1"), ("tess", "d2"), ("noosa", "n"), ("girl", {"g1", "g2"}), ("boy", {"b1", "b2"}), ("dog", {"d1", "d2"}), ("bark", {"d1", "d2"}), ("walk", {"b1", "g2", "d1"}), ("chase", {("b1", "g1"), ("b2", "g1"), ("g1", "d1"), ("g2", "d2")}), ( "see", {("b1", "g1"), ("b2", "d2"), ("g1", "b1"), ("d2", "b1"), ("g2", "n")}, ), ("in", {("b1", "n"), ("b2", "n"), ("d2", "n")}), ("with", {("b1", "g1"), ("g1", "b1"), ("d1", "b1"), ("b1", "d1")}), ] # Read in the data from ``v`` val = evaluate.Valuation(v) # Bind ``dom`` to the ``domain`` property of ``val`` dom = val.domain # Initialize a model with parameters ``dom`` and ``val``. m0 = evaluate.Model(dom, val) # Initialize a variable assignment with parameter ``dom`` g0 = evaluate.Assignment(dom) def read_sents(filename, encoding="utf8"): with codecs.open(filename, "r", encoding) as fp: sents = [l.rstrip() for l in fp] # get rid of blank lines sents = [l for l in sents if len(l) > 0] sents = [l for l in sents if not l[0] == "#"] return sents class OptionParser(OptionContainer): allow_interspersed_args: bool epilog: Optional[_Text] formatter: HelpFormatter largs: Optional[List[_Text]] option_groups: List[OptionParser] option_list: List[Option] process_default_values: Any prog: Optional[_Text] rargs: Optional[List[Any]] standard_option_list: List[Option] usage: Optional[_Text] values: Optional[Values] version: _Text def __init__( self, usage: Optional[_Text] = ..., option_list: Iterable[Option] = ..., option_class: Type[Option] = ..., version: Optional[_Text] = ..., conflict_handler: _Text = ..., description: Optional[_Text] = ..., formatter: Optional[HelpFormatter] = ..., add_help_option: bool = ..., prog: Optional[_Text] = ..., epilog: Optional[_Text] = ..., ) -> None: ... def _add_help_option(self) -> None: ... def _add_version_option(self) -> None: ... def _create_option_list(self) -> None: ... def _get_all_options(self) -> List[Option]: ... def _get_args(self, args: Iterable[Any]) -> List[Any]: ... 
def _init_parsing_state(self) -> None: ... def _match_long_opt(self, opt: _Text) -> _Text: ... def _populate_option_list(self, option_list: Iterable[Option], add_help: bool = ...) -> None: ... def _process_args(self, largs: List[Any], rargs: List[Any], values: Values) -> None: ... def _process_long_opt(self, rargs: List[Any], values: Any) -> None: ... def _process_short_opts(self, rargs: List[Any], values: Any) -> None: ... def add_option_group(self, __opt_group: OptionGroup) -> OptionParser: ... def add_option_group(self, *args: Any, **kwargs: Any) -> OptionParser: ... def check_values(self, values: Values, args: List[_Text]) -> Tuple[Values, List[_Text]]: ... def disable_interspersed_args(self) -> None: ... def enable_interspersed_args(self) -> None: ... def error(self, msg: _Text) -> None: ... def exit(self, status: int = ..., msg: Optional[str] = ...) -> None: ... def expand_prog_name(self, s: Optional[_Text]) -> Any: ... def format_epilog(self, formatter: HelpFormatter) -> Any: ... def format_help(self, formatter: Optional[HelpFormatter] = ...) -> _Text: ... def format_option_help(self, formatter: Optional[HelpFormatter] = ...) -> _Text: ... def get_default_values(self) -> Values: ... def get_option_group(self, opt_str: _Text) -> Any: ... def get_prog_name(self) -> _Text: ... def get_usage(self) -> _Text: ... def get_version(self) -> _Text: ... def parse_args( self, args: Optional[Sequence[AnyStr]] = ..., values: Optional[Values] = ... ) -> Tuple[Values, List[AnyStr]]: ... def print_usage(self, file: Optional[IO[str]] = ...) -> None: ... def print_help(self, file: Optional[IO[str]] = ...) -> None: ... def print_version(self, file: Optional[IO[str]] = ...) -> None: ... def set_default(self, dest: Any, value: Any) -> None: ... def set_defaults(self, **kwargs: Any) -> None: ... def set_process_default_values(self, process: Any) -> None: ... def set_usage(self, usage: _Text) -> None: ... def demo(): import sys from optparse import OptionParser description = """ Parse and evaluate some sentences. 
""" opts = OptionParser(description=description) opts.set_defaults( evaluate=True, beta=True, syntrace=0, semtrace=0, demo="default", grammar="", sentences="", ) opts.add_option( "-d", "--demo", dest="demo", help="choose demo D; omit this for the default demo, or specify 'chat80'", metavar="D", ) opts.add_option( "-g", "--gram", dest="grammar", help="read in grammar G", metavar="G" ) opts.add_option( "-m", "--model", dest="model", help="import model M (omit '.py' suffix)", metavar="M", ) opts.add_option( "-s", "--sentences", dest="sentences", help="read in a file of test sentences S", metavar="S", ) opts.add_option( "-e", "--no-eval", action="store_false", dest="evaluate", help="just do a syntactic analysis", ) opts.add_option( "-b", "--no-beta-reduction", action="store_false", dest="beta", help="don't carry out beta-reduction", ) opts.add_option( "-t", "--syntrace", action="count", dest="syntrace", help="set syntactic tracing on; requires '-e' option", ) opts.add_option( "-T", "--semtrace", action="count", dest="semtrace", help="set semantic tracing on", ) (options, args) = opts.parse_args() SPACER = "-" * 30 demo_model0() sents = [ "Fido sees a boy with Mary", "John sees Mary", "every girl chases a dog", "every boy chases a girl", "John walks with a girl in Noosa", "who walks", ] gramfile = "grammars/sample_grammars/sem2.fcfg" if options.sentences: sentsfile = options.sentences if options.grammar: gramfile = options.grammar if options.model: exec("import %s as model" % options.model) if sents is None: sents = read_sents(sentsfile) # Set model and assignment model = m0 g = g0 if options.evaluate: evaluations = evaluate_sents(sents, gramfile, model, g, trace=options.semtrace) else: semreps = interpret_sents(sents, gramfile, trace=options.syntrace) for i, sent in enumerate(sents): n = 1 print("\nSentence: %s" % sent) print(SPACER) if options.evaluate: for (syntree, semrep, value) in evaluations[i]: if isinstance(value, dict): value = set(value.keys()) print("%d: %s" % (n, semrep)) print(value) n += 1 else: for (syntree, semrep) in semreps[i]: print("%d: %s" % (n, semrep)) n += 1
null
170,694
from nltk.parse import load_parser from nltk.parse.featurechart import InstantiateVarsChart from nltk.sem.logic import ApplicationExpression, LambdaExpression, Variable class CooperStore: """ A container for handling quantifier ambiguity via Cooper storage. """ def __init__(self, featstruct): """ :param featstruct: The value of the ``sem`` node in a tree from ``parse_with_bindops()`` :type featstruct: FeatStruct (with features ``core`` and ``store``) """ self.featstruct = featstruct self.readings = [] try: self.core = featstruct["CORE"] self.store = featstruct["STORE"] except KeyError: print("%s is not a Cooper storage structure" % featstruct) def _permute(self, lst): """ :return: An iterator over the permutations of the input list :type lst: list :rtype: iter """ remove = lambda lst0, index: lst0[:index] + lst0[index + 1 :] if lst: for index, x in enumerate(lst): for y in self._permute(remove(lst, index)): yield (x,) + y else: yield () def s_retrieve(self, trace=False): r""" Carry out S-Retrieval of binding operators in store. If hack=True, serialize the bindop and core as strings and reparse. Ugh. Each permutation of the store (i.e. list of binding operators) is taken to be a possible scoping of quantifiers. We iterate through the binding operators in each permutation, and successively apply them to the current term, starting with the core semantic representation, working from the inside out. Binding operators are of the form:: bo(\P.all x.(man(x) -> P(x)),z1) """ for perm, store_perm in enumerate(self._permute(self.store)): if trace: print("Permutation %s" % (perm + 1)) term = self.core for bindop in store_perm: # we just want the arguments that are wrapped by the 'bo' predicate quant, varex = tuple(bindop.args) # use var to make an abstraction over the current term and then # apply the quantifier to it term = ApplicationExpression( quant, LambdaExpression(varex.variable, term) ) if trace: print(" ", term) term = term.simplify() self.readings.append(term) def parse_with_bindops(sentence, grammar=None, trace=0): """ Use a grammar with Binding Operators to parse a sentence. """ if not grammar: grammar = "grammars/book_grammars/storage.fcfg" parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart) # Parse the sentence. tokens = sentence.split() return list(parser.parse(tokens)) def demo(): from nltk.sem import cooper_storage as cs sentence = "every girl chases a dog" # sentence = "a man gives a bone to every dog" print() print("Analysis of sentence '%s'" % sentence) print("=" * 50) trees = cs.parse_with_bindops(sentence, trace=0) for tree in trees: semrep = cs.CooperStore(tree.label()["SEM"]) print() print("Binding operators:") print("-" * 15) for s in semrep.store: print(s) print() print("Core:") print("-" * 15) print(semrep.core) print() print("S-Retrieval:") print("-" * 15) semrep.s_retrieve(trace=True) print("Readings:") print("-" * 15) for i, reading in enumerate(semrep.readings): print(f"{i + 1}: {reading}")
null
170,695
import os from itertools import chain import nltk from nltk.internals import Counter from nltk.sem import drt, linearlogic from nltk.sem.logic import ( AbstractVariableExpression, Expression, LambdaExpression, Variable, VariableExpression, ) from nltk.tag import BigramTagger, RegexpTagger, TrigramTagger, UnigramTagger class Glue: def __init__( self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False ): def train_depparser(self, depgraphs=None): def parse_to_meaning(self, sentence): def get_readings(self, agenda): def _add_to_reading_list(self, glueformula, reading_list): def parse_to_compiled(self, sentence): def dep_parse(self, sentence): def depgraph_to_glue(self, depgraph): def get_glue_dict(self): def gfl_to_compiled(self, gfl): def get_pos_tagger(self): def demo(show_example=-1): from nltk.parse import MaltParser examples = [ "David sees Mary", "David eats a sandwich", "every man chases a dog", "every man believes a dog sleeps", "John gives David a sandwich", "John chases himself", ] # 'John persuades David to order a pizza', # 'John tries to go', # 'John tries to find a unicorn', # 'John seems to vanish', # 'a unicorn seems to approach', # 'every big cat leaves', # 'every gray cat leaves', # 'every big gray cat leaves', # 'a former senator leaves', print("============== DEMO ==============") tagger = RegexpTagger( [ ("^(David|Mary|John)$", "NNP"), ( "^(sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$", "VB", ), ("^(go|order|vanish|find|approach)$", "VB"), ("^(a)$", "ex_quant"), ("^(every)$", "univ_quant"), ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"), ("^(big|gray|former)$", "JJ"), ("^(him|himself)$", "PRP"), ] ) depparser = MaltParser(tagger=tagger) glue = Glue(depparser=depparser, verbose=False) for (i, sentence) in enumerate(examples): if i == show_example or show_example == -1: print(f"[[[Example {i}]]] {sentence}") for reading in glue.parse_to_meaning(sentence.split()): print(reading.simplify()) print("")
null
170,696
from nltk.internals import Counter from nltk.sem.logic import APP, LogicParser class Expression: def fromstring(cls, s): def applyto(self, other, other_indices=None): def __call__(self, other): def __repr__(self): def demo(): lexpr = Expression.fromstring print(lexpr(r"f")) print(lexpr(r"(g -o f)")) print(lexpr(r"((g -o G) -o G)")) print(lexpr(r"g -o h -o f")) print(lexpr(r"(g -o f)(g)").simplify()) print(lexpr(r"(H -o f)(g)").simplify()) print(lexpr(r"((g -o G) -o G)((g -o f))").simplify()) print(lexpr(r"(H -o H)((g -o f))").simplify())
null
170,697
from nltk.parse import MaltParser from nltk.sem.drt import DrsDrawer, DrtVariableExpression from nltk.sem.glue import DrtGlue from nltk.sem.logic import Variable from nltk.tag import RegexpTagger from nltk.util import in_idle class DrtGlueDemo: def __init__(self, examples): # Set up the main window. self._top = Tk() self._top.title("DRT Glue Demo") # Set up key bindings. self._init_bindings() # Initialize the fonts.self._error = None self._init_fonts(self._top) self._examples = examples self._readingCache = [None for example in examples] # The user can hide the grammar. self._show_grammar = IntVar(self._top) self._show_grammar.set(1) # Set the data to None self._curExample = -1 self._readings = [] self._drs = None self._drsWidget = None self._error = None self._init_glue() # Create the basic frames. self._init_menubar(self._top) self._init_buttons(self._top) self._init_exampleListbox(self._top) self._init_readingListbox(self._top) self._init_canvas(self._top) # Resize callback self._canvas.bind("<Configure>", self._configure) ######################################### ## Initialization Helpers ######################################### def _init_glue(self): tagger = RegexpTagger( [ ("^(David|Mary|John)$", "NNP"), ( "^(walks|sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$", "VB", ), ("^(go|order|vanish|find|approach)$", "VB"), ("^(a)$", "ex_quant"), ("^(every)$", "univ_quant"), ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"), ("^(big|gray|former)$", "JJ"), ("^(him|himself)$", "PRP"), ] ) depparser = MaltParser(tagger=tagger) self._glue = DrtGlue(depparser=depparser, remove_duplicates=False) def _init_fonts(self, root): # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html> self._sysfont = Font(font=Button()["font"]) root.option_add("*Font", self._sysfont) # TWhat's our font size (default=same as sysfont) self._size = IntVar(root) self._size.set(self._sysfont.cget("size")) self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) self._font = Font(family="helvetica", size=self._size.get()) if self._size.get() < 0: big = self._size.get() - 2 else: big = self._size.get() + 2 self._bigfont = Font(family="helvetica", weight="bold", size=big) def _init_exampleListbox(self, parent): self._exampleFrame = listframe = Frame(parent) self._exampleFrame.pack(fill="both", side="left", padx=2) self._exampleList_label = Label( self._exampleFrame, font=self._boldfont, text="Examples" ) self._exampleList_label.pack() self._exampleList = Listbox( self._exampleFrame, selectmode="single", relief="groove", background="white", foreground="#909090", font=self._font, selectforeground="#004040", selectbackground="#c0f0c0", ) self._exampleList.pack(side="right", fill="both", expand=1) for example in self._examples: self._exampleList.insert("end", (" %s" % example)) self._exampleList.config(height=min(len(self._examples), 25), width=40) # Add a scrollbar if there are more than 25 examples. if len(self._examples) > 25: listscroll = Scrollbar(self._exampleFrame, orient="vertical") self._exampleList.config(yscrollcommand=listscroll.set) listscroll.config(command=self._exampleList.yview) listscroll.pack(side="left", fill="y") # If they select a example, apply it. 
self._exampleList.bind("<<ListboxSelect>>", self._exampleList_select) def _init_readingListbox(self, parent): self._readingFrame = listframe = Frame(parent) self._readingFrame.pack(fill="both", side="left", padx=2) self._readingList_label = Label( self._readingFrame, font=self._boldfont, text="Readings" ) self._readingList_label.pack() self._readingList = Listbox( self._readingFrame, selectmode="single", relief="groove", background="white", foreground="#909090", font=self._font, selectforeground="#004040", selectbackground="#c0f0c0", ) self._readingList.pack(side="right", fill="both", expand=1) # Add a scrollbar if there are more than 25 examples. listscroll = Scrollbar(self._readingFrame, orient="vertical") self._readingList.config(yscrollcommand=listscroll.set) listscroll.config(command=self._readingList.yview) listscroll.pack(side="right", fill="y") self._populate_readingListbox() def _populate_readingListbox(self): # Populate the listbox with integers self._readingList.delete(0, "end") for i in range(len(self._readings)): self._readingList.insert("end", (" %s" % (i + 1))) self._readingList.config(height=min(len(self._readings), 25), width=5) # If they select a example, apply it. self._readingList.bind("<<ListboxSelect>>", self._readingList_select) def _init_bindings(self): # Key bindings are a good thing. self._top.bind("<Control-q>", self.destroy) self._top.bind("<Control-x>", self.destroy) self._top.bind("<Escape>", self.destroy) self._top.bind("n", self.next) self._top.bind("<space>", self.next) self._top.bind("p", self.prev) self._top.bind("<BackSpace>", self.prev) def _init_buttons(self, parent): # Set up the frames. self._buttonframe = buttonframe = Frame(parent) buttonframe.pack(fill="none", side="bottom", padx=3, pady=2) Button( buttonframe, text="Prev", background="#90c0d0", foreground="black", command=self.prev, ).pack(side="left") Button( buttonframe, text="Next", background="#90c0d0", foreground="black", command=self.next, ).pack(side="left") def _configure(self, event): self._autostep = 0 (x1, y1, x2, y2) = self._cframe.scrollregion() y2 = event.height - 6 self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2) self._redraw() def _init_canvas(self, parent): self._cframe = CanvasFrame( parent, background="white", # width=525, height=250, closeenough=10, border=2, relief="sunken", ) self._cframe.pack(expand=1, fill="both", side="top", pady=2) canvas = self._canvas = self._cframe.canvas() # Initially, there's no tree or text self._tree = None self._textwidgets = [] self._textline = None def _init_menubar(self, parent): menubar = Menu(parent) filemenu = Menu(menubar, tearoff=0) filemenu.add_command( label="Exit", underline=1, command=self.destroy, accelerator="q" ) menubar.add_cascade(label="File", underline=0, menu=filemenu) actionmenu = Menu(menubar, tearoff=0) actionmenu.add_command( label="Next", underline=0, command=self.next, accelerator="n, Space" ) actionmenu.add_command( label="Previous", underline=0, command=self.prev, accelerator="p, Backspace" ) menubar.add_cascade(label="Action", underline=0, menu=actionmenu) optionmenu = Menu(menubar, tearoff=0) optionmenu.add_checkbutton( label="Remove Duplicates", underline=0, variable=self._glue.remove_duplicates, command=self._toggle_remove_duplicates, accelerator="r", ) menubar.add_cascade(label="Options", underline=0, menu=optionmenu) viewmenu = Menu(menubar, tearoff=0) viewmenu.add_radiobutton( label="Tiny", variable=self._size, underline=0, value=10, command=self.resize, ) viewmenu.add_radiobutton( label="Small", 
variable=self._size, underline=0, value=12, command=self.resize, ) viewmenu.add_radiobutton( label="Medium", variable=self._size, underline=0, value=14, command=self.resize, ) viewmenu.add_radiobutton( label="Large", variable=self._size, underline=0, value=18, command=self.resize, ) viewmenu.add_radiobutton( label="Huge", variable=self._size, underline=0, value=24, command=self.resize, ) menubar.add_cascade(label="View", underline=0, menu=viewmenu) helpmenu = Menu(menubar, tearoff=0) helpmenu.add_command(label="About", underline=0, command=self.about) menubar.add_cascade(label="Help", underline=0, menu=helpmenu) parent.config(menu=menubar) ######################################### ## Main draw procedure ######################################### def _redraw(self): canvas = self._canvas # Delete the old DRS, widgets, etc. if self._drsWidget is not None: self._drsWidget.clear() if self._drs: self._drsWidget = DrsWidget(self._canvas, self._drs) self._drsWidget.draw() if self._error: self._drsWidget = DrsWidget(self._canvas, self._error) self._drsWidget.draw() ######################################### ## Button Callbacks ######################################### def destroy(self, *e): self._autostep = 0 if self._top is None: return self._top.destroy() self._top = None def prev(self, *e): selection = self._readingList.curselection() readingListSize = self._readingList.size() # there are readings if readingListSize > 0: # if one reading is currently selected if len(selection) == 1: index = int(selection[0]) # if it's on (or before) the first item if index <= 0: self._select_previous_example() else: self._readingList_store_selection(index - 1) else: # select its first reading self._readingList_store_selection(readingListSize - 1) else: self._select_previous_example() def _select_previous_example(self): # if the current example is not the first example if self._curExample > 0: self._exampleList_store_selection(self._curExample - 1) else: # go to the last example self._exampleList_store_selection(len(self._examples) - 1) def next(self, *e): selection = self._readingList.curselection() readingListSize = self._readingList.size() # if there are readings if readingListSize > 0: # if one reading is currently selected if len(selection) == 1: index = int(selection[0]) # if it's on (or past) the last item if index >= (readingListSize - 1): self._select_next_example() else: self._readingList_store_selection(index + 1) else: # select its first reading self._readingList_store_selection(0) else: self._select_next_example() def _select_next_example(self): # if the current example is not the last example if self._curExample < len(self._examples) - 1: self._exampleList_store_selection(self._curExample + 1) else: # go to the first example self._exampleList_store_selection(0) def about(self, *e): ABOUT = ( "NLTK Discourse Representation Theory (DRT) Glue Semantics Demo\n" + "Written by Daniel H. Garrette" ) TITLE = "About: NLTK DRT Glue Demo" try: from tkinter.messagebox import Message Message(message=ABOUT, title=TITLE).show() except: ShowText(self._top, TITLE, ABOUT) def postscript(self, *e): self._autostep = 0 self._cframe.print_to_file() def mainloop(self, *args, **kwargs): """ Enter the Tkinter mainloop. This function must be called if this demo is created from a non-interactive program (e.g. from a secript); otherwise, the demo will close as soon as the script completes. 
""" if in_idle(): return self._top.mainloop(*args, **kwargs) def resize(self, size=None): if size is not None: self._size.set(size) size = self._size.get() self._font.configure(size=-(abs(size))) self._boldfont.configure(size=-(abs(size))) self._sysfont.configure(size=-(abs(size))) self._bigfont.configure(size=-(abs(size + 2))) self._redraw() def _toggle_remove_duplicates(self): self._glue.remove_duplicates = not self._glue.remove_duplicates self._exampleList.selection_clear(0, "end") self._readings = [] self._populate_readingListbox() self._readingCache = [None for ex in self._examples] self._curExample = -1 self._error = None self._drs = None self._redraw() def _exampleList_select(self, event): selection = self._exampleList.curselection() if len(selection) != 1: return self._exampleList_store_selection(int(selection[0])) def _exampleList_store_selection(self, index): self._curExample = index example = self._examples[index] self._exampleList.selection_clear(0, "end") if example: cache = self._readingCache[index] if cache: if isinstance(cache, list): self._readings = cache self._error = None else: self._readings = [] self._error = cache else: try: self._readings = self._glue.parse_to_meaning(example) self._error = None self._readingCache[index] = self._readings except Exception as e: self._readings = [] self._error = DrtVariableExpression(Variable("Error: " + str(e))) self._readingCache[index] = self._error # add a star to the end of the example self._exampleList.delete(index) self._exampleList.insert(index, (" %s *" % example)) self._exampleList.config( height=min(len(self._examples), 25), width=40 ) self._populate_readingListbox() self._exampleList.selection_set(index) self._drs = None self._redraw() def _readingList_select(self, event): selection = self._readingList.curselection() if len(selection) != 1: return self._readingList_store_selection(int(selection[0])) def _readingList_store_selection(self, index): reading = self._readings[index] self._readingList.selection_clear(0, "end") if reading: self._readingList.selection_set(index) self._drs = reading.simplify().normalize().resolve_anaphora() self._redraw() def demo(): examples = [ "John walks", "David sees Mary", "David eats a sandwich", "every man chases a dog", # 'every man believes a dog yawns', # 'John gives David a sandwich', "John chases himself", # 'John persuades David to order a pizza', # 'John tries to go', # 'John tries to find a unicorn', # 'John seems to vanish', # 'a unicorn seems to approach', # 'every big cat leaves', # 'every gray cat leaves', # 'every big gray cat leaves', # 'a former senator leaves', # 'John likes a cat', # 'John likes every cat', # 'he walks', # 'John walks and he leaves' ] DrtGlueDemo(examples).mainloop()
null
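A minimal way to exercise the demo class above, assuming a working Tk display and the Glue/DRT grammar resources that nltk.sem.glue loads by default; it simply mirrors the demo() helper included in the snippet:

if __name__ == "__main__":
    # Opens the Tkinter window with two example sentences; press 'q' or Escape to exit.
    DrtGlueDemo(["John walks", "David sees Mary"]).mainloop()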
170,698
import operator import re from collections import defaultdict from functools import reduce, total_ordering from nltk.internals import Counter from nltk.util import Trie class Tokens: LAMBDA = "\\" LAMBDA_LIST = ["\\"] # Quantifiers EXISTS = "exists" EXISTS_LIST = ["some", "exists", "exist"] ALL = "all" ALL_LIST = ["all", "forall"] IOTA = "iota" IOTA_LIST = ["iota"] # Punctuation DOT = "." OPEN = "(" CLOSE = ")" COMMA = "," # Operations NOT = "-" NOT_LIST = ["not", "-", "!"] AND = "&" AND_LIST = ["and", "&", "^"] OR = "|" OR_LIST = ["or", "|"] IMP = "->" IMP_LIST = ["implies", "->", "=>"] IFF = "<->" IFF_LIST = ["iff", "<->", "<=>"] EQ = "=" EQ_LIST = ["=", "=="] NEQ = "!=" NEQ_LIST = ["!="] # Collections of tokens BINOPS = AND_LIST + OR_LIST + IMP_LIST + IFF_LIST QUANTS = EXISTS_LIST + ALL_LIST + IOTA_LIST PUNCT = [DOT, OPEN, CLOSE, COMMA] TOKENS = BINOPS + EQ_LIST + NEQ_LIST + QUANTS + LAMBDA_LIST + PUNCT + NOT_LIST # Special SYMBOLS = [x for x in TOKENS if re.match(r"^[-\\.(),!&^|>=<]*$", x)] The provided code snippet includes necessary dependencies for implementing the `boolean_ops` function. Write a Python function `def boolean_ops()` to solve the following problem: Boolean operators Here is the function: def boolean_ops(): """ Boolean operators """ names = ["negation", "conjunction", "disjunction", "implication", "equivalence"] for pair in zip(names, [Tokens.NOT, Tokens.AND, Tokens.OR, Tokens.IMP, Tokens.IFF]): print("%-15s\t%s" % pair)
Boolean operators
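A quick sanity check of boolean_ops() as defined above; the column widths come from the "%-15s\t%s" format, so the whitespace shown below is approximate:

# Prints one row per operator:
#   negation        -
#   conjunction     &
#   disjunction     |
#   implication     ->
#   equivalence     <->
boolean_ops()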
170,699
import operator import re from collections import defaultdict from functools import reduce, total_ordering from nltk.internals import Counter from nltk.util import Trie class Tokens: LAMBDA = "\\" LAMBDA_LIST = ["\\"] # Quantifiers EXISTS = "exists" EXISTS_LIST = ["some", "exists", "exist"] ALL = "all" ALL_LIST = ["all", "forall"] IOTA = "iota" IOTA_LIST = ["iota"] # Punctuation DOT = "." OPEN = "(" CLOSE = ")" COMMA = "," # Operations NOT = "-" NOT_LIST = ["not", "-", "!"] AND = "&" AND_LIST = ["and", "&", "^"] OR = "|" OR_LIST = ["or", "|"] IMP = "->" IMP_LIST = ["implies", "->", "=>"] IFF = "<->" IFF_LIST = ["iff", "<->", "<=>"] EQ = "=" EQ_LIST = ["=", "=="] NEQ = "!=" NEQ_LIST = ["!="] # Collections of tokens BINOPS = AND_LIST + OR_LIST + IMP_LIST + IFF_LIST QUANTS = EXISTS_LIST + ALL_LIST + IOTA_LIST PUNCT = [DOT, OPEN, CLOSE, COMMA] TOKENS = BINOPS + EQ_LIST + NEQ_LIST + QUANTS + LAMBDA_LIST + PUNCT + NOT_LIST # Special SYMBOLS = [x for x in TOKENS if re.match(r"^[-\\.(),!&^|>=<]*$", x)] The provided code snippet includes necessary dependencies for implementing the `equality_preds` function. Write a Python function `def equality_preds()` to solve the following problem: Equality predicates Here is the function: def equality_preds(): """ Equality predicates """ names = ["equality", "inequality"] for pair in zip(names, [Tokens.EQ, Tokens.NEQ]): print("%-15s\t%s" % pair)
Equality predicates
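The same kind of sanity check for equality_preds() (whitespace approximate):

# Prints:
#   equality        =
#   inequality      !=
equality_preds()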
170,700
import operator import re from collections import defaultdict from functools import reduce, total_ordering from nltk.internals import Counter from nltk.util import Trie class Tokens: LAMBDA = "\\" LAMBDA_LIST = ["\\"] # Quantifiers EXISTS = "exists" EXISTS_LIST = ["some", "exists", "exist"] ALL = "all" ALL_LIST = ["all", "forall"] IOTA = "iota" IOTA_LIST = ["iota"] # Punctuation DOT = "." OPEN = "(" CLOSE = ")" COMMA = "," # Operations NOT = "-" NOT_LIST = ["not", "-", "!"] AND = "&" AND_LIST = ["and", "&", "^"] OR = "|" OR_LIST = ["or", "|"] IMP = "->" IMP_LIST = ["implies", "->", "=>"] IFF = "<->" IFF_LIST = ["iff", "<->", "<=>"] EQ = "=" EQ_LIST = ["=", "=="] NEQ = "!=" NEQ_LIST = ["!="] # Collections of tokens BINOPS = AND_LIST + OR_LIST + IMP_LIST + IFF_LIST QUANTS = EXISTS_LIST + ALL_LIST + IOTA_LIST PUNCT = [DOT, OPEN, CLOSE, COMMA] TOKENS = BINOPS + EQ_LIST + NEQ_LIST + QUANTS + LAMBDA_LIST + PUNCT + NOT_LIST # Special SYMBOLS = [x for x in TOKENS if re.match(r"^[-\\.(),!&^|>=<]*$", x)] The provided code snippet includes necessary dependencies for implementing the `binding_ops` function. Write a Python function `def binding_ops()` to solve the following problem: Binding operators Here is the function: def binding_ops(): """ Binding operators """ names = ["existential", "universal", "lambda"] for pair in zip(names, [Tokens.EXISTS, Tokens.ALL, Tokens.LAMBDA, Tokens.IOTA]): print("%-15s\t%s" % pair)
Binding operators
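A sanity check for binding_ops() as written above. Note that the names list has three entries while four tokens are zipped against it, so zip() stops after the lambda row and Tokens.IOTA is never printed by this version:

# Prints (whitespace approximate):
#   existential     exists
#   universal       all
#   lambda          \
binding_ops()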
170,701
import operator import re from collections import defaultdict from functools import reduce, total_ordering from nltk.internals import Counter from nltk.util import Trie class LogicParser: """A lambda calculus expression parser.""" def __init__(self, type_check=False): """ :param type_check: should type checking be performed to their types? :type type_check: bool """ assert isinstance(type_check, bool) self._currentIndex = 0 self._buffer = [] self.type_check = type_check """A list of tuples of quote characters. The 4-tuple is comprised of the start character, the end character, the escape character, and a boolean indicating whether the quotes should be included in the result. Quotes are used to signify that a token should be treated as atomic, ignoring any special characters within the token. The escape character allows the quote end character to be used within the quote. If True, the boolean indicates that the final token should contain the quote and escape characters. This method exists to be overridden""" self.quote_chars = [] self.operator_precedence = dict( [(x, 1) for x in Tokens.LAMBDA_LIST] + [(x, 2) for x in Tokens.NOT_LIST] + [(APP, 3)] + [(x, 4) for x in Tokens.EQ_LIST + Tokens.NEQ_LIST] + [(x, 5) for x in Tokens.QUANTS] + [(x, 6) for x in Tokens.AND_LIST] + [(x, 7) for x in Tokens.OR_LIST] + [(x, 8) for x in Tokens.IMP_LIST] + [(x, 9) for x in Tokens.IFF_LIST] + [(None, 10)] ) self.right_associated_operations = [APP] def parse(self, data, signature=None): """ Parse the expression. :param data: str for the input to be parsed :param signature: ``dict<str, str>`` that maps variable names to type strings :returns: a parsed Expression """ data = data.rstrip() self._currentIndex = 0 self._buffer, mapping = self.process(data) try: result = self.process_next_expression(None) if self.inRange(0): raise UnexpectedTokenException(self._currentIndex + 1, self.token(0)) except LogicalExpressionException as e: msg = "{}\n{}\n{}^".format(e, data, " " * mapping[e.index - 1]) raise LogicalExpressionException(None, msg) from e if self.type_check: result.typecheck(signature) return result def process(self, data): """Split the data into tokens""" out = [] mapping = {} tokenTrie = Trie(self.get_all_symbols()) token = "" data_idx = 0 token_start_idx = data_idx while data_idx < len(data): cur_data_idx = data_idx quoted_token, data_idx = self.process_quoted_token(data_idx, data) if quoted_token: if not token: token_start_idx = cur_data_idx token += quoted_token continue st = tokenTrie c = data[data_idx] symbol = "" while c in st: symbol += c st = st[c] if len(data) - data_idx > len(symbol): c = data[data_idx + len(symbol)] else: break if Trie.LEAF in st: # token is a complete symbol if token: mapping[len(out)] = token_start_idx out.append(token) token = "" mapping[len(out)] = data_idx out.append(symbol) data_idx += len(symbol) else: if data[data_idx] in " \t\n": # any whitespace if token: mapping[len(out)] = token_start_idx out.append(token) token = "" else: if not token: token_start_idx = data_idx token += data[data_idx] data_idx += 1 if token: mapping[len(out)] = token_start_idx out.append(token) mapping[len(out)] = len(data) mapping[len(out) + 1] = len(data) + 1 return out, mapping def process_quoted_token(self, data_idx, data): token = "" c = data[data_idx] i = data_idx for start, end, escape, incl_quotes in self.quote_chars: if c == start: if incl_quotes: token += c i += 1 while data[i] != end: if data[i] == escape: if incl_quotes: token += data[i] i += 1 if len(data) == i: # if there are no more 
chars raise LogicalExpressionException( None, "End of input reached. " "Escape character [%s] found at end." % escape, ) token += data[i] else: token += data[i] i += 1 if len(data) == i: raise LogicalExpressionException( None, "End of input reached. " "Expected: [%s]" % end ) if incl_quotes: token += data[i] i += 1 if not token: raise LogicalExpressionException(None, "Empty quoted token found") break return token, i def get_all_symbols(self): """This method exists to be overridden""" return Tokens.SYMBOLS def inRange(self, location): """Return TRUE if the given location is within the buffer""" return self._currentIndex + location < len(self._buffer) def token(self, location=None): """Get the next waiting token. If a location is given, then return the token at currentIndex+location without advancing currentIndex; setting it gives lookahead/lookback capability.""" try: if location is None: tok = self._buffer[self._currentIndex] self._currentIndex += 1 else: tok = self._buffer[self._currentIndex + location] return tok except IndexError as e: raise ExpectedMoreTokensException(self._currentIndex + 1) from e def isvariable(self, tok): return tok not in Tokens.TOKENS def process_next_expression(self, context): """Parse the next complete expression from the stream and return it.""" try: tok = self.token() except ExpectedMoreTokensException as e: raise ExpectedMoreTokensException( self._currentIndex + 1, message="Expression expected." ) from e accum = self.handle(tok, context) if not accum: raise UnexpectedTokenException( self._currentIndex, tok, message="Expression expected." ) return self.attempt_adjuncts(accum, context) def handle(self, tok, context): """This method is intended to be overridden for logics that use different operators or expressions""" if self.isvariable(tok): return self.handle_variable(tok, context) elif tok in Tokens.NOT_LIST: return self.handle_negation(tok, context) elif tok in Tokens.LAMBDA_LIST: return self.handle_lambda(tok, context) elif tok in Tokens.QUANTS: return self.handle_quant(tok, context) elif tok == Tokens.OPEN: return self.handle_open(tok, context) def attempt_adjuncts(self, expression, context): cur_idx = None while cur_idx != self._currentIndex: # while adjuncts are added cur_idx = self._currentIndex expression = self.attempt_EqualityExpression(expression, context) expression = self.attempt_ApplicationExpression(expression, context) expression = self.attempt_BooleanExpression(expression, context) return expression def handle_negation(self, tok, context): return self.make_NegatedExpression(self.process_next_expression(Tokens.NOT)) def make_NegatedExpression(self, expression): return NegatedExpression(expression) def handle_variable(self, tok, context): # It's either: 1) a predicate expression: sees(x,y) # 2) an application expression: P(x) # 3) a solo variable: john OR x accum = self.make_VariableExpression(tok) if self.inRange(0) and self.token(0) == Tokens.OPEN: # The predicate has arguments if not isinstance(accum, FunctionVariableExpression) and not isinstance( accum, ConstantExpression ): raise LogicalExpressionException( self._currentIndex, "'%s' is an illegal predicate name. " "Individual variables may not be used as " "predicates." 
% tok, ) self.token() # swallow the Open Paren # curry the arguments accum = self.make_ApplicationExpression( accum, self.process_next_expression(APP) ) while self.inRange(0) and self.token(0) == Tokens.COMMA: self.token() # swallow the comma accum = self.make_ApplicationExpression( accum, self.process_next_expression(APP) ) self.assertNextToken(Tokens.CLOSE) return accum def get_next_token_variable(self, description): try: tok = self.token() except ExpectedMoreTokensException as e: raise ExpectedMoreTokensException(e.index, "Variable expected.") from e if isinstance(self.make_VariableExpression(tok), ConstantExpression): raise LogicalExpressionException( self._currentIndex, "'%s' is an illegal variable name. " "Constants may not be %s." % (tok, description), ) return Variable(tok) def handle_lambda(self, tok, context): # Expression is a lambda expression if not self.inRange(0): raise ExpectedMoreTokensException( self._currentIndex + 2, message="Variable and Expression expected following lambda operator.", ) vars = [self.get_next_token_variable("abstracted")] while True: if not self.inRange(0) or ( self.token(0) == Tokens.DOT and not self.inRange(1) ): raise ExpectedMoreTokensException( self._currentIndex + 2, message="Expression expected." ) if not self.isvariable(self.token(0)): break # Support expressions like: \x y.M == \x.\y.M vars.append(self.get_next_token_variable("abstracted")) if self.inRange(0) and self.token(0) == Tokens.DOT: self.token() # swallow the dot accum = self.process_next_expression(tok) while vars: accum = self.make_LambdaExpression(vars.pop(), accum) return accum def handle_quant(self, tok, context): # Expression is a quantified expression: some x.M factory = self.get_QuantifiedExpression_factory(tok) if not self.inRange(0): raise ExpectedMoreTokensException( self._currentIndex + 2, message="Variable and Expression expected following quantifier '%s'." % tok, ) vars = [self.get_next_token_variable("quantified")] while True: if not self.inRange(0) or ( self.token(0) == Tokens.DOT and not self.inRange(1) ): raise ExpectedMoreTokensException( self._currentIndex + 2, message="Expression expected." ) if not self.isvariable(self.token(0)): break # Support expressions like: some x y.M == some x.some y.M vars.append(self.get_next_token_variable("quantified")) if self.inRange(0) and self.token(0) == Tokens.DOT: self.token() # swallow the dot accum = self.process_next_expression(tok) while vars: accum = self.make_QuanifiedExpression(factory, vars.pop(), accum) return accum def get_QuantifiedExpression_factory(self, tok): """This method serves as a hook for other logic parsers that have different quantifiers""" if tok in Tokens.EXISTS_LIST: return ExistsExpression elif tok in Tokens.ALL_LIST: return AllExpression elif tok in Tokens.IOTA_LIST: return IotaExpression else: self.assertToken(tok, Tokens.QUANTS) def make_QuanifiedExpression(self, factory, variable, term): return factory(variable, term) def handle_open(self, tok, context): # Expression is in parens accum = self.process_next_expression(None) self.assertNextToken(Tokens.CLOSE) return accum def attempt_EqualityExpression(self, expression, context): """Attempt to make an equality expression. If the next token is an equality operator, then an EqualityExpression will be returned. 
Otherwise, the parameter will be returned.""" if self.inRange(0): tok = self.token(0) if tok in Tokens.EQ_LIST + Tokens.NEQ_LIST and self.has_priority( tok, context ): self.token() # swallow the "=" or "!=" expression = self.make_EqualityExpression( expression, self.process_next_expression(tok) ) if tok in Tokens.NEQ_LIST: expression = self.make_NegatedExpression(expression) return expression def make_EqualityExpression(self, first, second): """This method serves as a hook for other logic parsers that have different equality expression classes""" return EqualityExpression(first, second) def attempt_BooleanExpression(self, expression, context): """Attempt to make a boolean expression. If the next token is a boolean operator, then a BooleanExpression will be returned. Otherwise, the parameter will be returned.""" while self.inRange(0): tok = self.token(0) factory = self.get_BooleanExpression_factory(tok) if factory and self.has_priority(tok, context): self.token() # swallow the operator expression = self.make_BooleanExpression( factory, expression, self.process_next_expression(tok) ) else: break return expression def get_BooleanExpression_factory(self, tok): """This method serves as a hook for other logic parsers that have different boolean operators""" if tok in Tokens.AND_LIST: return AndExpression elif tok in Tokens.OR_LIST: return OrExpression elif tok in Tokens.IMP_LIST: return ImpExpression elif tok in Tokens.IFF_LIST: return IffExpression else: return None def make_BooleanExpression(self, factory, first, second): return factory(first, second) def attempt_ApplicationExpression(self, expression, context): """Attempt to make an application expression. The next tokens are a list of arguments in parens, then the argument expression is a function being applied to the arguments. Otherwise, return the argument expression.""" if self.has_priority(APP, context): if self.inRange(0) and self.token(0) == Tokens.OPEN: if ( not isinstance(expression, LambdaExpression) and not isinstance(expression, ApplicationExpression) and not isinstance(expression, FunctionVariableExpression) and not isinstance(expression, ConstantExpression) ): raise LogicalExpressionException( self._currentIndex, ("The function '%s" % expression) + "' is not a Lambda Expression, an " "Application Expression, or a " "functional predicate, so it may " "not take arguments.", ) self.token() # swallow then open paren # curry the arguments accum = self.make_ApplicationExpression( expression, self.process_next_expression(APP) ) while self.inRange(0) and self.token(0) == Tokens.COMMA: self.token() # swallow the comma accum = self.make_ApplicationExpression( accum, self.process_next_expression(APP) ) self.assertNextToken(Tokens.CLOSE) return accum return expression def make_ApplicationExpression(self, function, argument): return ApplicationExpression(function, argument) def make_VariableExpression(self, name): return VariableExpression(Variable(name)) def make_LambdaExpression(self, variable, term): return LambdaExpression(variable, term) def has_priority(self, operation, context): return self.operator_precedence[operation] < self.operator_precedence[ context ] or ( operation in self.right_associated_operations and self.operator_precedence[operation] == self.operator_precedence[context] ) def assertNextToken(self, expected): try: tok = self.token() except ExpectedMoreTokensException as e: raise ExpectedMoreTokensException( e.index, message="Expected token '%s'." 
% expected ) from e if isinstance(expected, list): if tok not in expected: raise UnexpectedTokenException(self._currentIndex, tok, expected) else: if tok != expected: raise UnexpectedTokenException(self._currentIndex, tok, expected) def assertToken(self, tok, expected): if isinstance(expected, list): if tok not in expected: raise UnexpectedTokenException(self._currentIndex, tok, expected) else: if tok != expected: raise UnexpectedTokenException(self._currentIndex, tok, expected) def __repr__(self): if self.inRange(0): msg = "Next token: " + self.token(0) else: msg = "No more tokens" return "<" + self.__class__.__name__ + ": " + msg + ">" class LogicalExpressionException(Exception): def __init__(self, index, message): self.index = index Exception.__init__(self, message) The provided code snippet includes necessary dependencies for implementing the `read_logic` function. Write a Python function `def read_logic(s, logic_parser=None, encoding=None)` to solve the following problem: Convert a file of First Order Formulas into a list of {Expression}s. :param s: the contents of the file :type s: str :param logic_parser: The parser to be used to parse the logical expression :type logic_parser: LogicParser :param encoding: the encoding of the input string, if it is binary :type encoding: str :return: a list of parsed formulas. :rtype: list(Expression) Here is the function: def read_logic(s, logic_parser=None, encoding=None): """ Convert a file of First Order Formulas into a list of {Expression}s. :param s: the contents of the file :type s: str :param logic_parser: The parser to be used to parse the logical expression :type logic_parser: LogicParser :param encoding: the encoding of the input string, if it is binary :type encoding: str :return: a list of parsed formulas. :rtype: list(Expression) """ if encoding is not None: s = s.decode(encoding) if logic_parser is None: logic_parser = LogicParser() statements = [] for linenum, line in enumerate(s.splitlines()): line = line.strip() if line.startswith("#") or line == "": continue try: statements.append(logic_parser.parse(line)) except LogicalExpressionException as e: raise ValueError(f"Unable to parse line {linenum}: {line}") from e return statements
Convert a file of First Order Formulas into a list of {Expression}s. :param s: the contents of the file :type s: str :param logic_parser: The parser to be used to parse the logical expression :type logic_parser: LogicParser :param encoding: the encoding of the input string, if it is binary :type encoding: str :return: a list of parsed formulas. :rtype: list(Expression)
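A small usage sketch for read_logic() above, using the LogicParser defined in the same snippet; blank lines and lines starting with '#' are skipped, and every remaining line must be a complete formula:

fol_text = (
    "# axioms about men\n"
    "all x.(man(x) -> mortal(x))\n"
    "\n"
    "man(socrates)\n"
)
statements = read_logic(fol_text)
for s in statements:
    print(s)  # e.g. "all x.(man(x) -> mortal(x))" then "man(socrates)"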
170,702
import operator import re from collections import defaultdict from functools import reduce, total_ordering from nltk.internals import Counter from nltk.util import Trie class ComplexType(Type): def __init__(self, first, second): assert isinstance(first, Type), "%s is not a Type" % first assert isinstance(second, Type), "%s is not a Type" % second self.first = first self.second = second def __eq__(self, other): return ( isinstance(other, ComplexType) and self.first == other.first and self.second == other.second ) def __ne__(self, other): return not self == other __hash__ = Type.__hash__ def matches(self, other): if isinstance(other, ComplexType): return self.first.matches(other.first) and self.second.matches(other.second) else: return self == ANY_TYPE def resolve(self, other): if other == ANY_TYPE: return self elif isinstance(other, ComplexType): f = self.first.resolve(other.first) s = self.second.resolve(other.second) if f and s: return ComplexType(f, s) else: return None elif self == ANY_TYPE: return other else: return None def __str__(self): if self == ANY_TYPE: return "%s" % ANY_TYPE else: return f"<{self.first},{self.second}>" def str(self): if self == ANY_TYPE: return ANY_TYPE.str() else: return f"({self.first.str()} -> {self.second.str()})" TRUTH_TYPE = TruthValueType() ENTITY_TYPE = EntityType() ANY_TYPE = AnyType() class LogicalExpressionException(Exception): def __init__(self, index, message): self.index = index Exception.__init__(self, message) def read_type(type_string): assert isinstance(type_string, str) type_string = type_string.replace(" ", "") # remove spaces if type_string[0] == "<": assert type_string[-1] == ">" paren_count = 0 for i, char in enumerate(type_string): if char == "<": paren_count += 1 elif char == ">": paren_count -= 1 assert paren_count > 0 elif char == ",": if paren_count == 1: break return ComplexType( read_type(type_string[1:i]), read_type(type_string[i + 1 : -1]) ) elif type_string[0] == "%s" % ENTITY_TYPE: return ENTITY_TYPE elif type_string[0] == "%s" % TRUTH_TYPE: return TRUTH_TYPE elif type_string[0] == "%s" % ANY_TYPE: return ANY_TYPE else: raise LogicalExpressionException( None, "Unexpected character: '%s'." % type_string[0] )
null
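A short illustration of the read_type() helper shown above; angle-bracket strings parse into nested ComplexType objects whose str() form uses arrow notation:

t = read_type("<e,t>")                 # ComplexType(ENTITY_TYPE, TRUTH_TYPE)
print(t)                               # <e,t>
print(t.str())                         # (e -> t)
print(read_type("<e,<e,t>>").str())    # (e -> (e -> t))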
170,703
import operator import re from collections import defaultdict from functools import reduce, total_ordering from nltk.internals import Counter from nltk.util import Trie The provided code snippet includes necessary dependencies for implementing the `typecheck` function. Write a Python function `def typecheck(expressions, signature=None)` to solve the following problem: Ensure correct typing across a collection of ``Expression`` objects. :param expressions: a collection of expressions :param signature: dict that maps variable names to types (or string representations of types) Here is the function: def typecheck(expressions, signature=None): """ Ensure correct typing across a collection of ``Expression`` objects. :param expressions: a collection of expressions :param signature: dict that maps variable names to types (or string representations of types) """ # typecheck and create master signature for expression in expressions: signature = expression.typecheck(signature) # apply master signature to all expressions for expression in expressions[:-1]: expression.typecheck(signature) return signature
Ensure correct typing across a collection of ``Expression`` objects. :param expressions: a collection of expressions :param signature: dict that maps variable names to types (or string representations of types)
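A sketch of how the typecheck() helper above is typically driven, assuming the Expression.fromstring reader from the same module; the returned signature maps each name to its inferred type:

a = Expression.fromstring("man(x)")
b = Expression.fromstring("walks(x)")
sig = typecheck([a, b])
# sig maps 'x' to the entity type and 'man'/'walks' to function types over
# entities; passing an explicit signature dict instead constrains the inference.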
170,704
import operator import re from collections import defaultdict from functools import reduce, total_ordering from nltk.internals import Counter from nltk.util import Trie class Variable: def __init__(self, name): """ :param name: the name of the variable """ assert isinstance(name, str), "%s is not a string" % name self.name = name def __eq__(self, other): return isinstance(other, Variable) and self.name == other.name def __ne__(self, other): return not self == other def __lt__(self, other): if not isinstance(other, Variable): raise TypeError return self.name < other.name def substitute_bindings(self, bindings): return bindings.get(self, self) def __hash__(self): return hash(self.name) def __str__(self): return self.name def __repr__(self): return "Variable('%s')" % self.name class Expression(SubstituteBindingsI): """This is the base abstract object for all logical expressions""" _logic_parser = LogicParser() _type_checking_logic_parser = LogicParser(type_check=True) def fromstring(cls, s, type_check=False, signature=None): if type_check: return cls._type_checking_logic_parser.parse(s, signature) else: return cls._logic_parser.parse(s, signature) def __call__(self, other, *additional): accum = self.applyto(other) for a in additional: accum = accum(a) return accum def applyto(self, other): assert isinstance(other, Expression), "%s is not an Expression" % other return ApplicationExpression(self, other) def __neg__(self): return NegatedExpression(self) def negate(self): """If this is a negated expression, remove the negation. Otherwise add a negation.""" return -self def __and__(self, other): if not isinstance(other, Expression): raise TypeError("%s is not an Expression" % other) return AndExpression(self, other) def __or__(self, other): if not isinstance(other, Expression): raise TypeError("%s is not an Expression" % other) return OrExpression(self, other) def __gt__(self, other): if not isinstance(other, Expression): raise TypeError("%s is not an Expression" % other) return ImpExpression(self, other) def __lt__(self, other): if not isinstance(other, Expression): raise TypeError("%s is not an Expression" % other) return IffExpression(self, other) def __eq__(self, other): return NotImplemented def __ne__(self, other): return not self == other def equiv(self, other, prover=None): """ Check for logical equivalence. Pass the expression (self <-> other) to the theorem prover. If the prover says it is valid, then the self and other are equal. :param other: an ``Expression`` to check equality against :param prover: a ``nltk.inference.api.Prover`` """ assert isinstance(other, Expression), "%s is not an Expression" % other if prover is None: from nltk.inference import Prover9 prover = Prover9() bicond = IffExpression(self.simplify(), other.simplify()) return prover.prove(bicond) def __hash__(self): return hash(repr(self)) def substitute_bindings(self, bindings): expr = self for var in expr.variables(): if var in bindings: val = bindings[var] if isinstance(val, Variable): val = self.make_VariableExpression(val) elif not isinstance(val, Expression): raise ValueError( "Can not substitute a non-expression " "value into an expression: %r" % (val,) ) # Substitute bindings in the target value. val = val.substitute_bindings(bindings) # Replace var w/ the target value. expr = expr.replace(var, val) return expr.simplify() def typecheck(self, signature=None): """ Infer and check types. Raise exceptions if necessary. 
:param signature: dict that maps variable names to types (or string representations of types) :return: the signature, plus any additional type mappings """ sig = defaultdict(list) if signature: for key in signature: val = signature[key] varEx = VariableExpression(Variable(key)) if isinstance(val, Type): varEx.type = val else: varEx.type = read_type(val) sig[key].append(varEx) self._set_type(signature=sig) return {key: sig[key][0].type for key in sig} def findtype(self, variable): """ Find the type of the given variable as it is used in this expression. For example, finding the type of "P" in "P(x) & Q(x,y)" yields "<e,t>" :param variable: Variable """ raise NotImplementedError() def _set_type(self, other_type=ANY_TYPE, signature=None): """ Set the type of this expression to be the given type. Raise type exceptions where applicable. :param other_type: Type :param signature: dict(str -> list(AbstractVariableExpression)) """ raise NotImplementedError() def replace(self, variable, expression, replace_bound=False, alpha_convert=True): """ Replace every instance of 'variable' with 'expression' :param variable: ``Variable`` The variable to replace :param expression: ``Expression`` The expression with which to replace it :param replace_bound: bool Should bound variables be replaced? :param alpha_convert: bool Alpha convert automatically to avoid name clashes? """ assert isinstance(variable, Variable), "%s is not a Variable" % variable assert isinstance(expression, Expression), ( "%s is not an Expression" % expression ) return self.visit_structured( lambda e: e.replace(variable, expression, replace_bound, alpha_convert), self.__class__, ) def normalize(self, newvars=None): """Rename auto-generated unique variables""" def get_indiv_vars(e): if isinstance(e, IndividualVariableExpression): return {e} elif isinstance(e, AbstractVariableExpression): return set() else: return e.visit( get_indiv_vars, lambda parts: reduce(operator.or_, parts, set()) ) result = self for i, e in enumerate(sorted(get_indiv_vars(self), key=lambda e: e.variable)): if isinstance(e, EventVariableExpression): newVar = e.__class__(Variable("e0%s" % (i + 1))) elif isinstance(e, IndividualVariableExpression): newVar = e.__class__(Variable("z%s" % (i + 1))) else: newVar = e result = result.replace(e.variable, newVar, True) return result def visit(self, function, combinator): """ Recursively visit subexpressions. Apply 'function' to each subexpression and pass the result of each function application to the 'combinator' for aggregation: return combinator(map(function, self.subexpressions)) Bound variables are neither applied upon by the function nor given to the combinator. :param function: ``Function<Expression,T>`` to call on each subexpression :param combinator: ``Function<list<T>,R>`` to combine the results of the function calls :return: result of combination ``R`` """ raise NotImplementedError() def visit_structured(self, function, combinator): """ Recursively visit subexpressions. Apply 'function' to each subexpression and pass the result of each function application to the 'combinator' for aggregation. The combinator must have the same signature as the constructor. The function is not applied to bound variables, but they are passed to the combinator. 
:param function: ``Function`` to call on each subexpression :param combinator: ``Function`` with the same signature as the constructor, to combine the results of the function calls :return: result of combination """ return self.visit(function, lambda parts: combinator(*parts)) def __repr__(self): return f"<{self.__class__.__name__} {self}>" def __str__(self): return self.str() def variables(self): """ Return a set of all the variables for binding substitution. The variables returned include all free (non-bound) individual variables and any variable starting with '?' or '@'. :return: set of ``Variable`` objects """ return self.free() | { p for p in self.predicates() | self.constants() if re.match("^[?@]", p.name) } def free(self): """ Return a set of all the free (non-bound) variables. This includes both individual and predicate variables, but not constants. :return: set of ``Variable`` objects """ return self.visit( lambda e: e.free(), lambda parts: reduce(operator.or_, parts, set()) ) def constants(self): """ Return a set of individual constants (non-predicates). :return: set of ``Variable`` objects """ return self.visit( lambda e: e.constants(), lambda parts: reduce(operator.or_, parts, set()) ) def predicates(self): """ Return a set of predicates (constants, not variables). :return: set of ``Variable`` objects """ return self.visit( lambda e: e.predicates(), lambda parts: reduce(operator.or_, parts, set()) ) def simplify(self): """ :return: beta-converted version of this expression """ return self.visit_structured(lambda e: e.simplify(), self.__class__) def make_VariableExpression(self, variable): return VariableExpression(variable) def demo(): lexpr = Expression.fromstring print("=" * 20 + "Test reader" + "=" * 20) print(lexpr(r"john")) print(lexpr(r"man(x)")) print(lexpr(r"-man(x)")) print(lexpr(r"(man(x) & tall(x) & walks(x))")) print(lexpr(r"exists x.(man(x) & tall(x) & walks(x))")) print(lexpr(r"\x.man(x)")) print(lexpr(r"\x.man(x)(john)")) print(lexpr(r"\x y.sees(x,y)")) print(lexpr(r"\x y.sees(x,y)(a,b)")) print(lexpr(r"(\x.exists y.walks(x,y))(x)")) print(lexpr(r"exists x.x = y")) print(lexpr(r"exists x.(x = y)")) print(lexpr("P(x) & x=y & P(y)")) print(lexpr(r"\P Q.exists x.(P(x) & Q(x))")) print(lexpr(r"man(x) <-> tall(x)")) print("=" * 20 + "Test simplify" + "=" * 20) print(lexpr(r"\x.\y.sees(x,y)(john)(mary)").simplify()) print(lexpr(r"\x.\y.sees(x,y)(john, mary)").simplify()) print(lexpr(r"all x.(man(x) & (\x.exists y.walks(x,y))(x))").simplify()) print(lexpr(r"(\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x))(\x.bark(x))").simplify()) print("=" * 20 + "Test alpha conversion and binder expression equality" + "=" * 20) e1 = lexpr("exists x.P(x)") print(e1) e2 = e1.alpha_convert(Variable("z")) print(e2) print(e1 == e2)
null
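One thing the demo above does not show is binding substitution for '?'-prefixed placeholders via substitute_bindings; a minimal sketch, assuming the Variable class from the same module:

e = Expression.fromstring("walks(?x)")
bound = e.substitute_bindings({Variable("?x"): Expression.fromstring("john")})
print(bound)  # walks(john)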
170,705
import operator import re from collections import defaultdict from functools import reduce, total_ordering from nltk.internals import Counter from nltk.util import Trie def demoException(s): try: Expression.fromstring(s) except LogicalExpressionException as e: print(f"{e.__class__.__name__}: {e}") def demo_errors(): print("=" * 20 + "Test reader errors" + "=" * 20) demoException("(P(x) & Q(x)") demoException("((P(x) &) & Q(x))") demoException("P(x) -> ") demoException("P(x") demoException("P(x,") demoException("P(x,)") demoException("exists") demoException("exists x.") demoException("\\") demoException("\\ x y.") demoException("P(x)Q(x)") demoException("(P(x)Q(x)") demoException("exists x -> y")
null
170,706
import operator import re from collections import defaultdict from functools import reduce, total_ordering from nltk.internals import Counter from nltk.util import Trie def printtype(ex): print(f"{ex.str()} : {ex.type}")
null
170,707
from functools import reduce from nltk.parse import load_parser from nltk.sem.logic import ( AllExpression, AndExpression, ApplicationExpression, ExistsExpression, IffExpression, ImpExpression, LambdaExpression, NegatedExpression, OrExpression, ) from nltk.sem.skolemize import skolemize class HoleSemantics: def __init__(self, usr): def is_node(self, x): def _break_down(self, usr): def _find_top_nodes(self, node_list): def _find_top_most_labels(self): def _find_top_hole(self): def pluggings(self): def _plug_nodes(self, queue, potential_labels, plug_acc, record): def _plug_hole(self, hole, ancestors0, queue, potential_labels0, plug_acc0, record): def _violates_constraints(self, label, ancestors): def _sanity_check_plugging(self, plugging, node, ancestors): def formula_tree(self, plugging): def _formula_tree(self, plugging, node): class LambdaExpression(VariableBinderExpression): def type(self): def _set_type(self, other_type=ANY_TYPE, signature=None): def __str__(self): def skolemize(expression, univ_scope=None, used_variables=None): def hole_readings(sentence, grammar_filename=None, verbose=False): if not grammar_filename: grammar_filename = "grammars/sample_grammars/hole.fcfg" if verbose: print("Reading grammar file", grammar_filename) parser = load_parser(grammar_filename) # Parse the sentence. tokens = sentence.split() trees = list(parser.parse(tokens)) if verbose: print("Got %d different parses" % len(trees)) all_readings = [] for tree in trees: # Get the semantic feature from the top of the parse tree. sem = tree.label()["SEM"].simplify() # Print the raw semantic representation. if verbose: print("Raw: ", sem) # Skolemize away all quantifiers. All variables become unique. while isinstance(sem, LambdaExpression): sem = sem.term skolemized = skolemize(sem) if verbose: print("Skolemized:", skolemized) # Break the hole semantics representation down into its components # i.e. holes, labels, formula fragments and constraints. hole_sem = HoleSemantics(skolemized) # Maybe show the details of the semantic representation. if verbose: print("Holes: ", hole_sem.holes) print("Labels: ", hole_sem.labels) print("Constraints: ", hole_sem.constraints) print("Top hole: ", hole_sem.top_hole) print("Top labels: ", hole_sem.top_most_labels) print("Fragments:") for l, f in hole_sem.fragments.items(): print(f"\t{l}: {f}") # Find all the possible ways to plug the formulas together. pluggings = hole_sem.pluggings() # Build FOL formula trees using the pluggings. readings = list(map(hole_sem.formula_tree, pluggings)) # Print out the formulas in a textual format. if verbose: for i, r in enumerate(readings): print() print("%d. %s" % (i, r)) print() all_readings.extend(readings) return all_readings
null
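A usage sketch for hole_readings() above, assuming the default grammars/sample_grammars/hole.fcfg grammar is available through nltk.data; a scope-ambiguous sentence yields one first-order formula per plugging:

readings = hole_readings("every girl chases a dog", verbose=False)
for r in readings:
    print(r)  # typically two readings, one per relative scope of 'every' and 'a'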
170,708
import os import re import shelve import sys import nltk.data def _str2records(filename, rel): """ Read a file into memory and convert each relation clause into a list. """ recs = [] contents = nltk.data.load("corpora/chat80/%s" % filename, format="text") for line in contents.splitlines(): if line.startswith(rel): line = re.sub(rel + r"\(", "", line) line = re.sub(r"\)\.$", "", line) record = line.split(",") recs.append(record) return recs The provided code snippet includes necessary dependencies for implementing the `cities2table` function. Write a Python function `def cities2table(filename, rel_name, dbname, verbose=False, setup=False)` to solve the following problem: Convert a file of Prolog clauses into a database table. This is not generic, since it doesn't allow arbitrary schemas to be set as a parameter. Intended usage:: cities2table('cities.pl', 'city', 'city.db', verbose=True, setup=True) :param filename: filename containing the relations :type filename: str :param rel_name: name of the relation :type rel_name: str :param dbname: filename of persistent store :type schema: str Here is the function: def cities2table(filename, rel_name, dbname, verbose=False, setup=False): """ Convert a file of Prolog clauses into a database table. This is not generic, since it doesn't allow arbitrary schemas to be set as a parameter. Intended usage:: cities2table('cities.pl', 'city', 'city.db', verbose=True, setup=True) :param filename: filename containing the relations :type filename: str :param rel_name: name of the relation :type rel_name: str :param dbname: filename of persistent store :type schema: str """ import sqlite3 records = _str2records(filename, rel_name) connection = sqlite3.connect(dbname) cur = connection.cursor() if setup: cur.execute( """CREATE TABLE city_table (City text, Country text, Population int)""" ) table_name = "city_table" for t in records: cur.execute("insert into %s values (?,?,?)" % table_name, t) if verbose: print("inserting values into %s: " % table_name, t) connection.commit() if verbose: print("Committing update to %s" % dbname) cur.close()
Convert a file of Prolog clauses into a database table. This is not generic, since it doesn't allow arbitrary schemas to be set as a parameter. Intended usage:: cities2table('cities.pl', 'city', 'city.db', verbose=True, setup=True) :param filename: filename containing the relations :type filename: str :param rel_name: name of the relation :type rel_name: str :param dbname: filename of persistent store :type dbname: str
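The intended usage from the docstring, followed by a quick query against the resulting SQLite table; the country key 'china' is only an illustrative value from the Chat-80 data:

cities2table("cities.pl", "city", "city.db", verbose=True, setup=True)

import sqlite3
con = sqlite3.connect("city.db")
cur = con.cursor()
cur.execute("SELECT City, Population FROM city_table WHERE Country = ?", ("china",))
print(cur.fetchall())
con.close()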
170,709
import os import re import shelve import sys import nltk.data def process_bundle(rels): """ Given a list of relation metadata bundles, make a corresponding dictionary of concepts, indexed by the relation name. :param rels: bundle of metadata needed for constructing a concept :type rels: list(dict) :return: a dictionary of concepts, indexed by the relation name. :rtype: dict(str): Concept """ concepts = {} for rel in rels: rel_name = rel["rel_name"] closures = rel["closures"] schema = rel["schema"] filename = rel["filename"] concept_list = clause2concepts(filename, rel_name, schema, closures) for c in concept_list: label = c.prefLabel if label in concepts: for data in c.extension: concepts[label].augment(data) concepts[label].close() else: concepts[label] = c return concepts def make_valuation(concepts, read=False, lexicon=False): """ Convert a list of ``Concept`` objects into a list of (label, extension) pairs; optionally create a ``Valuation`` object. :param concepts: concepts :type concepts: list(Concept) :param read: if ``True``, ``(symbol, set)`` pairs are read into a ``Valuation`` :type read: bool :rtype: list or Valuation """ vals = [] for c in concepts: vals.append((c.prefLabel, c.extension)) if lexicon: read = True if read: from nltk.sem import Valuation val = Valuation({}) val.update(vals) # add labels for individuals val = label_indivs(val, lexicon=lexicon) return val else: return vals def concepts(items=items): """ Build a list of concepts corresponding to the relation names in ``items``. :param items: names of the Chat-80 relations to extract :type items: list(str) :return: the ``Concept`` objects which are extracted from the relations :rtype: list(Concept) """ if isinstance(items, str): items = (items,) rels = [item_metadata[r] for r in items] concept_map = process_bundle(rels) return concept_map.values() The provided code snippet includes necessary dependencies for implementing the `val_dump` function. Write a Python function `def val_dump(rels, db)` to solve the following problem: Make a ``Valuation`` from a list of relation metadata bundles and dump to persistent database. :param rels: bundle of metadata needed for constructing a concept :type rels: list of dict :param db: name of file to which data is written. The suffix '.db' will be automatically appended. :type db: str Here is the function: def val_dump(rels, db): """ Make a ``Valuation`` from a list of relation metadata bundles and dump to persistent database. :param rels: bundle of metadata needed for constructing a concept :type rels: list of dict :param db: name of file to which data is written. The suffix '.db' will be automatically appended. :type db: str """ concepts = process_bundle(rels).values() valuation = make_valuation(concepts, read=True) db_out = shelve.open(db, "n") db_out.update(valuation) db_out.close()
Make a ``Valuation`` from a list of relation metadata bundles and dump to persistent database. :param rels: bundle of metadata needed for constructing a concept :type rels: list of dict :param db: name of file to which data is written. The suffix '.db' will be automatically appended. :type db: str
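A sketch of dumping and re-reading a valuation with val_dump() above; item_metadata is the relation-metadata mapping referenced elsewhere in this module, and the store name 'chat80' is illustrative (shelve may append a '.db' suffix depending on the backend):

val_dump([item_metadata["city"]], "chat80")

# Later: re-open the persistent store and inspect a few symbols.
db_in = shelve.open("chat80")
print(sorted(db_in.keys())[:5])
db_in.close()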